Dataset columns (name: type, value range; each record below is one row of this table — the metadata fields, then the file content, then the three line-statistics columns):
  hexsha: string, length 40–40
  size: int64, 1–1.03M
  ext: string, 10 classes
  lang: string, 1 class
  max_stars_repo_path: string, length 3–239
  max_stars_repo_name: string, length 5–130
  max_stars_repo_head_hexsha: string, length 40–78
  max_stars_repo_licenses: sequence, length 1–10
  max_stars_count: int64, 1–191k (nullable)
  max_stars_repo_stars_event_min_datetime: string, length 24–24 (nullable)
  max_stars_repo_stars_event_max_datetime: string, length 24–24 (nullable)
  max_issues_repo_path: string, length 3–239
  max_issues_repo_name: string, length 5–130
  max_issues_repo_head_hexsha: string, length 40–78
  max_issues_repo_licenses: sequence, length 1–10
  max_issues_count: int64, 1–67k (nullable)
  max_issues_repo_issues_event_min_datetime: string, length 24–24 (nullable)
  max_issues_repo_issues_event_max_datetime: string, length 24–24 (nullable)
  max_forks_repo_path: string, length 3–239
  max_forks_repo_name: string, length 5–130
  max_forks_repo_head_hexsha: string, length 40–78
  max_forks_repo_licenses: sequence, length 1–10
  max_forks_count: int64, 1–105k (nullable)
  max_forks_repo_forks_event_min_datetime: string, length 24–24 (nullable)
  max_forks_repo_forks_event_max_datetime: string, length 24–24 (nullable)
  content: string, length 1–1.03M
  avg_line_length: float64, 1–958k
  max_line_length: int64, 1–1.03M
  alphanum_fraction: float64, 0–1
7942dd2668c55d2a3ac382fcebaee4c54379be08 | 288 | py | Python | API/onepanman_api/admin/placementRule.py | CMS0503/CodeOnBoard | 2df8c9d934f6ffb05dbfbde329f84c66f2348618 | ["MIT"] | null | null | null | API/onepanman_api/admin/placementRule.py | CMS0503/CodeOnBoard | 2df8c9d934f6ffb05dbfbde329f84c66f2348618 | ["MIT"] | 12 | 2020-11-19T09:24:02.000Z | 2020-12-02T11:07:22.000Z | API/onepanman_api/admin/placementRule.py | CMS0503/CodeOnBoard | 2df8c9d934f6ffb05dbfbde329f84c66f2348618 | ["MIT"] | null | null | null |

from django.contrib import admin
from .. import models
@admin.register(models.PlacementRule)
class PlacementRuleAdmin(admin.ModelAdmin):
"""
    Placement rule information
"""
list_display = ['pk', 'type1', 'type2', 'rule_number', 'name']
class Meta:
        model = models.PlacementRule

| 22.153846 | 66 | 0.666667 |
7942dd9fad01a113bccf4c4452a3d100ba26b9a1 | 7,391 | py | Python | data_analyzer.py | whiteTyger/NEUR325 | 04dc5980e9a2deb7bfebb4f7989ca7f99262446e | ["MIT"] | 1 | 2018-07-31T20:49:43.000Z | 2018-07-31T20:49:43.000Z | data_analyzer.py | whiteTyger/NEUR325_experiment | 04dc5980e9a2deb7bfebb4f7989ca7f99262446e | ["MIT"] | null | null | null | data_analyzer.py | whiteTyger/NEUR325_experiment | 04dc5980e9a2deb7bfebb4f7989ca7f99262446e | ["MIT"] | null | null | null |

import os
import csv
import codecs
from collections import defaultdict
import argparse
__author__='Eileen Cho'
# read in answer keys
# depending on the typedict param, returns a dict mapping words to response or category
def ansKey(filename,typedict):
f=open(filename)
reader = csv.reader(f)
returnAns = defaultdict(str)
for row in reader:
k = row[1]
if typedict=="cat":
v = row[2] #category
else:
v = row[0] #response y/n
returnAns[k] = v
f.close()
return returnAns
# reads in a subject data file and returns three dictionaries
#cResponse maps question number to response given
#pResponse maps practice words to correct or incorrect value
#exResponse maps experimental words to correct or incorrect value
def dataReader(filename):
f=open(filename)
reader = csv.reader(f)
headers = reader.next()
correctIndex = headers.index('correct')
wordIndex = headers.index('target')
formIndex = headers.index('form_response')
exResponse = defaultdict(int)
pResponse = defaultdict(int)
cResponse = defaultdict(str)
for index, row in enumerate(reader):
if index<4: #looking at content questions
cResponse[index+1] = row[formIndex]
elif index in range(4,10): #looking at practice questions
pResponse[row[wordIndex]] = int(row[correctIndex])
else:
exResponse[row[wordIndex]] = int(row[correctIndex])
f.close()
return cResponse, pResponse, exResponse
# score breakdown (frequency)
# sorts responses into an overall count of correct answers plus counts of
# incorrect answers per word category ('present', 'trick', 'not'), as illustrated below
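# For illustration (hypothetical numbers), breakdown() might return
#   pBreakdown  = {'overall': 3, 'present': 1, 'trick': 0, 'not': 2}
#   exBreakdown = {'overall': 47, 'present': 4, 'trick': 7, 'not': 2}
# where 'overall' counts correct answers and the other keys count the incorrect
# answers that fell into each word category.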
def breakdown(pResponse, exResponse,pKey,exKey):
pBreakdown = {'present':0,'trick':0,'not':0, 'overall':0}
exBreakdown = {'present':0,'trick':0,'not':0,'overall':0}
#total correct answers
pBreakdown['overall'] = sum(pResponse.values())
exBreakdown['overall'] = sum(exResponse.values())
#breakdown of incorrect answers
for k in pResponse: #practice answers
if pResponse[k]==0:
pBreakdown[pKey[k]]+=1
for k in exResponse: #experiment answers
if exResponse[k]==0:
exBreakdown[exKey[k]]+=1
return pBreakdown, exBreakdown
'''convert scores to percentages'''
def percentage(pBreakdown, exBreakdown):
pReturn = defaultdict(float)
exReturn = defaultdict(float)
divisor = 0
for k in pBreakdown:
if k=='overall':
divisor = 6
else:
divisor = 2
pReturn[k] = float(pBreakdown[k])/divisor*100
for k in exBreakdown:
if k=='overall':
divisor = 60
else:
divisor = 20
exReturn[k] = float(exBreakdown[k])/divisor*100
return pReturn,exReturn
# Extracts the accuracy of the multiple-choice content questions.
def mcAccuracy(cData):
cAnswer = {1:"c. Wolverine", 2:"a. Dancing", 3:"d. To compete in the Daytona 200", 4:"c. Gorilla mask"}
cResult = defaultdict(str)
total = 0
for k in cData:
if cData[k]==cAnswer[k]:
cResult[k] = "CORRECT"
total+=1
else:
cResult[k] = "INCORRECT"
return cResult,total
# formats the subject name, score breakdown, and condition as a printable string.
def toString(pBreakdown,exBreakdown,subjName,condition):
s=''
s+= "Subject: "+ str(subjName)+"\n"
s+= "Condition: "+ str(condition)+"\n"
s+= "Practice overall score: "+ str(pBreakdown['overall'])+" correct out of 6\n"
s+= "Incorrect words category breakdown"+"\n"
s+= "present: "+ str(pBreakdown['present'])+" out of 2 incorrect\n"
s+= "trick: "+ str(pBreakdown['trick'])+" out of 2 incorrect\n"
s+= "not: "+ str(pBreakdown['not'])+" out of 2 incorrect\n"
s+= "\n"
s+= "Experiment overall score: "+str(exBreakdown['overall'])+" correct out of 60\n"
s+= "Incorrect words category breakdown"+"\n"
s+= "present: "+ str(exBreakdown['present'])+" out of 20 incorrect\n"
s+= "trick: "+ str(exBreakdown['trick'])+" out of 20 incorrect\n"
s+= "not: "+ str(exBreakdown['not'])+" out of 20 incorrect\n"
s+= "\n"
s+= "."*40
s+= "\n"
return s
# formats the totals across all subjects, categorized by nap or wake, as a printable string.
def printTotals(subjTotalsNap,subjTotalsWake,napCount,wakeCount):
s=''
s+= "Nap Total Scores (averaged)"+"\n"
s+= "Overall: "+ str(subjTotalsNap['overall']/napCount)+" out of 60 correct\n"
s+= "Incorrect words category breakdown"+"\n"
s+= "present: "+ str((subjTotalsNap['present']/napCount))+" out of 20 incorrect\n"
s+= "trick: "+ str((subjTotalsNap['trick']/napCount))+" out of 20 incorrect\n"
s+= "not: "+ str((subjTotalsNap['not']/napCount))+" out of 20 incorrect\n"
s+= "\n"
s+= "Wake Total Scores (averaged)"+"\n"
s+= "Overall: "+ str(subjTotalsWake['overall']/wakeCount)+" out of 60 correct\n"
s+= "Incorrect words category breakdown"+"\n"
s+= "present: "+ str((subjTotalsWake['present']/wakeCount))+" out of 20 incorrect\n"
s+= "trick: "+ str((subjTotalsWake['trick']/wakeCount))+" out of 20 incorrect\n"
s+= "not: "+ str((subjTotalsWake['not']/wakeCount))+" out of 20 incorrect\n"
s+= "\n"
s+= "-"*40
s+= "\n"
return s
# formats the subject's multiple-choice answers and accuracy as a printable string.
def printMC(cdata,subjName):
result,total=mcAccuracy(cdata)
s=""
s+="Subject: " + str(subjName) + "\n"
s+="a1: "+ str(cdata[1]) + " " + str(result[1]) + "\n"
s+="a2: "+ str(cdata[2]) + " " +str(result[2]) + "\n"
s+="a3: "+ str(cdata[3]) + " " + str(result[3]) + "\n"
s+="a4: "+ str(cdata[4]) + " " +str(result[4]) + "\n"
s+="Total correct: " + str(total) + "\n\n"
return s
# main function. Extracts the necessary data from each file, processes it into the proper format, does preliminary calculations, and outputs the results to the console and to files.
def main():
#get all necessary data
practiceFile = 'practice_words.csv'
experimentFile = 'experimental_words.csv'
fnames = os.listdir('behavioral_data')
f = open(os.path.join('analyzed_data','subject_analysis.txt'),'w') #file to write subject experimental task results
g = open(os.path.join('analyzed_data','mc_questions.txt'),'w') #file to write subject multiple choice results
#parse out answer keys
practiceCat = ansKey(practiceFile,'cat')
experimentCat = ansKey(experimentFile,'cat')
#initialize dictionaries to store results of data extraction, contains totals across subjects, separated by category.
subjTotalsNap = defaultdict(float)
subjTotalsWake = defaultdict(float)
mcTotalsNap = defaultdict(float)
mcTotalsWake = defaultdict(float)
#keep count of number of subjects per category for averaging use.
napCount = 0
wakeCount = 0
for subj in fnames:
#reading in subject files...
sFilename = list(subj)
condition = ''
#determine subject condition based on file name. If 0, then nap, if 1 then awake.
if sFilename[14]=='0':
condition = 'nap'
napCount+=1
else:
condition = 'awake'
wakeCount+=1
#read in necessary data from data file...
cdata,pdata,exdata = dataReader(os.path.join('behavioral_data',subj))
#calculating incorrect breakdown...
pBreakdown, exBreakdown = breakdown(pdata,exdata,practiceCat,experimentCat)
#calculating subject totals...
for k in exBreakdown:
if condition == 'nap':
subjTotalsNap[k]+=exBreakdown[k]
else:
subjTotalsWake[k]+=exBreakdown[k]
#print to console for each subject
print printMC(cdata,subj)
print toString(pBreakdown,exBreakdown, subj, condition)
#write to txt file for each subject
f.write(toString(pBreakdown,exBreakdown, subj, condition))
g.write(printMC(cdata,subj))
#print to console for totals and write to file
print printTotals(subjTotalsNap,subjTotalsWake,napCount,wakeCount)
f.write(printTotals(subjTotalsNap,subjTotalsWake,napCount,wakeCount))
#run main function
if __name__=='__main__':
main()
| 33.292793 | 135 | 0.703829 |
7942ddf7369d686ef1b401e937de0e409106d33a | 970 | py | Python | utils/scrapper.py | typhonshambo/desi-memer | 74d400e35e3861945cde0425948ebf30b225b4ee | ["MIT"] | 1 | 2021-12-02T14:26:35.000Z | 2021-12-02T14:26:35.000Z | utils/scrapper.py | typhonshambo/desi-memer | 74d400e35e3861945cde0425948ebf30b225b4ee | ["MIT"] | null | null | null | utils/scrapper.py | typhonshambo/desi-memer | 74d400e35e3861945cde0425948ebf30b225b4ee | ["MIT"] | null | null | null |

import discord
import requests
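# Descriptive note (not part of the original file): both helpers below fetch a
# random meme from the public meme-api endpoint and wrap it in a discord.Embed;
# ranMeme() pulls from the default pool, desiMeme() from r/IndianDankMemes.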
def ranMeme():
request = requests.get("https://meme-api.herokuapp.com/gimme/")
data = request.json()
postLink = data['postLink']
title = data['title']
url = data['url']
ups = data['ups']
nsfw = data['nsfw']
embed = discord.Embed(
color=discord.Colour.random(),
title = title,
timestamp=discord.utils.utcnow()
)
embed.set_image(url=url)
embed.set_footer(text=f"👍 {ups}")
return embed
def desiMeme():
request = requests.get("https://meme-api.herokuapp.com/gimme/IndianDankMemes/")
data = request.json()
postLink = data['postLink']
title = data['title']
url = data['url']
ups = data['ups']
nsfw = data['nsfw']
embed = discord.Embed(
color=discord.Colour.random(),
title = title,
timestamp=discord.utils.utcnow()
)
embed.set_image(url=url)
embed.set_footer(text=f"👍 {ups}")
return embed
| 21.555556 | 83 | 0.601031 |
7942e1b2c32f9e208a7efdb796ad62f8299fa3e0 | 3,138 | py | Python | ansible/roles/mn-find-collateral/scripts/find-collateral.py | AXErunners/axe-network-deploy | b0a89725d3d3d6bee4794a9899003dcb1505d369 | ["MIT"] | 2 | 2019-01-26T22:08:07.000Z | 2019-07-18T02:33:35.000Z | ansible/roles/mn-find-collateral/scripts/find-collateral.py | AXErunners/axe-network-deploy | b0a89725d3d3d6bee4794a9899003dcb1505d369 | ["MIT"] | null | null | null | ansible/roles/mn-find-collateral/scripts/find-collateral.py | AXErunners/axe-network-deploy | b0a89725d3d3d6bee4794a9899003dcb1505d369 | ["MIT"] | 1 | 2019-01-26T22:08:11.000Z | 2019-01-26T22:08:11.000Z |

#!/usr/bin/env python3
import subprocess
import sys
import json
COIN = 100000000
rpcargs = sys.argv[1]
mn_address = sys.argv[2]
find_protx = sys.argv[3] == 'True'
blockchaininfo_s = subprocess.run("axe-cli %s getblockchaininfo" % (rpcargs), shell=True, check=True, stdout=subprocess.PIPE).stdout.decode("utf-8")
unspent_s = subprocess.run("axe-cli %s listunspent 0 9999999 \'[\"%s\"]\'" % (rpcargs, mn_address), shell=True, check=True, stdout=subprocess.PIPE).stdout.decode("utf-8")
addressutxos_s = subprocess.run("axe-cli %s getaddressutxos \'{\"addresses\":[\"%s\"]}\'" % (rpcargs, mn_address), shell=True, check=True, stdout=subprocess.PIPE).stdout.decode("utf-8")
protxs_s = subprocess.run("axe-cli %s protx list wallet 1" % (rpcargs), shell=True, check=True, stdout=subprocess.PIPE).stdout.decode("utf-8")
# print("blockchaininfo_s: %s" % blockchaininfo_s, file=sys.stderr)
# print("unspent_s: %s" % unspent_s, file=sys.stderr)
# print("addressutxos_s: %s" % addressutxos_s, file=sys.stderr)
print("protxs_s: %s" % protxs_s, file=sys.stderr)
blockchaininfo = json.loads(blockchaininfo_s)
unspent = json.loads(unspent_s)
addressutxos = json.loads(addressutxos_s)
protxs = json.loads(protxs_s)
tipHeight = blockchaininfo.get('blocks') - 1
# We have to look at both results from listunspent and getaddressutxos
# listunspent will not include already locked masternode UTXOs
# getaddressutxos will not include unconfirmed UTXOs
utxos = unspent
for u in addressutxos:
e = {
"txid": u.get('txid'),
"amount": u.get('satoshis') / COIN,
"vout": u.get('outputIndex'),
"confirmations": tipHeight - u.get('height')
}
# We might end up with duplicate entries, but who cares
utxos.append(e)
for protx in protxs:
if protx['state']['payoutAddress'] != mn_address:
continue
e = {
"txid": protx['proTxHash'],
"amount": 1000,
"vout": protx['collateralIndex'],
"confirmations": protx['confirmations']
}
# We might end up with duplicate entries, but who cares
utxos.append(e)
best_txid = None
best_vout = None
best_conf = -1
print("find_protx: %s" % str(find_protx), file=sys.stderr)
for u in utxos:
txid = u.get('txid')
vout = u.get('vout')
if u.get('amount') != 1000:
continue
if find_protx:
rawtx_s = subprocess.run("axe-cli %s getrawtransaction %s 1" % (rpcargs, txid), shell=True, check=True, stdout=subprocess.PIPE).stdout.decode("utf-8")
rawtx = json.loads(rawtx_s)
#print("getrawtransaction: %s" % rawtx, file=sys.stderr)
if rawtx['version'] < 3 or rawtx['type'] != 1:
continue
better = best_txid is None
if not better:
t1 = "%s-%d" % (txid, vout)
t2 = "%s-%d" % (best_txid, best_vout)
c = u.get('confirmations')
if best_conf == c:
better = t1 < t2
else:
better = c < best_conf
if better:
best_txid = u.get('txid')
best_vout = u.get('vout')
best_conf = u.get('confirmations')
if best_vout is None:
sys.exit(1)
print(best_txid)
print(best_vout)
| 31.69697 | 185 | 0.650733 |
7942e2232397c5460f0fc50247746cfaec82a9a3 | 3,668 | py | Python | Bayes Guards SMS/Vectorize/bayes.py | jetbrains-academy/Machine-Learning-101 | 7b583dbff1e90115296dcaeac78ca88363c158c9 | ["MIT"] | null | null | null | Bayes Guards SMS/Vectorize/bayes.py | jetbrains-academy/Machine-Learning-101 | 7b583dbff1e90115296dcaeac78ca88363c158c9 | ["MIT"] | 10 | 2021-11-22T16:51:52.000Z | 2022-02-14T12:57:57.000Z | Bayes Guards SMS/Vectorize/bayes.py | jetbrains-academy/Machine-Learning-101 | 7b583dbff1e90115296dcaeac78ca88363c158c9 | ["MIT"] | null | null | null |

import numpy as np
from vectorize import *
class NaiveBayes:
# a predefined method needed for 'Laplace Smoothing' that initializes the
# smoothing alpha parameter, by default it's 1
def __init__(self, alpha=1):
self.alpha = alpha
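    # A sketch of the quantities the TODOs in fit() below are meant to compute
    # (one common Laplace-smoothed formulation; the exact variant expected by the
    # course text may differ):
    #   prior(c)          = (# messages of class c) / (# messages)
    #   likelihood(w | c) = (count of w in class c + alpha) / (words in class c + alpha * (dict_size + 1))
    # The extra column in self.likelihood (index dict_size) is reserved for words
    # not seen during training.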
def fit(self, X, y):
# This allows us to get the uniques classes from the array of all class labels
self.unique_classes = np.unique(y)
# get the unique dictionary and input data representation array using vectorize()
self.dictionary, X = vectorize(X)
self.dict_size = len(self.dictionary)
# create three arrays of required dimensions to store class prior probabilities, total number
# of words in each class and relative word frequencies for each class, respectively:
self.classes_prior = np.zeros(len(self.unique_classes), dtype=np.float64)
self.classes_words_count = np.zeros(len(self.unique_classes), dtype=np.float64)
self.likelihood = np.full((len(self.unique_classes), self.dict_size + 1), 0, dtype=np.float64)
for i, clazz in enumerate(self.unique_classes):
# create a mask to filter data based on which class in processed right now
y_i_mask = # TODO
# count the sum of bools in the mask array to get the number of class occurrences
# in the whole training set
y_i_sum = np.sum(y_i_mask)
# class prior probability is its fraction in the training set
self.classes_prior[i] = # TODO
# count the number of words in each class
self.classes_words_count[i] = # TODO
            # calculate the likelihood numerator by counting how many times each unique word
# is encountered in all messages of this class
self.likelihood[i, :-1] += # TODO
# get the denominator for this class and finalize the calculation of the
# likelihood of this word for this class
denominator = # TODO
self.likelihood[i] = self.likelihood[i] / denominator
# the following methods are needed for the task 'Predict'
def predict(self, X):
pass
# result = []
# # transform each message within the input array into a vector of words
# X = split_by_words(X)
#
# # in each message find the unique words and create an array of zeros of the appropriate size
# for message in X:
# unique = np.unique(message)
# index_array = # TODO
#
# # look for each unique word in the dictionary, and add its index to the array we just created,
# # if its not there - add the index equal to the length of the dictionary
# for i, word in enumerate(unique):
# word_index = # TODO
# index_array[i] = word_index
#
# # slice the likelihood array to leave only the words for words that are in the current message
# # and apply logarithm to calculate log likelihood
# log_likelihood = # TODO
# # refer to the formula and hints in the text to calculate the posterior probability
# # for each class and select the class with the largest probability, append it to the result
# posterior = # TODO
# predicted = # TODO
# result.append(predicted)
# return result
# This method should run the algorithm on the test set, compare the obtained classification
# results with the real class labels, and return the proportion of correctly classified objects.
def score(self, X, y):
pass
        # return # TODO

| 47.636364 | 108 | 0.635769 |
7942e3245fd27e8b3fce161784e84a5dc79b89ce | 38 | py | Python | trainer/test_train2.py | transcendentsky/mixup | 41886ffa45412932f4ec5f7653df6c410a1365cc | ["BSD-3-Clause"] | 2 | 2018-06-18T12:09:03.000Z | 2019-05-20T14:04:06.000Z | trainer/test_train2.py | transcendentsky/mixup | 41886ffa45412932f4ec5f7653df6c410a1365cc | ["BSD-3-Clause"] | null | null | null | trainer/test_train2.py | transcendentsky/mixup | 41886ffa45412932f4ec5f7653df6c410a1365cc | ["BSD-3-Clause"] | 1 | 2019-03-26T01:49:11.000Z | 2019-03-26T01:49:11.000Z |

import os
print(os.path.abspath('.'))

| 19 | 27 | 0.684211 |
7942e3aa6c544f1d2752a81897daef8b68fdaafb | 4,596 | py | Python | piece.py | jschopmeyer13/Stratego_Ai | ec9916df0e4459351694b43858c07903da5e8068 | ["MIT"] | 1 | 2020-09-11T00:24:21.000Z | 2020-09-11T00:24:21.000Z | piece.py | jschopmeyer13/Stratego_Ai | ec9916df0e4459351694b43858c07903da5e8068 | ["MIT"] | null | null | null | piece.py | jschopmeyer13/Stratego_Ai | ec9916df0e4459351694b43858c07903da5e8068 | ["MIT"] | null | null | null |

from board import *
FLAG = 11
BOMB = 12
LAKE = -1
class Piece:
def __init__(self, x, y, rank, team, count, knownStat = []):
self.X = x
self.Y = y
        self.rank = rank # switched id to rank for better naming
self.count = count
self.team = team
self.isKnown = knownStat
def print(self, show = True):
if show:
output = "Piece: (" + str(self.X) +"," + str(self.Y) + "), rank: " + str(self.rank) + ", team:" + str(self.team) +", count:" + str(self.count)
print(output)
else:
output = "Piece(" + str(self.X) +"," + str(self.Y) + ", " + str(self.rank) + ", " + str(self.team) + ", " + str(self.count) + ")"
return output
def inBoard(self, pos):
if pos >= 0 and pos <=9:
return True
return False
def isvalidPiece(self, piece, team):
if piece == 0 or piece == -1 or piece.rank == BOMB or piece.rank == FLAG:
# print("invalid piece selection")
return False
elif piece.rank == 11:
return False
if not (piece.inBoard(piece.X) and piece.inBoard(piece.Y)):
return False
if piece.team != team:
return False
return True
def isValidMove(self, piece, team):
if piece == 0:
return True
if piece == LAKE:
return False
if piece.team == team:
return False
else:
# Attacks
return True
# gets all the valid moves for the piece
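    # Note on the two return shapes (inferred from the code below): with
    # formatted=True the result is always a 4-item list ordered
    # [Down, Up, Right, Left], with 0 as a placeholder for unavailable moves;
    # with formatted=False it is a plain list of (fromX, fromY, toX, toY) tuples.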
def getValidMoves(self,board, formatted = False):
if board.board[self.X][self.Y]==0 or board.board[self.X][self.Y] ==11 or board.board[self.X][self.Y]==12 or board.board[self.X][self.Y] == FLAG:
if formatted:
return [0,0,0,0]
else:
return []
elif self.rank != 9:
out = []
form = []
neibs = [(self.X + 1, self.Y), (self.X - 1, self.Y),
(self.X, self.Y + 1), (self.X, self.Y-1)]
for i in neibs:
if self.inBoard(i[0]) and self.inBoard(i[1]):
if self.isValidMove(board.board[i[0]][i[1]],board.board[self.X][self.Y].team):
out.append((self.X, self.Y, i[0], i[1]))
form.append((self.X, self.Y, i[0], i[1]))
else:
form.append(0) # placeholder value for direction used for ManualPlayer
else:
form.append(0)
if formatted:
return form
else:
return out
else:
# Scout handling
# order [Down, Up, Right, Left]
directions = [(1,0),(-1,0),(0,1),(0,-1)]
out = []
for d in directions:
val = self.scoutHandle(board,d[0],d[1],formatted)
if formatted:
out.append(val)
elif val != None:
out.append(val)
return out
# Returns the valid moves for a scout
def scoutHandle(self, board, xMove, yMove, formatted=False):
team = board.board[self.X][self.Y].team
x = self.X
y = self.Y
while True:
if self.inBoard(x+xMove) and self.inBoard(y+yMove):
if board.board[x+xMove][y+yMove] == 0:
x+=xMove
y+=yMove
else:
if board.board[x+xMove][y+yMove] != LAKE and board.board[x+xMove][y+yMove].team != team:
x+=xMove
y+=yMove
break
else:
# out of bounds
break
if not (x == self.X and y == self.Y):
return (self.X, self.Y, x, y)
elif formatted:
# used for formatting for manual player
return 0
| 35.90625 | 159 | 0.400783 |
7942e42b60ff8089bc3f7dce1c8f318ffee802d4 | 970 | py | Python | year2020/day18/test_solver.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day18/test_solver.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day18/test_solver.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | import pytest
from assertpy import assert_that
import year2020.day18.reader as reader
import year2020.day18.solver as solver
def test_example_a():
expressions = ['1 + (2 * 3) + (4 * (5 + 6))', '2 * 3 + (4 * 5)', '5 + (8 * 3 + 9 + 3 * 4 * 3)']
result = solver.solve_a(reader.read_lines(expressions, operators={'+': 0, '*': 0}))
assert_that(result).is_equal_to(514)
@pytest.mark.solution
def test_solution_a():
result = solver.solve_a(reader.read(operators={'+': 0, '*': 0}))
assert_that(result).is_equal_to(98621258158412)
def test_example_b():
expressions = ['1 + (2 * 3) + (4 * (5 + 6))', '2 * 3 + (4 * 5)', '5 + (8 * 3 + 9 + 3 * 4 * 3)']
result = solver.solve_b(reader.read_lines(expressions, operators={'+': 1, '*': 0}))
assert_that(result).is_equal_to(1542)
@pytest.mark.solution
def test_solution_b():
result = solver.solve_b(reader.read(operators={'+': 1, '*': 0}))
assert_that(result).is_equal_to(241216538527890)
| 32.333333 | 99 | 0.629897 |
7942e4630f9b569c4428ece897b75ab42033a934 | 40,162 | py | Python | tempita/__init__.py | RomanSharapov/tempita | 0823acafebcc1815e01337ad56bca43148b1d6d5 | [
"MIT"
] | null | null | null | tempita/__init__.py | RomanSharapov/tempita | 0823acafebcc1815e01337ad56bca43148b1d6d5 | [
"MIT"
] | null | null | null | tempita/__init__.py | RomanSharapov/tempita | 0823acafebcc1815e01337ad56bca43148b1d6d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
import cgi
import os
import re
import sys
import tokenize
if sys.version_info[0] == 2:
from cStringIO import StringIO
from urllib import quote as url_quote
else:
from io import StringIO
from urllib.parse import quote as url_quote
from tempita._looper import looper
from tempita.compat3 import bytes, basestring_, next, is_unicode, coerce_text
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
delimiters=None):
self.content = content
# set delimiters
if delimiters is None:
delimiters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
            assert len(delimiters) == 2 and all([isinstance(delimiter, basestring_)
                                                 for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
self.default_namespace['start_braces'] = delimiters[0]
self.default_namespace['end_braces'] = delimiters[1]
self.delimiters = delimiters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__file__' in globals:
name = globals['__file__']
if name.endswith('.pyc') or name.endswith('.pyo'):
name = name[:-1]
elif '__name__' in globals:
name = globals['__name__']
else:
name = '<string>'
if lineno:
name += ':%s' % lineno
self.name = name
self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
("If you pass in a single argument, you must pass in a ",
"dict-like object (with a .items() method); you gave %r")
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in defs.iteritems():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns,
pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
__traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except:
exc_info = sys.exc_info()
e = exc_info[1]
if getattr(e, 'args', None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
            raise  # re-raise the original exception with the augmented message
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except:
exc_info = sys.exc_info()
e = exc_info[1]
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
            raise  # re-raise the original exception with the augmented message
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = unicode(value)
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value)
and self.default_encoding):
value = value.encode(self.default_encoding)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
            raise  # re-raise the original exception with the augmented message
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, delimiters=None, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
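# Descriptive note on the helper below: ``bunch`` is a dict with attribute-style
# access; if a 'default' key is present, lookups of missing keys fall back to
# that value. For example (hypothetical values):
#
#     b = bunch(default=0, x=1)
#     b.x            # -> 1
#     b['missing']   # -> 0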
class bunch(dict):
def __init__(self, **kw):
for name, value in kw.iteritems():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
items = [
(k, v) for k, v in self.iteritems()]
items.sort()
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in items]))
############################################################
## HTML Templating
############################################################
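# Illustrative sketch of the HTML helpers defined below (not part of the original
# file; the outputs shown assume the Python 2 code paths in this module):
#
#     sub_html('<b>{{word}}</b>', word='<script>')   # -> '<b>&lt;script&gt;</b>'
#     attr(href='/x', class_='big')                   # -> html('class="big" href="/x"')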
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __html__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value, force=True):
if not force and hasattr(value, '__html__'):
return value.__html__()
if value is None:
return ''
if not isinstance(value, basestring_):
value = coerce_text(value)
if sys.version >= "3" and isinstance(value, bytes):
value = cgi.escape(value.decode('latin1'), 1)
value = value.encode('latin1')
else:
value = cgi.escape(value, 1)
if sys.version < "3":
if is_unicode(value):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
v = coerce_text(v)
if is_unicode(v):
v = v.encode('utf8')
return url_quote(v)
def attr(**kw):
kw = list(kw.iteritems())
kw.sort()
parts = []
for name, value in kw:
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
html_quote=html_quote,
))
def _repr(self, value, pos):
if hasattr(value, '__html__'):
value = value.__html__()
quote = False
else:
quote = True
plain = Template._repr(self, value, pos)
if quote:
return html_quote(plain)
else:
return plain
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
class TemplateDef(object):
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return '<tempita function %s(%s) at %s:%s>' % (
self._func_name, self._func_signature,
self._template.name, self._pos)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns['self'] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return ''.join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template, self._func_name, self._func_signature,
self._body, self._ns, self._pos, bound_self=obj)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in kw.iteritems():
if not var_kw and name not in sig_args:
raise TypeError(
'Unexpected argument %s' % name)
if name in sig_args:
                values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
'Extra position arguments: %s'
% ', '.join(repr(v) for v in args))
for name, value_expr in defaults.iteritems():
if name not in values:
values[name] = self._template._eval(
value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError(
'Missing argument: %s' % name)
if var_kw:
values[var_kw] = extra_kw
return values
class TemplateObject(object):
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.__name)
class TemplateObjectGetter(object):
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return '<%s around %r>' % (self.__class__.__name__, self.__template_obj)
class _Empty(object):
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
return u''
def __iter__(self):
return iter(())
def __bool__(self):
return False
if sys.version < "3":
__nonzero__ = __bool__
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
if delimiters is None:
delimiters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), line_offset, last_pos)
if expr == delimiters[0] and in_expr:
raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
elif expr == delimiters[1] and not in_expr:
raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
last_trim = None
for i in range(len(tokens)):
current = tokens[i]
if isinstance(tokens[i], basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not isinstance(next_chunk, basestring_)
or not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok
and (not next_chunk or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip())
or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
def find_position(string, index, last_index, last_pos=(1, 1)):
"""
Given a string and index, return (line, column)
"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
def parse(s, name=None, line_offset=0, delimiters=None):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
if delimiters is None:
delimiters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'continue outside of for loop',
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" % first,
position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
nest_count += 1
elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow+1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import os
if args is None:
args = sys.argv[1:]
kwargs = dict(usage=_fill_command_usage)
try:
import pkg_resources
dist = pkg_resources.get_distribution('tempita')
kwargs['version'] = coerce_text(dist)
except ImportError:
# pkg_resources not available
pass
parser = optparse.OptionParser(**kwargs)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
            name = name[3:]  # strip the 'py:' prefix
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
f = open(template_name, 'rb')
template_content = f.read()
f.close()
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
f = open(options.output, 'wb')
f.write(result)
f.close()
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
| 33.357143 | 117 | 0.531722 |
7942e5f89672596006d2e95cf089c6f9797938d2 | 137,835 | py | Python | hail/python/hail/expr/expressions/typed_expressions.py | FINNGEN/hail | 03fabf5dad71415aeca641ef1618e5352639d683 | ["MIT"] | 789 | 2016-09-05T04:14:25.000Z | 2022-03-30T09:51:54.000Z | hail/python/hail/expr/expressions/typed_expressions.py | FINNGEN/hail | 03fabf5dad71415aeca641ef1618e5352639d683 | ["MIT"] | 5,724 | 2016-08-29T18:58:40.000Z | 2022-03-31T23:49:42.000Z | hail/python/hail/expr/expressions/typed_expressions.py | FINNGEN/hail | 03fabf5dad71415aeca641ef1618e5352639d683 | ["MIT"] | 233 | 2016-08-31T20:42:38.000Z | 2022-02-17T16:42:39.000Z |

from typing import Mapping, Dict, Sequence
from deprecated import deprecated
import hail as hl
from .indices import Indices, Aggregation
from .base_expression import Expression, ExpressionException, to_expr, \
unify_all, unify_types
from .expression_typecheck import coercer_from_dtype, \
expr_any, expr_array, expr_set, expr_bool, expr_numeric, expr_int32, \
expr_int64, expr_str, expr_dict, expr_interval, expr_tuple, expr_oneof, \
expr_ndarray
from hail.expr.types import HailType, tint32, tint64, tfloat32, \
tfloat64, tbool, tcall, tset, tarray, tstruct, tdict, ttuple, tstr, \
tndarray, tlocus, tinterval, is_numeric
import hail.ir as ir
from hail.typecheck import typecheck, typecheck_method, func_spec, oneof, \
identity, nullable, tupleof, sliceof, dictof, anyfunc
from hail.utils.java import Env, warning
from hail.utils.linkedlist import LinkedList
from hail.utils.misc import wrap_to_list, wrap_to_tuple, get_nice_field_error, get_nice_attr_error
import numpy as np
class CollectionExpression(Expression):
"""Expression of type :class:`.tarray` or :class:`.tset`
>>> a = hl.literal([1, 2, 3, 4, 5])
>>> s3 = hl.literal({'Alice', 'Bob', 'Charlie'})
"""
def _filter_missing_method(self, filter_missing: bool, name: str, ret_type: HailType, *args):
collection = self
if filter_missing:
collection = self.filter(hl.is_defined)
return collection._method(name, ret_type, *args)
@typecheck_method(f=func_spec(1, expr_bool))
def any(self, f):
"""Returns ``True`` if `f` returns ``True`` for any element.
Examples
--------
>>> hl.eval(a.any(lambda x: x % 2 == 0))
True
>>> hl.eval(s3.any(lambda x: x[0] == 'D'))
False
Notes
-----
This method always returns ``False`` for empty collections.
Parameters
----------
f : function ( (arg) -> :class:`.BooleanExpression`)
Function to evaluate for each element of the collection. Must return a
:class:`.BooleanExpression`.
Returns
-------
:class:`.BooleanExpression`.
``True`` if `f` returns ``True`` for any element, ``False`` otherwise.
"""
return hl.array(self).fold(lambda accum, elt: accum | f(elt), False)
@typecheck_method(f=func_spec(1, expr_bool))
def filter(self, f):
"""Returns a new collection containing elements where `f` returns ``True``.
Examples
--------
>>> hl.eval(a.filter(lambda x: x % 2 == 0))
[2, 4]
>>> hl.eval(s3.filter(lambda x: ~(x[-1] == 'e'))) # doctest: +SKIP_OUTPUT_CHECK
frozenset({'Bob'})
Notes
-----
Returns a same-type expression; evaluated on a :class:`.SetExpression`, returns a
:class:`.SetExpression`. Evaluated on an :class:`.ArrayExpression`,
returns an :class:`.ArrayExpression`.
Parameters
----------
f : function ( (arg) -> :class:`.BooleanExpression`)
Function to evaluate for each element of the collection. Must return a
:class:`.BooleanExpression`.
Returns
-------
:class:`.CollectionExpression`
Expression of the same type as the callee.
"""
# FIXME: enable doctest
def unify_ret(t):
if t != tbool:
raise TypeError("'filter' expects 'f' to return an expression of type 'bool', found '{}'".format(t))
return hl.tarray(self._type.element_type)
def transform_ir(array, name, body):
return ir.ToArray(ir.StreamFilter(ir.ToStream(array), name, body))
array_filter = hl.array(self)._ir_lambda_method(transform_ir, f, self.dtype.element_type, unify_ret)
if isinstance(self.dtype, tset):
return hl.set(array_filter)
else:
assert isinstance(self.dtype, tarray), self.dtype
return array_filter
@typecheck_method(f=func_spec(1, expr_bool))
def find(self, f):
"""Returns the first element where `f` returns ``True``.
Examples
--------
>>> hl.eval(a.find(lambda x: x ** 2 > 20))
5
>>> hl.eval(s3.find(lambda x: x[0] == 'D'))
None
Notes
-----
If `f` returns ``False`` for every element, then the result is missing.
Parameters
----------
f : function ( (arg) -> :class:`.BooleanExpression`)
Function to evaluate for each element of the collection. Must return a
:class:`.BooleanExpression`.
Returns
-------
:class:`.Expression`
Expression whose type is the element type of the collection.
"""
# FIXME this should short-circuit
return self.fold(lambda accum, x:
hl.if_else(hl.is_missing(accum) & f(x), x, accum),
hl.missing(self._type.element_type))
@typecheck_method(f=func_spec(1, expr_any))
def flatmap(self, f):
"""Map each element of the collection to a new collection, and flatten the results.
Examples
--------
>>> hl.eval(a.flatmap(lambda x: hl.range(0, x)))
[0, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4]
>>> hl.eval(s3.flatmap(lambda x: hl.set(hl.range(0, x.length()).map(lambda i: x[i])))) # doctest: +SKIP_OUTPUT_CHECK
{'A', 'B', 'C', 'a', 'b', 'c', 'e', 'h', 'i', 'l', 'o', 'r'}
Parameters
----------
f : function ( (arg) -> :class:`.CollectionExpression`)
Function from the element type of the collection to the type of the
collection. For instance, `flatmap` on a ``set<str>`` should take
a ``str`` and return a ``set``.
Returns
-------
:class:`.CollectionExpression`
"""
expected_type, s = (tarray, 'array') if isinstance(self._type, tarray) else (tset, 'set')
value_type = f(construct_variable(Env.get_uid(), self.dtype.element_type)).dtype
if not isinstance(value_type, expected_type):
raise TypeError("'flatmap' expects 'f' to return an expression of type '{}', found '{}'".format(s, value_type))
def f2(x):
return hl.array(f(x)) if isinstance(value_type, tset) else f(x)
def transform_ir(array, name, body):
return ir.ToArray(ir.StreamFlatMap(ir.ToStream(array), name, ir.ToStream(body)))
array_flatmap = hl.array(self)._ir_lambda_method(transform_ir, f2, self.dtype.element_type, identity)
if isinstance(self.dtype, tset):
return hl.set(array_flatmap)
assert isinstance(self.dtype, tarray), self.dtype
return array_flatmap
@typecheck_method(f=func_spec(2, expr_any), zero=expr_any)
def fold(self, f, zero):
"""Reduces the collection with the given function `f`, provided the initial value `zero`.
Examples
--------
>>> a = [0, 1, 2]
>>> hl.eval(hl.fold(lambda i, j: i + j, 0, a))
3
Parameters
----------
f : function ( (:class:`.Expression`, :class:`.Expression`) -> :class:`.Expression`)
Function which takes the cumulative value and the next element, and
returns a new value.
zero : :class:`.Expression`
Initial value to pass in as left argument of `f`.
Returns
-------
:class:`.Expression`.
"""
collection = self
if isinstance(collection.dtype, tset):
collection = hl.array(collection)
indices, aggregations = unify_all(collection, zero)
accum_name = Env.get_uid()
elt_name = Env.get_uid()
accum_ref = construct_variable(accum_name, zero.dtype, indices, aggregations)
elt_ref = construct_variable(elt_name, collection.dtype.element_type, collection._indices, collection._aggregations)
body = to_expr(f(accum_ref, elt_ref))
if body.dtype != zero.dtype:
zero_coercer = coercer_from_dtype(zero.dtype)
if zero_coercer.can_coerce(body.dtype):
body = zero_coercer.coerce(body)
else:
body_coercer = coercer_from_dtype(body.dtype)
if body_coercer.can_coerce(zero.dtype):
zero_coerced = body_coercer.coerce(zero)
accum_ref = construct_variable(accum_name, zero_coerced.dtype, indices, aggregations)
new_body = to_expr(f(accum_ref, elt_ref))
if body_coercer.can_coerce(new_body.dtype):
body = body_coercer.coerce(new_body)
zero = zero_coerced
if body.dtype != zero.dtype:
raise ExpressionException("'CollectionExpression.fold' must take function returning "
"same expression type as zero value: \n"
" zero.dtype: {}\n"
" f.dtype: {}".format(
zero.dtype,
body.dtype))
x = ir.StreamFold(ir.ToStream(collection._ir), zero._ir, accum_name, elt_name, body._ir)
indices, aggregations = unify_all(self, zero, body)
return construct_expr(x, body.dtype, indices, aggregations)
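    # Added commentary (not original source): when ``zero`` and the result of
    # ``f`` have different but mutually coercible types, the logic above
    # promotes the accumulator instead of failing. A minimal sketch, assuming
    # the usual numeric promotion rules:
    #
    #     >>> hl.eval(hl.literal([1, 2, 3]).fold(lambda acc, x: acc + x / 2, 0))  # doctest: +SKIP_OUTPUT_CHECK
    #     3.0
    #
    # Here ``zero`` (an int32) is coerced to float64 to match ``acc + x / 2``.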
@typecheck_method(f=func_spec(1, expr_bool))
def all(self, f):
"""Returns ``True`` if `f` returns ``True`` for every element.
Examples
--------
>>> hl.eval(a.all(lambda x: x < 10))
True
Notes
-----
This method returns ``True`` if the collection is empty.
Parameters
----------
f : function ( (arg) -> :class:`.BooleanExpression`)
Function to evaluate for each element of the collection. Must return a
:class:`.BooleanExpression`.
Returns
-------
:class:`.BooleanExpression`.
``True`` if `f` returns ``True`` for every element, ``False`` otherwise.
"""
return hl.array(self).fold(lambda accum, elt: accum & f(elt), True)
@typecheck_method(f=func_spec(1, expr_any))
def group_by(self, f):
"""Group elements into a dict according to a lambda function.
Examples
--------
>>> hl.eval(a.group_by(lambda x: x % 2 == 0)) # doctest: +SKIP_OUTPUT_CHECK
{False: [1, 3, 5], True: [2, 4]}
>>> hl.eval(s3.group_by(lambda x: x.length())) # doctest: +SKIP_OUTPUT_CHECK
{3: {'Bob'}, 5: {'Alice'}, 7: {'Charlie'}}
Parameters
----------
f : function ( (arg) -> :class:`.Expression`)
Function to evaluate for each element of the collection to produce a key for the
resulting dictionary.
Returns
-------
:class:`.DictExpression`.
Dictionary keyed by results of `f`.
"""
keyed = hl.array(self).map(lambda x: hl.tuple([f(x), x]))
types = keyed.dtype.element_type.types
return construct_expr(ir.GroupByKey(ir.ToStream(keyed._ir)), tdict(types[0], tarray(types[1])), keyed._indices, keyed._aggregations)
@typecheck_method(f=func_spec(1, expr_any))
def map(self, f):
"""Transform each element of a collection.
Examples
--------
>>> hl.eval(a.map(lambda x: x ** 3))
[1.0, 8.0, 27.0, 64.0, 125.0]
>>> hl.eval(s3.map(lambda x: x.length()))
frozenset({3, 5, 7})
Parameters
----------
f : function ( (arg) -> :class:`.Expression`)
Function to transform each element of the collection.
Returns
-------
:class:`.CollectionExpression`.
Collection where each element has been transformed according to `f`.
"""
def transform_ir(array, name, body):
a = ir.ToArray(ir.StreamMap(ir.ToStream(array), name, body))
if isinstance(self.dtype, tset):
a = ir.ToSet(ir.ToStream(a))
return a
array_map = hl.array(self)._ir_lambda_method(transform_ir, f, self._type.element_type, lambda t: self._type.__class__(t))
if isinstance(self._type, tset):
return hl.set(array_map)
assert isinstance(self._type, tarray)
return array_map
@typecheck_method(f=anyfunc)
def starmap(self, f):
r"""Transform each element of a collection of tuples.
Examples
--------
>>> hl.eval(hl.array([(1, 2), (2, 3)]).starmap(lambda x, y: x+y))
[3, 5]
Parameters
----------
f : function ( (\*args) -> :class:`.Expression`)
Function to transform each element of the collection.
Returns
-------
:class:`.CollectionExpression`.
Collection where each element has been transformed according to `f`.
"""
return self.map(lambda e: f(*e))
def length(self):
"""Returns the size of a collection.
Examples
--------
>>> hl.eval(a.length())
5
>>> hl.eval(s3.length())
3
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
The number of elements in the collection.
"""
return self.size()
def size(self):
"""Returns the size of a collection.
Examples
--------
>>> hl.eval(a.size())
5
>>> hl.eval(s3.size())
3
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
The number of elements in the collection.
"""
return apply_expr(lambda x: ir.ArrayLen(ir.CastToArray(x)), tint32, hl.array(self))
def _extra_summary_fields(self, agg_result):
return {
'Min Size': agg_result[0],
'Max Size': agg_result[1],
'Mean Size': agg_result[2],
}
def _nested_summary(self, agg_result, top):
elt = construct_variable(Env.get_uid(), self.dtype.element_type, indices=self._indices)
return {'[<elements>]': elt._summarize(agg_result[3])}
def _summary_aggs(self):
length = hl.len(self)
return hl.tuple((
hl.agg.min(length),
hl.agg.max(length),
hl.agg.mean(length),
hl.agg.explode(lambda elt: elt._all_summary_aggs(), self)))
def __contains__(self, element):
class_name = type(self).__name__
raise TypeError(f"Cannot use `in` operator on hail `{class_name}`s. Use the `contains` method instead."
"`names.contains('Charlie')` instead of `'Charlie' in names`")
class ArrayExpression(CollectionExpression):
"""Expression of type :class:`.tarray`.
>>> names = hl.literal(['Alice', 'Bob', 'Charlie'])
See Also
--------
:class:`.CollectionExpression`
"""
def __getitem__(self, item):
"""Index into or slice the array.
Examples
--------
Index with a single integer:
>>> hl.eval(names[1])
'Bob'
>>> hl.eval(names[-1])
'Charlie'
Slicing is also supported:
>>> hl.eval(names[1:])
['Bob', 'Charlie']
Parameters
----------
item : slice or :class:`.Expression` of type :py:data:`.tint32`
Index or slice.
Returns
-------
:class:`.Expression`
Element or array slice.
"""
if isinstance(item, slice):
return self._slice(item.start, item.stop, item.step)
item = to_expr(item)
if not item.dtype == tint32:
raise TypeError("array expects key to be type 'slice' or expression of type 'int32', "
"found expression of type '{}'".format(item._type))
else:
return self._method("indexArray", self.dtype.element_type, item)
@typecheck_method(start=nullable(expr_int32), stop=nullable(expr_int32), step=nullable(expr_int32))
def _slice(self, start=None, stop=None, step=None):
if step is None:
step = hl.int(1)
if start is None:
start = hl.if_else(step >= 0, 0, -1)
if stop is not None:
slice_ir = ir.ArraySlice(self._ir, start._ir, stop._ir, step._ir)
else:
slice_ir = ir.ArraySlice(self._ir, start._ir, stop, step._ir)
return construct_expr(slice_ir, self.dtype, self._indices, self._aggregations)
@typecheck_method(item=expr_any)
def contains(self, item):
"""Returns a boolean indicating whether `item` is found in the array.
Examples
--------
>>> hl.eval(names.contains('Charlie'))
True
>>> hl.eval(names.contains('Helen'))
False
Parameters
----------
item : :class:`.Expression`
Item for inclusion test.
Warning
-------
This method takes time proportional to the length of the array. If a
pipeline uses this method on the same array several times, it may be
more efficient to convert the array to a set first early in the script
(:func:`~hail.expr.functions.set`).
Returns
-------
:class:`.BooleanExpression`
``True`` if the element is found in the array, ``False`` otherwise.
"""
return self._method("contains", tbool, item)
@deprecated(version="0.2.58", reason="Replaced by first")
def head(self):
"""Deprecated in favor of :meth:`~.ArrayExpression.first`.
Returns the first element of the array, or missing if empty.
Returns
-------
:class:`.Expression`
Element.
Examples
--------
>>> hl.eval(names.head())
'Alice'
If the array has no elements, then the result is missing:
>>> hl.eval(names.filter(lambda x: x.startswith('D')).head())
None
"""
return self.first()
def first(self):
"""Returns the first element of the array, or missing if empty.
Returns
-------
:class:`.Expression`
Element.
Examples
--------
>>> hl.eval(names.first())
'Alice'
If the array has no elements, then the result is missing:
>>> hl.eval(names.filter(lambda x: x.startswith('D')).first())
None
"""
# FIXME: this should generate short-circuiting IR when that is possible
return hl.rbind(self, lambda x: hl.or_missing(hl.len(x) > 0, x[0]))
def last(self):
"""Returns the last element of the array, or missing if empty.
Returns
-------
:class:`.Expression`
Element.
Examples
--------
>>> hl.eval(names.last())
'Charlie'
If the array has no elements, then the result is missing:
>>> hl.eval(names.filter(lambda x: x.startswith('D')).last())
None
"""
return hl.rbind(self, hl.len(self), lambda x, n: hl.or_missing(n > 0, x[n - 1]))
@typecheck_method(x=oneof(func_spec(1, expr_any), expr_any))
def index(self, x):
"""Returns the first index of `x`, or missing.
Parameters
----------
x : :class:`.Expression` or :obj:`typing.Callable`
Value to find, or function from element to Boolean expression.
Returns
-------
:class:`.Int32Expression`
Examples
--------
>>> hl.eval(names.index('Bob'))
1
>>> hl.eval(names.index('Beth'))
None
>>> hl.eval(names.index(lambda x: x.endswith('e')))
0
>>> hl.eval(names.index(lambda x: x.endswith('h')))
None
"""
if callable(x):
def f(elt, x):
return x(elt)
else:
def f(elt, x):
return elt == x
return hl.bind(lambda a: hl.range(0, a.length()).filter(lambda i: f(a[i], x)).first(), self)
@typecheck_method(item=expr_any)
def append(self, item):
"""Append an element to the array and return the result.
Examples
--------
>>> hl.eval(names.append('Dan'))
['Alice', 'Bob', 'Charlie', 'Dan']
Note
----
This method does not mutate the caller, but instead returns a new
array by copying the caller and adding `item`.
Parameters
----------
item : :class:`.Expression`
Element to append, same type as the array element type.
Returns
-------
:class:`.ArrayExpression`
"""
if not item._type == self._type.element_type:
raise TypeError("'ArrayExpression.append' expects 'item' to be the same type as its elements\n"
" array element type: '{}'\n"
" type of arg 'item': '{}'".format(self._type._element_type, item._type))
return self._method("append", self._type, item)
@typecheck_method(a=expr_array())
def extend(self, a):
"""Concatenate two arrays and return the result.
Examples
--------
>>> hl.eval(names.extend(['Dan', 'Edith']))
['Alice', 'Bob', 'Charlie', 'Dan', 'Edith']
Parameters
----------
a : :class:`.ArrayExpression`
Array to concatenate, same type as the callee.
Returns
-------
:class:`.ArrayExpression`
"""
if not a._type == self._type:
raise TypeError("'ArrayExpression.extend' expects 'a' to be the same type as the caller\n"
" caller type: '{}'\n"
" type of 'a': '{}'".format(self._type, a._type))
return self._method("extend", self._type, a)
@typecheck_method(f=func_spec(2, expr_any), zero=expr_any)
def scan(self, f, zero):
"""Map each element of the array to cumulative value of function `f`, with initial value `zero`.
Examples
--------
>>> a = [0, 1, 2]
>>> hl.eval(hl.array_scan(lambda i, j: i + j, 0, a))
[0, 0, 1, 3]
Parameters
----------
f : function ( (:class:`.Expression`, :class:`.Expression`) -> :class:`.Expression`)
Function which takes the cumulative value and the next element, and
returns a new value.
zero : :class:`.Expression`
Initial value to pass in as left argument of `f`.
Returns
-------
:class:`.ArrayExpression`.
"""
indices, aggregations = unify_all(self, zero)
accum_name = Env.get_uid()
elt_name = Env.get_uid()
accum_ref = construct_variable(accum_name, zero.dtype, indices, aggregations)
elt_ref = construct_variable(elt_name, self.dtype.element_type, self._indices, self._aggregations)
body = to_expr(f(accum_ref, elt_ref))
if body.dtype != zero.dtype:
zero_coercer = coercer_from_dtype(zero.dtype)
if zero_coercer.can_coerce(body.dtype):
body = zero_coercer.coerce(body)
else:
body_coercer = coercer_from_dtype(body.dtype)
if body_coercer.can_coerce(zero.dtype):
zero_coerced = body_coercer.coerce(zero)
accum_ref = construct_variable(accum_name, zero_coerced.dtype, indices, aggregations)
new_body = to_expr(f(accum_ref, elt_ref))
if body_coercer.can_coerce(new_body.dtype):
body = body_coercer.coerce(new_body)
zero = zero_coerced
if body.dtype != zero.dtype:
raise ExpressionException("'ArrayExpression.scan' must take function returning "
"same expression type as zero value: \n"
" zero.dtype: {}\n"
" f.dtype: {}".format(
zero.dtype,
body.dtype))
x = ir.ToArray(ir.StreamScan(ir.ToStream(self._ir), zero._ir, accum_name, elt_name, body._ir))
indices, aggregations = unify_all(self, zero, body)
return construct_expr(x, tarray(body.dtype), indices, aggregations)
@typecheck_method(group_size=expr_int32)
def grouped(self, group_size):
"""Partition an array into fixed size subarrays.
Examples
--------
>>> a = hl.array([0, 1, 2, 3, 4])
>>> hl.eval(a.grouped(2))
[[0, 1], [2, 3], [4]]
Parameters
----------
group_size : :class:`.Int32Expression`
The number of elements per group.
Returns
-------
:class:`.ArrayExpression`.
"""
indices, aggregations = unify_all(self, group_size)
stream_ir = ir.StreamGrouped(ir.ToStream(self._ir), group_size._ir)
mapping_identifier = Env.get_uid("stream_grouped_map_to_arrays")
mapped_to_arrays = ir.StreamMap(stream_ir, mapping_identifier, ir.ToArray(ir.Ref(mapping_identifier)))
return construct_expr(ir.ToArray(mapped_to_arrays), tarray(self._type), indices, aggregations)
class ArrayStructExpression(ArrayExpression):
"""Expression of type :class:`.tarray` that eventually contains structs.
>>> people = hl.literal([hl.struct(name='Alice', age=57),
... hl.struct(name='Bob', age=12),
... hl.struct(name='Charlie', age=34)])
Nested collections that contain structs are also
    :class:`.ArrayStructExpression`\ s:
>>> people = hl.literal([[hl.struct(name='Alice', age=57), hl.struct(name='Bob', age=12)],
... [hl.struct(name='Charlie', age=34)]])
See Also
--------
    :class:`.ArrayExpression`, :class:`.CollectionExpression`, :class:`.SetStructExpression`
"""
def __getattr__(self, item):
try:
return ArrayStructExpression.__getitem__(self, item)
except KeyError as e:
dt = self.dtype.element_type
while not isinstance(dt, tstruct):
dt = dt.element_type
self._fields = dt
raise AttributeError(get_nice_attr_error(self, item)) from e
def __getitem__(self, item):
"""If a string, get a field from each struct in this array. If an integer, get
the item at that index.
Examples
--------
>>> x = hl.array([hl.struct(a='foo', b=3), hl.struct(a='bar', b=4)])
>>> hl.eval(x.a)
['foo', 'bar']
>>> a = hl.array([hl.struct(b=[hl.struct(inner=1),
... hl.struct(inner=2)]),
... hl.struct(b=[hl.struct(inner=3)])])
>>> hl.eval(a.b)
[[Struct(inner=1), Struct(inner=2)], [Struct(inner=3)]]
>>> hl.eval(a.b.inner)
[[1, 2], [3]]
>>> hl.eval(hl.flatten(a.b).inner)
[1, 2, 3]
>>> hl.eval(hl.flatten(a.b.inner))
[1, 2, 3]
Parameters
----------
item : :class:`str`
Field name
Returns
-------
:class:`.ArrayExpression`
An array formed by getting the given field for each struct in
this array
See Also
--------
:meth:`.ArrayExpression.__getitem__`
"""
if isinstance(item, str):
return self.map(lambda x: x[item])
return super().__getitem__(item)
class ArrayNumericExpression(ArrayExpression):
"""Expression of type :class:`.tarray` with a numeric type.
Numeric arrays support arithmetic both with scalar values and other arrays.
Arithmetic between two numeric arrays requires that the length of each array
is identical, and will apply the operation positionally (``a1 * a2`` will
multiply the first element of ``a1`` by the first element of ``a2``, the
second element of ``a1`` by the second element of ``a2``, and so on).
Arithmetic with a scalar will apply the operation to each element of the
array.
>>> a1 = hl.literal([0, 1, 2, 3, 4, 5])
>>> a2 = hl.literal([1, -1, 1, -1, 1, -1])
"""
def __neg__(self):
"""Negate elements of the array.
Examples
--------
>>> hl.eval(-a1)
[0, -1, -2, -3, -4, -5]
Returns
-------
:class:`.ArrayNumericExpression`
Array expression of the same type.
"""
return self * -1
def __add__(self, other):
"""Positionally add an array or a scalar.
Examples
--------
>>> hl.eval(a1 + 5)
[5, 6, 7, 8, 9, 10]
>>> hl.eval(a1 + a2)
[1, 0, 3, 2, 5, 4]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Value or array to add.
Returns
-------
:class:`.ArrayNumericExpression`
Array of positional sums.
"""
return self._bin_op_numeric("+", other)
def __radd__(self, other):
return self._bin_op_numeric_reverse("+", other)
def __sub__(self, other):
"""Positionally subtract an array or a scalar.
Examples
--------
>>> hl.eval(a2 - 1)
[0, -2, 0, -2, 0, -2]
>>> hl.eval(a1 - a2)
[-1, 2, 1, 4, 3, 6]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Value or array to subtract.
Returns
-------
:class:`.ArrayNumericExpression`
Array of positional differences.
"""
return self._bin_op_numeric("-", other)
def __rsub__(self, other):
return self._bin_op_numeric_reverse("-", other)
def __mul__(self, other):
"""Positionally multiply by an array or a scalar.
Examples
--------
>>> hl.eval(a2 * 5)
[5, -5, 5, -5, 5, -5]
>>> hl.eval(a1 * a2)
[0, -1, 2, -3, 4, -5]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Value or array to multiply by.
Returns
-------
:class:`.ArrayNumericExpression`
Array of positional products.
"""
return self._bin_op_numeric("*", other)
def __rmul__(self, other):
return self._bin_op_numeric_reverse("*", other)
def __truediv__(self, other):
"""Positionally divide by an array or a scalar.
Examples
--------
>>> hl.eval(a1 / 10) # doctest: +SKIP_OUTPUT_CHECK
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
>>> hl.eval(a2 / a1) # doctest: +SKIP_OUTPUT_CHECK
[inf, -1.0, 0.5, -0.3333333333333333, 0.25, -0.2]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Value or array to divide by.
Returns
-------
:class:`.ArrayNumericExpression`
Array of positional quotients.
"""
return self._bin_op_numeric("/", other, self._div_ret_type_f)
def __rtruediv__(self, other):
return self._bin_op_numeric_reverse("/", other, self._div_ret_type_f)
def __floordiv__(self, other):
"""Positionally divide by an array or a scalar using floor division.
Examples
--------
>>> hl.eval(a1 // 2)
[0, 0, 1, 1, 2, 2]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Returns
-------
:class:`.ArrayNumericExpression`
"""
return self._bin_op_numeric('//', other)
def __rfloordiv__(self, other):
return self._bin_op_numeric_reverse('//', other)
def __mod__(self, other):
"""Positionally compute the left modulo the right.
Examples
--------
>>> hl.eval(a1 % 2)
[0, 1, 0, 1, 0, 1]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Returns
-------
:class:`.ArrayNumericExpression`
"""
return self._bin_op_numeric('%', other)
def __rmod__(self, other):
return self._bin_op_numeric_reverse('%', other)
def __pow__(self, other):
"""Positionally raise to the power of an array or a scalar.
Examples
--------
>>> hl.eval(a1 ** 2)
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
>>> hl.eval(a1 ** a2)
[0.0, 1.0, 2.0, 0.3333333333333333, 4.0, 0.2]
Parameters
----------
other : :class:`.NumericExpression` or :class:`.ArrayNumericExpression`
Returns
-------
:class:`.ArrayNumericExpression`
"""
return self._bin_op_numeric('**', other, lambda _: tfloat64)
def __rpow__(self, other):
return self._bin_op_numeric_reverse('**', other, lambda _: tfloat64)
class SetExpression(CollectionExpression):
"""Expression of type :class:`.tset`.
>>> s1 = hl.literal({1, 2, 3})
>>> s2 = hl.literal({1, 3, 5})
See Also
--------
:class:`.CollectionExpression`
"""
@typecheck_method(x=ir.IR, type=HailType, indices=Indices, aggregations=LinkedList)
def __init__(self, x, type, indices=Indices(), aggregations=LinkedList(Aggregation)):
super(SetExpression, self).__init__(x, type, indices, aggregations)
assert isinstance(type, tset)
self._ec = coercer_from_dtype(type.element_type)
@typecheck_method(item=expr_any)
def add(self, item):
"""Returns a new set including `item`.
Examples
--------
>>> hl.eval(s1.add(10)) # doctest: +SKIP_OUTPUT_CHECK
{1, 2, 3, 10}
Parameters
----------
item : :class:`.Expression`
Value to add.
Returns
-------
:class:`.SetExpression`
Set with `item` added.
"""
if not self._ec.can_coerce(item.dtype):
raise TypeError("'SetExpression.add' expects 'item' to be the same type as its elements\n"
" set element type: '{}'\n"
" type of arg 'item': '{}'".format(self.dtype.element_type, item.dtype))
return self._method("add", self.dtype, self._ec.coerce(item))
@typecheck_method(item=expr_any)
def remove(self, item):
"""Returns a new set excluding `item`.
Examples
--------
>>> hl.eval(s1.remove(1))
frozenset({2, 3})
Parameters
----------
item : :class:`.Expression`
Value to remove.
Returns
-------
:class:`.SetExpression`
Set with `item` removed.
"""
if not self._ec.can_coerce(item.dtype):
raise TypeError("'SetExpression.remove' expects 'item' to be the same type as its elements\n"
" set element type: '{}'\n"
" type of arg 'item': '{}'".format(self.dtype.element_type, item.dtype))
return self._method("remove", self._type, self._ec.coerce(item))
@typecheck_method(item=expr_any)
def contains(self, item):
"""Returns ``True`` if `item` is in the set.
Examples
--------
>>> hl.eval(s1.contains(1))
True
>>> hl.eval(s1.contains(10))
False
Parameters
----------
item : :class:`.Expression`
Value for inclusion test.
Returns
-------
:class:`.BooleanExpression`
``True`` if `item` is in the set.
"""
if not self._ec.can_coerce(item.dtype):
raise TypeError("'SetExpression.contains' expects 'item' to be the same type as its elements\n"
" set element type: '{}'\n"
" type of arg 'item': '{}'".format(self.dtype.element_type, item.dtype))
return self._method("contains", tbool, self._ec.coerce(item))
@typecheck_method(s=expr_set())
def difference(self, s):
"""Return the set of elements in the set that are not present in set `s`.
Examples
--------
>>> hl.eval(s1.difference(s2))
frozenset({2})
>>> hl.eval(s2.difference(s1))
frozenset({5})
Parameters
----------
s : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements not in `s`.
"""
if not s._type.element_type == self._type.element_type:
raise TypeError("'SetExpression.difference' expects 's' to be the same type\n"
" set type: '{}'\n"
" type of 's': '{}'".format(self._type, s._type))
return self._method("difference", self._type, s)
@typecheck_method(s=expr_set())
def intersection(self, s):
"""Return the intersection of the set and set `s`.
Examples
--------
>>> hl.eval(s1.intersection(s2))
frozenset({1, 3})
Parameters
----------
s : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements present in `s`.
"""
if not s._type.element_type == self._type.element_type:
raise TypeError("'SetExpression.intersection' expects 's' to be the same type\n"
" set type: '{}'\n"
" type of 's': '{}'".format(self._type, s._type))
return self._method("intersection", self._type, s)
@typecheck_method(s=expr_set())
def is_subset(self, s):
"""Returns ``True`` if every element is contained in set `s`.
Examples
--------
>>> hl.eval(s1.is_subset(s2))
False
>>> hl.eval(s1.remove(2).is_subset(s2))
True
Parameters
----------
s : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.BooleanExpression`
``True`` if every element is contained in set `s`.
"""
if not s._type.element_type == self._type.element_type:
raise TypeError("'SetExpression.is_subset' expects 's' to be the same type\n"
" set type: '{}'\n"
" type of 's': '{}'".format(self._type, s._type))
return self._method("isSubset", tbool, s)
@typecheck_method(s=expr_set())
def union(self, s):
"""Return the union of the set and set `s`.
Examples
--------
>>> hl.eval(s1.union(s2))
frozenset({1, 2, 3, 5})
Parameters
----------
s : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements present in either set.
"""
if not s._type.element_type == self._type.element_type:
raise TypeError("'SetExpression.union' expects 's' to be the same type\n"
" set type: '{}'\n"
" type of 's': '{}'".format(self._type, s._type))
return self._method("union", self._type, s)
def __le__(self, other):
"""Test whether every element in the set is in `other`.
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.BooleanExpression`
``True`` if every element in the set is in `other`. ``False`` otherwise.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return self.is_subset(other)
return NotImplemented
def __lt__(self, other):
"""Test whether the set is a proper subset of `other` (``set <= other and set != other``).
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.BooleanExpression`
``True`` if the set is a proper subset of `other`. ``False`` otherwise.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return self.is_subset(other) & (self != other)
return NotImplemented
def __ge__(self, other):
"""Test whether every element in `other` is in the set.
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.BooleanExpression`
``True`` if every element in `other` is in the set. ``False`` otherwise.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return other.is_subset(self)
return NotImplemented
def __gt__(self, other):
"""Test whether `other` is a proper subset of the set (``other <= set and other != set``).
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.BooleanExpression`
``True`` if `other` is a proper subset of the set. ``False`` otherwise.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return other.is_subset(self) & (self != other)
return NotImplemented
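    # Added commentary (not original source): the rich comparisons above give
    # Python-set-style subset semantics via ``is_subset``. A minimal sketch,
    # using ``s1 = {1, 2, 3}`` and ``s2 = {1, 3, 5}`` from the class docstring:
    #
    #     >>> hl.eval(s1.remove(2) <= s2)   # doctest: +SKIP_OUTPUT_CHECK
    #     True
    #     >>> hl.eval(s1 < s1)              # doctest: +SKIP_OUTPUT_CHECK
    #     False
    #     >>> hl.eval(s1 >= s1.remove(2))   # doctest: +SKIP_OUTPUT_CHECK
    #     True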
def __sub__(self, other):
"""Return the difference of the set and `other`.
Examples
--------
>>> hl.eval(s1 - s2)
frozenset({2})
>>> hl.eval(s2 - s1)
frozenset({5})
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements in the set that are not in `other`.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return self.difference(other)
return NotImplemented
def __rsub__(self, other):
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return other.difference(self)
return NotImplemented
def __and__(self, other):
"""Return the intersection of the set and `other`.
Examples
--------
>>> hl.eval(s1 & s2)
frozenset({1, 3})
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements present in both the set and `other`.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return self.intersection(other)
return NotImplemented
def __rand__(self, other):
return self.__and__(other)
def __or__(self, other):
"""Return the union of the set and `other`.
Examples
--------
>>> hl.eval(s1 | s2)
frozenset({1, 2, 3, 5})
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements present in either set.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return self.union(other)
return NotImplemented
def __ror__(self, other):
return self.__or__(other)
def __xor__(self, other):
"""Return the symmetric difference of the set and `other`.
Examples
--------
>>> hl.eval(s1 ^ s2)
frozenset({2, 5})
Parameters
----------
other : :class:`.SetExpression`
Set expression of the same type.
Returns
-------
:class:`.SetExpression`
Set of elements present in either the set or `other` but not both.
"""
other = to_expr(other)
if isinstance(other.dtype, hl.tset):
return self.union(other).difference(self.intersection(other))
return NotImplemented
def __rxor__(self, other):
return self.__xor__(other)
class SetStructExpression(SetExpression):
"""Expression of type :class:`.tset` that eventually contains structs.
>>> people = hl.literal({hl.struct(name='Alice', age=57),
... hl.struct(name='Bob', age=12),
... hl.struct(name='Charlie', age=34)})
Nested collections that contain structs are also
    :class:`.SetStructExpression`\ s:
>>> people = hl.set([hl.set([hl.struct(name='Alice', age=57), hl.struct(name='Bob', age=12)]),
... hl.set([hl.struct(name='Charlie', age=34)])])
See Also
--------
    :class:`.SetExpression`, :class:`.CollectionExpression`, :class:`.ArrayStructExpression`
"""
def __getattr__(self, item):
try:
return SetStructExpression.__getitem__(self, item)
except KeyError as e:
dt = self.dtype.element_type
while not isinstance(dt, tstruct):
dt = dt.element_type
self._fields = dt
raise AttributeError(get_nice_attr_error(self, item)) from e
@typecheck_method(item=oneof(str))
def __getitem__(self, item):
"""Get a field from each struct in this set.
Examples
--------
>>> x = hl.set({hl.struct(a='foo', b=3), hl.struct(a='bar', b=4)})
>>> hl.eval(x.a) == {'foo', 'bar'}
True
>>> a = hl.set({hl.struct(b={hl.struct(inner=1),
... hl.struct(inner=2)}),
... hl.struct(b={hl.struct(inner=3)})})
>>> hl.eval(hl.flatten(a.b).inner) == {1, 2, 3}
True
>>> hl.eval(hl.flatten(a.b.inner)) == {1, 2, 3}
True
Parameters
----------
item : :class:`str`
Field name
Returns
-------
:class:`.SetExpression`
A set formed by getting the given field for each struct in
this set
"""
return self.map(lambda x: x[item])
class DictExpression(Expression):
"""Expression of type :class:`.tdict`.
>>> d = hl.literal({'Alice': 43, 'Bob': 33, 'Charles': 44})
"""
@typecheck_method(x=ir.IR, type=HailType, indices=Indices, aggregations=LinkedList)
def __init__(self, x, type, indices=Indices(), aggregations=LinkedList(Aggregation)):
super(DictExpression, self).__init__(x, type, indices, aggregations)
assert isinstance(type, tdict)
self._kc = coercer_from_dtype(type.key_type)
self._vc = coercer_from_dtype(type.value_type)
@typecheck_method(item=expr_any)
def __getitem__(self, item):
"""Get the value associated with key `item`.
Examples
--------
>>> hl.eval(d['Alice'])
43
Notes
-----
Raises an error if `item` is not a key of the dictionary. Use
:meth:`.DictExpression.get` to return missing instead of an error.
Parameters
----------
item : :class:`.Expression`
Key expression.
Returns
-------
:class:`.Expression`
Value associated with key `item`.
"""
if not self._kc.can_coerce(item.dtype):
raise TypeError("dict encountered an invalid key type\n"
" dict key type: '{}'\n"
" type of 'item': '{}'".format(self.dtype.key_type, item.dtype))
return self._index(self.dtype.value_type, self._kc.coerce(item))
@typecheck_method(item=expr_any)
def contains(self, item):
"""Returns whether a given key is present in the dictionary.
Examples
--------
>>> hl.eval(d.contains('Alice'))
True
>>> hl.eval(d.contains('Anne'))
False
Parameters
----------
item : :class:`.Expression`
Key to test for inclusion.
Returns
-------
:class:`.BooleanExpression`
``True`` if `item` is a key of the dictionary, ``False`` otherwise.
"""
if not self._kc.can_coerce(item.dtype):
raise TypeError("'DictExpression.contains' encountered an invalid key type\n"
" dict key type: '{}'\n"
" type of 'item': '{}'".format(self._type.key_type, item.dtype))
return self._method("contains", tbool, self._kc.coerce(item))
@typecheck_method(item=expr_any, default=nullable(expr_any))
def get(self, item, default=None):
"""Returns the value associated with key `k` or a default value if that key is not present.
Examples
--------
>>> hl.eval(d.get('Alice'))
43
>>> hl.eval(d.get('Anne'))
None
>>> hl.eval(d.get('Anne', 0))
0
Parameters
----------
item : :class:`.Expression`
Key.
default : :class:`.Expression`
Default value. Must be same type as dictionary values.
Returns
-------
:class:`.Expression`
The value associated with `item`, or `default`.
"""
if not self._kc.can_coerce(item.dtype):
raise TypeError("'DictExpression.get' encountered an invalid key type\n"
" dict key type: '{}'\n"
" type of 'item': '{}'".format(self.dtype.key_type, item.dtype))
key = self._kc.coerce(item)
if default is not None:
if not self._vc.can_coerce(default.dtype):
raise TypeError("'get' expects parameter 'default' to have the same type "
"as the dictionary value type, expected '{}' and found '{}'"
.format(self.dtype.value_type, default.dtype))
return self._method("get", self.dtype.value_type, key, self._vc.coerce(default))
else:
return self._method("get", self.dtype.value_type, key)
def key_set(self):
"""Returns the set of keys in the dictionary.
Examples
--------
>>> hl.eval(d.key_set()) # doctest: +SKIP_OUTPUT_CHECK
{'Alice', 'Bob', 'Charles'}
Returns
-------
:class:`.SetExpression`
Set of all keys.
"""
return self._method("keySet", tset(self.dtype.key_type))
def keys(self):
"""Returns an array with all keys in the dictionary.
Examples
--------
>>> hl.eval(d.keys()) # doctest: +SKIP_OUTPUT_CHECK
['Bob', 'Charles', 'Alice']
Returns
-------
:class:`.ArrayExpression`
Array of all keys.
"""
return self._method("keys", tarray(self.dtype.key_type))
@typecheck_method(f=func_spec(1, expr_any))
def map_values(self, f):
"""Transform values of the dictionary according to a function.
Examples
--------
>>> hl.eval(d.map_values(lambda x: x * 10)) # doctest: +SKIP_OUTPUT_CHECK
{'Alice': 430, 'Bob': 330, 'Charles': 440}
Parameters
----------
f : function ( (arg) -> :class:`.Expression`)
Function to apply to each value.
Returns
-------
:class:`.DictExpression`
Dictionary with transformed values.
"""
return hl.dict(hl.array(self).map(lambda elt: hl.tuple([elt[0], f(elt[1])])))
def size(self):
"""Returns the size of the dictionary.
Examples
--------
>>> hl.eval(d.size())
3
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
Size of the dictionary.
"""
return apply_expr(lambda x: ir.ArrayLen(ir.CastToArray(x)), tint32, self)
def values(self):
"""Returns an array with all values in the dictionary.
Examples
--------
>>> hl.eval(d.values()) # doctest: +SKIP_OUTPUT_CHECK
[33, 44, 43]
Returns
-------
:class:`.ArrayExpression`
All values in the dictionary.
"""
return self._method("values", tarray(self.dtype.value_type))
def items(self):
"""Returns an array of tuples containing key/value pairs in the dictionary.
Examples
--------
>>> hl.eval(d.items()) # doctest: +SKIP_OUTPUT_CHECK
        [('Alice', 43), ('Bob', 33), ('Charles', 44)]
Returns
-------
:class:`.ArrayExpression`
All key/value pairs in the dictionary.
"""
return hl.array(self)
def _extra_summary_fields(self, agg_result):
return {
'Min Size': agg_result[0],
'Max Size': agg_result[1],
'Mean Size': agg_result[2],
}
def _nested_summary(self, agg_result, top):
k = construct_variable(Env.get_uid(), self.dtype.key_type, indices=self._indices)
v = construct_variable(Env.get_uid(), self.dtype.value_type, indices=self._indices)
return {
'[<keys>]': k._summarize(agg_result[3][0]),
'[<values>]': v._summarize(agg_result[3][1]),
}
def _summary_aggs(self):
length = hl.len(self)
return hl.tuple((
hl.agg.min(length),
hl.agg.max(length),
hl.agg.mean(length),
hl.agg.explode(lambda elt: hl.tuple((elt[0]._all_summary_aggs(), elt[1]._all_summary_aggs())), hl.array(self))))
class StructExpression(Mapping[str, Expression], Expression):
"""Expression of type :class:`.tstruct`.
>>> struct = hl.struct(a=5, b='Foo')
Struct fields are accessible as attributes and keys. It is therefore
possible to access field `a` of struct `s` with dot syntax:
>>> hl.eval(struct.a)
5
However, it is recommended to use square brackets to select fields:
>>> hl.eval(struct['a'])
5
The latter syntax is safer, because fields that share their name with
an existing attribute of :class:`.StructExpression` (`keys`, `values`,
`annotate`, `drop`, etc.) will only be accessible using the
:meth:`.StructExpression.__getitem__` syntax. This is also the only way
to access fields that are not valid Python identifiers, like fields with
spaces or symbols.
"""
@classmethod
def _from_fields(cls, fields: 'Dict[str, Expression]'):
t = tstruct(**{k: v.dtype for k, v in fields.items()})
x = ir.MakeStruct([(n, expr._ir) for (n, expr) in fields.items()])
indices, aggregations = unify_all(*fields.values())
s = StructExpression.__new__(cls)
s._fields = {}
for k, v in fields.items():
s._set_field(k, v)
super(StructExpression, s).__init__(x, t, indices, aggregations)
return s
@typecheck_method(x=ir.IR, type=HailType, indices=Indices, aggregations=LinkedList)
def __init__(self, x, type, indices=Indices(), aggregations=LinkedList(Aggregation)):
super(StructExpression, self).__init__(x, type, indices, aggregations)
self._fields: Dict[str, Expression] = {}
for i, (f, t) in enumerate(self.dtype.items()):
if isinstance(self._ir, ir.MakeStruct):
expr = construct_expr(self._ir.fields[i][1], t, self._indices,
self._aggregations)
elif isinstance(self._ir, ir.SelectFields):
expr = construct_expr(ir.GetField(self._ir.old, f), t, self._indices,
self._aggregations)
else:
expr = construct_expr(ir.GetField(self._ir, f), t, self._indices,
self._aggregations)
self._set_field(f, expr)
def _set_field(self, key, value):
self._fields[key] = value
if key not in self.__dict__:
self.__dict__[key] = value
def _get_field(self, item):
if item in self._fields:
return self._fields[item]
else:
raise KeyError(get_nice_field_error(self, item))
def __getattr__(self, item):
if item in self.__dict__:
return self.__dict__[item]
else:
raise AttributeError(get_nice_attr_error(self, item))
def __len__(self):
return len(self._fields)
def __bool__(self):
return bool(len(self))
@typecheck_method(item=oneof(str, int, slice))
def __getitem__(self, item):
"""Access a field of the struct by name or index.
Examples
--------
>>> hl.eval(struct['a'])
5
>>> hl.eval(struct[1])
'Foo'
Parameters
----------
        item : :class:`str`, :obj:`int`, or :obj:`slice`
            Field name, field index, or slice of field indices.
Returns
-------
:class:`.Expression`
Struct field.
"""
if isinstance(item, str):
return self._get_field(item)
if isinstance(item, int):
return self._get_field(self.dtype.fields[item])
else:
assert item.start is None or isinstance(item.start, int)
assert item.stop is None or isinstance(item.stop, int)
assert item.step is None or isinstance(item.step, int)
return self.select(
*self.dtype.fields[item.start:item.stop:item.step])
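    # Added commentary (not original source): integer and slice indexing select
    # fields positionally. A minimal sketch, using ``struct`` (fields ``a=5``,
    # ``b='Foo'``) from the class docstring:
    #
    #     >>> hl.eval(struct[:1])   # doctest: +SKIP_OUTPUT_CHECK
    #     Struct(a=5)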
def __iter__(self):
return iter(self._fields)
def __contains__(self, item):
return item in self._fields
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
"""Check each field for equality.
Parameters
----------
other : :class:`.Expression`
An expression of the same type.
"""
return Expression.__eq__(self, other)
def __ne__(self, other):
return Expression.__ne__(self, other)
def __nonzero__(self):
return Expression.__nonzero__(self)
def _annotate_ordered(self, insertions_dict, field_order):
def get_type(field):
e = insertions_dict.get(field)
if e is None:
e = self._fields[field]
return e.dtype
new_type = hl.tstruct(**{f: get_type(f) for f in field_order})
indices, aggregations = unify_all(self, *insertions_dict.values())
return construct_expr(ir.InsertFields.construct_with_deduplication(
self._ir, [(field, expr._ir) for field, expr in insertions_dict.items()], field_order),
new_type,
indices,
aggregations)
@typecheck_method(named_exprs=expr_any)
def annotate(self, **named_exprs):
"""Add new fields or recompute existing fields.
Examples
--------
>>> hl.eval(struct.annotate(a=10, c=2*2*2))
Struct(a=10, b='Foo', c=8)
Notes
-----
If an expression in `named_exprs` shares a name with a field of the
struct, then that field will be replaced but keep its position in
the struct. New fields will be appended to the end of the struct.
Parameters
----------
named_exprs : keyword args of :class:`.Expression`
Fields to add.
Returns
-------
:class:`.StructExpression`
Struct with new or updated fields.
"""
new_types = {n: t for (n, t) in self.dtype.items()}
for f, e in named_exprs.items():
new_types[f] = e.dtype
result_type = tstruct(**new_types)
indices, aggregations = unify_all(self, *[x for (f, x) in named_exprs.items()])
return construct_expr(ir.InsertFields.construct_with_deduplication(
self._ir, list(map(lambda x: (x[0], x[1]._ir), named_exprs.items())), None),
result_type, indices, aggregations)
@typecheck_method(fields=str, named_exprs=expr_any)
def select(self, *fields, **named_exprs):
"""Select existing fields and compute new ones.
Examples
--------
>>> hl.eval(struct.select('a', c=['bar', 'baz']))
Struct(a=5, c=['bar', 'baz'])
Notes
-----
The `fields` argument is a list of field names to keep. These fields
will appear in the resulting struct in the order they appear in
`fields`.
The `named_exprs` arguments are new field expressions.
Parameters
----------
fields : varargs of :class:`str`
Field names to keep.
named_exprs : keyword args of :class:`.Expression`
New field expressions.
Returns
-------
:class:`.StructExpression`
Struct containing specified existing fields and computed fields.
"""
name_set = set()
for a in fields:
if a not in self._fields:
raise KeyError("Struct has no field '{}'\n"
" Fields: [ {} ]".format(a, ', '.join("'{}'".format(x) for x in self._fields)))
if a in name_set:
raise ExpressionException("'StructExpression.select' does not support duplicate identifiers.\n"
" Identifier '{}' appeared more than once".format(a))
name_set.add(a)
for (n, _) in named_exprs.items():
if n in name_set:
raise ExpressionException("Cannot select and assign '{}' in the same 'select' call".format(n))
selected_type = tstruct(**{f: self.dtype[f] for f in fields})
selected_expr = construct_expr(ir.SelectFields(self._ir, fields), selected_type, self._indices, self._aggregations)
if len(named_exprs) == 0:
return selected_expr
else:
return selected_expr.annotate(**named_exprs)
@typecheck_method(mapping=dictof(str, str))
def rename(self, mapping):
"""Rename fields of the struct.
Examples
--------
>>> s = hl.struct(x='hello', y='goodbye', a='dogs')
>>> s.rename({'x' : 'y', 'y' : 'z'}).show()
+----------+----------+-----------+
| <expr>.a | <expr>.y | <expr>.z |
+----------+----------+-----------+
| str | str | str |
+----------+----------+-----------+
| "dogs" | "hello" | "goodbye" |
+----------+----------+-----------+
Parameters
----------
mapping : :obj:`dict` of :class:`str`, :obj:`str`
Mapping from old field names to new field names.
Notes
-----
Any field that does not appear as a key in `mapping` will not be
renamed.
Returns
-------
:class:`.StructExpression`
Struct with renamed fields.
"""
old_fields = set(self._fields)
new_to_old = dict()
for old, new in mapping.items():
if old not in old_fields:
raise ValueError(f'{old} is not a field of this struct: {self.dtype}.')
if new in old_fields and new not in mapping:
raise ValueError(f'{old} is renamed to {new} but {new} is already in the '
f'struct: {self.dtype}.')
if new in new_to_old:
raise ValueError(f'{new} is the new name of both {old} and {new_to_old[new]}.')
new_to_old[new] = old
return self.select(
*list(set(self._fields) - set(mapping)),
**{new: self._get_field(old) for old, new in mapping.items()}
)
@typecheck_method(fields=str)
def drop(self, *fields):
"""Drop fields from the struct.
Examples
--------
>>> hl.eval(struct.drop('b'))
Struct(a=5)
Parameters
----------
fields: varargs of :class:`str`
Fields to drop.
Returns
-------
:class:`.StructExpression`
Struct without certain fields.
"""
to_drop = set()
for a in fields:
if a not in self._fields:
raise KeyError("Struct has no field '{}'\n"
" Fields: [ {} ]".format(a, ', '.join("'{}'".format(x) for x in self._fields)))
if a in to_drop:
warning("Found duplicate field name in 'StructExpression.drop': '{}'".format(a))
to_drop.add(a)
to_keep = [f for f in self.dtype.keys() if f not in to_drop]
return self.select(*to_keep)
def flatten(self):
"""Recursively eliminate struct fields by adding their fields to this struct."""
def _flatten(prefix, s):
if isinstance(s, StructExpression):
return [(k, v) for (f, e) in s.items() for (k, v) in _flatten(prefix + '.' + f, e)]
else:
return [(prefix, s)]
return self.select(**{k: v for (f, e) in self.items() for (k, v) in _flatten(f, e)})
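    # Added commentary (not original source): flattened field names are joined
    # with '.'. A minimal sketch:
    #
    #     >>> hl.eval(hl.struct(a=hl.struct(b=1, c=2), d=3).flatten())  # doctest: +SKIP_OUTPUT_CHECK
    #     Struct(**{'a.b': 1, 'a.c': 2, 'd': 3})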
def _nested_summary(self, agg_result, top):
sep = '' if top else '.'
return {f'{sep}{k}': f._summarize(agg_result[k]) for k, f in self.items()}
def _summary_aggs(self):
return hl.struct(**{k: f._all_summary_aggs() for k, f in self.items()})
def get(self, k, default=None):
"""See :meth:`StructExpression.__getitem__`"""
return super().get(k, default)
def items(self):
"""A list of pairs of field name and expression for said field."""
return super().items()
def keys(self):
"""The list of field names."""
return super().keys()
def values(self):
"""A list of expressions for each field."""
return super().values()
class TupleExpression(Expression, Sequence):
"""Expression of type :class:`.ttuple`.
>>> tup = hl.literal(("a", 1, [1, 2, 3]))
"""
@typecheck_method(item=oneof(int, slice))
def __getitem__(self, item):
"""Index into the tuple.
Examples
--------
>>> hl.eval(tup[1])
1
Parameters
----------
item : :obj:`int`
Element index.
Returns
-------
:class:`.Expression`
"""
if isinstance(item, slice):
assert item.start is None or isinstance(item.start, int)
assert item.stop is None or isinstance(item.stop, int)
assert item.step is None or isinstance(item.step, int)
return hl.or_missing(hl.is_defined(self),
hl.tuple([
self[i]
for i in range(len(self))[item.start:item.stop:item.step]]))
if not 0 <= item < len(self):
raise IndexError("Out of bounds index, {}. Tuple length is {}.".format(
item, len(self)))
return construct_expr(ir.GetTupleElement(self._ir, item), self.dtype.types[item], self._indices)
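    # Added commentary (not original source): slicing returns a new tuple of
    # the selected elements. A minimal sketch, using ``tup`` from the class
    # docstring:
    #
    #     >>> hl.eval(tup[1:])   # doctest: +SKIP_OUTPUT_CHECK
    #     (1, [1, 2, 3])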
def __len__(self):
"""Returns the length of the tuple.
Examples
--------
>>> len(tup)
3
Returns
-------
:obj:`int`
"""
return len(self.dtype.types)
def __bool__(self):
return bool(len(self))
def __iter__(self):
for i in range(len(self)):
yield self[i]
def _nested_summary(self, agg_result, top):
return {f'[{i}]': self[i]._summarize(agg_result[i]) for i in range(len(self))}
def _summary_aggs(self):
return hl.tuple([self[i]._all_summary_aggs() for i in range(len(self))])
def count(self, value):
"""Do not use this method.
This only exists for compatibility with the Python Sequence abstract
base class.
"""
return super().count()
def index(self, value, start=0, stop=None):
"""Do not use this method.
This only exists for compatibility with the Python Sequence abstract
base class.
"""
return super().index()
class NumericExpression(Expression):
"""Expression of numeric type.
>>> x = hl.literal(3)
>>> y = hl.literal(4.5)
"""
@typecheck_method(other=expr_numeric)
def __lt__(self, other):
"""Less-than comparison.
Examples
--------
>>> hl.eval(x < 5)
True
Parameters
----------
other : :class:`.NumericExpression`
Right side for comparison.
Returns
-------
:class:`.BooleanExpression`
``True`` if the left side is smaller than the right side.
"""
return self._bin_op_numeric("<", other, lambda _: tbool)
@typecheck_method(other=expr_numeric)
def __le__(self, other):
"""Less-than-or-equals comparison.
Examples
--------
>>> hl.eval(x <= 3)
True
Parameters
----------
other : :class:`.NumericExpression`
Right side for comparison.
Returns
-------
:class:`.BooleanExpression`
``True`` if the left side is smaller than or equal to the right side.
"""
return self._bin_op_numeric("<=", other, lambda _: tbool)
@typecheck_method(other=expr_numeric)
def __gt__(self, other):
"""Greater-than comparison.
Examples
--------
>>> hl.eval(y > 4)
True
Parameters
----------
other : :class:`.NumericExpression`
Right side for comparison.
Returns
-------
:class:`.BooleanExpression`
``True`` if the left side is greater than the right side.
"""
return self._bin_op_numeric(">", other, lambda _: tbool)
@typecheck_method(other=expr_numeric)
def __ge__(self, other):
"""Greater-than-or-equals comparison.
Examples
--------
>>> hl.eval(y >= 4)
True
Parameters
----------
other : :class:`.NumericExpression`
Right side for comparison.
Returns
-------
:class:`.BooleanExpression`
``True`` if the left side is greater than or equal to the right side.
"""
return self._bin_op_numeric(">=", other, lambda _: tbool)
def __pos__(self):
return self
def __neg__(self):
"""Negate the number (multiply by -1).
Examples
--------
>>> hl.eval(-x)
-3
Returns
-------
:class:`.NumericExpression`
Negated number.
"""
return expr_numeric.coerce(self)._unary_op("-")
def __add__(self, other):
"""Add two numbers.
Examples
--------
>>> hl.eval(x + 2)
5
>>> hl.eval(x + y)
7.5
Parameters
----------
other : :class:`.NumericExpression`
Number to add.
Returns
-------
:class:`.NumericExpression`
Sum of the two numbers.
"""
return self._bin_op_numeric("+", other)
def __radd__(self, other):
return self._bin_op_numeric_reverse("+", other)
def __sub__(self, other):
"""Subtract the right number from the left.
Examples
--------
>>> hl.eval(x - 2)
1
>>> hl.eval(x - y)
-1.5
Parameters
----------
other : :class:`.NumericExpression`
Number to subtract.
Returns
-------
:class:`.NumericExpression`
Difference of the two numbers.
"""
return self._bin_op_numeric("-", other)
def __rsub__(self, other):
return self._bin_op_numeric_reverse("-", other)
def __mul__(self, other):
"""Multiply two numbers.
Examples
--------
>>> hl.eval(x * 2)
6
>>> hl.eval(x * y)
13.5
Parameters
----------
other : :class:`.NumericExpression`
Number to multiply.
Returns
-------
:class:`.NumericExpression`
Product of the two numbers.
"""
return self._bin_op_numeric("*", other)
def __rmul__(self, other):
return self._bin_op_numeric_reverse("*", other)
def __truediv__(self, other):
"""Divide two numbers.
Examples
--------
>>> hl.eval(x / 2)
1.5
>>> hl.eval(y / 0.1)
45.0
Parameters
----------
other : :class:`.NumericExpression`
            Divisor.
Returns
-------
:class:`.NumericExpression`
            The left number divided by the right.
"""
return self._bin_op_numeric("/", other, self._div_ret_type_f)
def __rtruediv__(self, other):
return self._bin_op_numeric_reverse("/", other, self._div_ret_type_f)
def __floordiv__(self, other):
"""Divide two numbers with floor division.
Examples
--------
>>> hl.eval(x // 2)
1
>>> hl.eval(y // 2)
2.0
Parameters
----------
other : :class:`.NumericExpression`
            Divisor.
Returns
-------
:class:`.NumericExpression`
The floor of the left number divided by the right.
"""
return self._bin_op_numeric('//', other)
def __rfloordiv__(self, other):
return self._bin_op_numeric_reverse('//', other)
def __mod__(self, other):
"""Compute the left modulo the right number.
Examples
--------
>>> hl.eval(32 % x)
2
>>> hl.eval(7 % y)
2.5
Parameters
----------
other : :class:`.NumericExpression`
            Divisor.
Returns
-------
:class:`.NumericExpression`
Remainder after dividing the left by the right.
"""
return self._bin_op_numeric('%', other)
def __rmod__(self, other):
return self._bin_op_numeric_reverse('%', other)
def __pow__(self, power, modulo=None):
"""Raise the left to the right power.
Examples
--------
>>> hl.eval(x ** 2)
9.0
>>> hl.eval(x ** -2)
0.1111111111111111
>>> hl.eval(y ** 1.5)
9.545941546018392
Parameters
----------
power : :class:`.NumericExpression`
modulo
Unsupported argument.
Returns
-------
:class:`.Expression` of type :py:data:`.tfloat64`
Result of raising left to the right power.
"""
return self._bin_op_numeric('**', power, lambda _: tfloat64)
def __rpow__(self, other):
return self._bin_op_numeric_reverse('**', other, lambda _: tfloat64)
class BooleanExpression(NumericExpression):
"""Expression of type :py:data:`.tbool`.
>>> t = hl.literal(True)
>>> f = hl.literal(False)
>>> na = hl.missing(hl.tbool)
>>> hl.eval(t)
True
>>> hl.eval(f)
False
>>> hl.eval(na)
None
"""
@typecheck_method(other=expr_bool)
def __rand__(self, other):
return self.__and__(other)
@typecheck_method(other=expr_bool)
def __ror__(self, other):
return self.__or__(other)
@typecheck_method(other=expr_bool)
def __and__(self, other):
"""Return ``True`` if the left and right arguments are ``True``.
Examples
--------
>>> hl.eval(t & f)
False
>>> hl.eval(t & na)
None
>>> hl.eval(f & na)
False
The ``&`` and ``|`` operators have higher priority than comparison
operators like ``==``, ``<``, or ``>``. Parentheses are often
necessary:
>>> x = hl.literal(5)
>>> hl.eval((x < 10) & (x > 2))
True
Parameters
----------
other : :class:`.BooleanExpression`
Right-side operand.
Returns
-------
:class:`.BooleanExpression`
``True`` if both left and right are ``True``.
"""
return self._method("land", tbool, other)
@typecheck_method(other=expr_bool)
def __or__(self, other):
"""Return ``True`` if at least one of the left and right arguments is ``True``.
Examples
--------
>>> hl.eval(t | f)
True
>>> hl.eval(t | na)
True
>>> hl.eval(f | na)
None
The ``&`` and ``|`` operators have higher priority than comparison
operators like ``==``, ``<``, or ``>``. Parentheses are often
necessary:
>>> x = hl.literal(5)
>>> hl.eval((x < 10) | (x > 20))
True
Parameters
----------
other : :class:`.BooleanExpression`
Right-side operand.
Returns
-------
:class:`.BooleanExpression`
``True`` if either left or right is ``True``.
"""
return self._method("lor", tbool, other)
def __invert__(self):
"""Return the boolean negation.
Examples
--------
>>> hl.eval(~t)
False
>>> hl.eval(~f)
True
>>> hl.eval(~na)
None
Returns
-------
:class:`.BooleanExpression`
Boolean negation.
"""
return self._unary_op("!")
def _extra_summary_fields(self, agg_result):
return {'Counts': agg_result}
def _summary_aggs(self):
return hl.agg.filter(hl.is_defined(self), hl.agg.counter(self))
class Float64Expression(NumericExpression):
"""Expression of type :py:data:`.tfloat64`."""
def _extra_summary_fields(self, agg_result):
return {
'Minimum': agg_result['min'],
'Maximum': agg_result['max'],
'Mean': agg_result['mean'],
'Std Dev': agg_result['stdev']
}
def _summary_aggs(self):
return hl.agg.stats(self)
class Float32Expression(NumericExpression):
"""Expression of type :py:data:`.tfloat32`."""
def _extra_summary_fields(self, agg_result):
return {
'Minimum': agg_result['min'],
'Maximum': agg_result['max'],
'Mean': agg_result['mean'],
'Std Dev': agg_result['stdev']
}
def _summary_aggs(self):
return hl.agg.stats(self)
class Int32Expression(NumericExpression):
"""Expression of type :py:data:`.tint32`."""
def _extra_summary_fields(self, agg_result):
return {
'Minimum': int(agg_result['min']),
'Maximum': int(agg_result['max']),
'Mean': agg_result['mean'],
'Std Dev': agg_result['stdev']
}
def _summary_aggs(self):
return hl.agg.stats(self)
def __mul__(self, other):
other = to_expr(other)
if other.dtype == tstr:
return other * self
else:
return NumericExpression.__mul__(self, other)
def __rmul__(self, other):
other = to_expr(other)
if other.dtype == tstr:
return other * self
else:
return NumericExpression.__mul__(self, other)
class Int64Expression(NumericExpression):
"""Expression of type :py:data:`.tint64`."""
def _extra_summary_fields(self, agg_result):
return {
'Minimum': int(agg_result['min']),
'Maximum': int(agg_result['max']),
'Mean': agg_result['mean'],
'Std Dev': agg_result['stdev']
}
def _summary_aggs(self):
return hl.agg.stats(self)
class StringExpression(Expression):
"""Expression of type :py:data:`.tstr`.
>>> s = hl.literal('The quick brown fox')
"""
def __getitem__(self, item):
"""Slice or index into the string.
Examples
--------
>>> hl.eval(s[:15])
'The quick brown'
>>> hl.eval(s[0])
'T'
Parameters
----------
item : slice or :class:`.Expression` of type :py:data:`.tint32`
Slice or character index.
Returns
-------
:class:`.StringExpression`
Substring or character at index `item`.
"""
if isinstance(item, slice):
return self._slice(item.start, item.stop, item.step)
else:
item = to_expr(item)
if not item.dtype == tint32:
raise TypeError("String expects index to be type 'slice' or expression of type 'int32', "
"found expression of type '{}'".format(item.dtype))
return self._index(tstr, item)
def __contains__(self, item):
raise TypeError("Cannot use `in` operator on hail `StringExpression`s. Use the `contains` method instead."
"`my_string.contains('cat')` instead of `'cat' in my_string`")
def __add__(self, other):
"""Concatenate strings.
Examples
--------
>>> hl.eval(s + ' jumped over the lazy dog')
'The quick brown fox jumped over the lazy dog'
Parameters
----------
other : :class:`.StringExpression`
String to concatenate.
Returns
-------
:class:`.StringExpression`
Concatenated string.
"""
other = to_expr(other)
if not other.dtype == tstr:
raise NotImplementedError("'{}' + '{}'".format(self.dtype, other.dtype))
return self._bin_op("concat", other, self.dtype)
def __radd__(self, other):
other = to_expr(other)
if not other.dtype == tstr:
raise NotImplementedError("'{}' + '{}'".format(other.dtype, self.dtype))
return self._bin_op_reverse("concat", other, self.dtype)
def __mul__(self, other):
other = to_expr(other)
if not other.dtype == tint32:
            raise NotImplementedError("'{}' * '{}'".format(self.dtype, other.dtype))
return hl.delimit(hl.range(other).map(lambda x: self), delimiter='')
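    # Illustrative sketch (editor's addition, not part of the original docstrings):
    # multiplying a string expression by an int32 repeats the string, mirroring
    # Python's `str * int`.
    #
    #     >>> hl.eval(hl.str('ab') * 3)   # 'ababab'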
def __rmul__(self, other):
other = to_expr(other)
return other * self
def _slice(self, start=None, stop=None, step=None):
if step is not None:
raise NotImplementedError('Variable slice step size is not currently supported for strings')
if start is not None:
start = to_expr(start)
if stop is not None:
stop = to_expr(stop)
return self._method('slice', tstr, start, stop)
else:
return self._method('sliceRight', tstr, start)
else:
if stop is not None:
stop = to_expr(stop)
return self._method('sliceLeft', tstr, stop)
else:
return self
def length(self):
"""Returns the length of the string.
Examples
--------
>>> hl.eval(s.length())
19
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
Length of the string.
"""
return apply_expr(lambda x: ir.Apply("length", tint32, x), tint32, self)
@typecheck_method(pattern1=expr_str, pattern2=expr_str)
def replace(self, pattern1, pattern2):
"""Replace substrings matching `pattern1` with `pattern2` using regex.
Examples
--------
Replace spaces with underscores in a Hail string:
>>> hl.eval(hl.str("The quick brown fox").replace(' ', '_'))
        'The_quick_brown_fox'
Remove the leading zero in contigs in variant strings in a table:
>>> t = hl.import_table('data/leading-zero-variants.txt')
>>> t.show()
+----------------+
| variant |
+----------------+
| str |
+----------------+
| "01:1000:A:T" |
| "01:10001:T:G" |
| "02:99:A:C" |
| "02:893:G:C" |
| "22:100:A:T" |
| "X:10:C:A" |
+----------------+
<BLANKLINE>
>>> t = t.annotate(variant = t.variant.replace("^0([0-9])", "$1"))
>>> t.show()
+---------------+
| variant |
+---------------+
| str |
+---------------+
| "1:1000:A:T" |
| "1:10001:T:G" |
| "2:99:A:C" |
| "2:893:G:C" |
| "22:100:A:T" |
| "X:10:C:A" |
+---------------+
<BLANKLINE>
Notes
-----
The regex expressions used should follow `Java regex syntax
<https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html>`_. In
the Java regular expression syntax, a dollar sign, ``$1``, refers to the
first group, not the canonical ``\\1``.
Parameters
----------
pattern1 : str or :class:`.StringExpression`
pattern2 : str or :class:`.StringExpression`
Returns
-------
"""
return self._method("replace", tstr, pattern1, pattern2)
@typecheck_method(delim=expr_str, n=nullable(expr_int32))
def split(self, delim, n=None):
"""Returns an array of strings generated by splitting the string at `delim`.
Examples
--------
>>> hl.eval(s.split('\\s+'))
['The', 'quick', 'brown', 'fox']
>>> hl.eval(s.split('\\s+', 2))
['The', 'quick brown fox']
Notes
-----
The delimiter is a regex using the
`Java regex syntax <https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html>`_
delimiter. To split on special characters, escape them with double
backslash (``\\\\``).
Parameters
----------
delim : str or :class:`.StringExpression`
Delimiter regex.
n : :class:`.Expression` of type :py:data:`.tint32`, optional
Maximum number of splits.
Returns
-------
:class:`.ArrayExpression`
Array of split strings.
"""
if n is None:
return self._method("split", tarray(tstr), delim)
else:
return self._method("split", tarray(tstr), delim, n)
def lower(self):
"""Returns a copy of the string, but with upper case letters converted
to lower case.
Examples
--------
>>> hl.eval(s.lower())
'the quick brown fox'
Returns
-------
:class:`.StringExpression`
"""
return self._method("lower", tstr)
def upper(self):
"""Returns a copy of the string, but with lower case letters converted
to upper case.
Examples
--------
>>> hl.eval(s.upper())
'THE QUICK BROWN FOX'
Returns
-------
:class:`.StringExpression`
"""
return self._method("upper", tstr)
def strip(self):
r"""Returns a copy of the string with whitespace removed from the start
and end.
Examples
--------
>>> s2 = hl.str(' once upon a time\n')
>>> hl.eval(s2.strip())
'once upon a time'
Returns
-------
:class:`.StringExpression`
"""
return self._method("strip", tstr)
@typecheck_method(substr=expr_str)
def contains(self, substr):
"""Returns whether `substr` is contained in the string.
Examples
--------
>>> hl.eval(s.contains('fox'))
True
>>> hl.eval(s.contains('dog'))
False
Note
----
This method is case-sensitive.
Parameters
----------
substr : :class:`.StringExpression`
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("contains", tbool, substr)
@typecheck_method(substr=expr_str)
def startswith(self, substr):
"""Returns whether `substr` is a prefix of the string.
Examples
--------
>>> hl.eval(s.startswith('The'))
True
>>> hl.eval(s.startswith('the'))
False
Note
----
This method is case-sensitive.
Parameters
----------
substr : :class:`.StringExpression`
Returns
-------
        :class:`.BooleanExpression`
"""
return self._method('startswith', tbool, substr)
@typecheck_method(substr=expr_str)
def endswith(self, substr):
"""Returns whether `substr` is a suffix of the string.
Examples
--------
>>> hl.eval(s.endswith('fox'))
True
Note
----
This method is case-sensitive.
Parameters
----------
substr : :class:`.StringExpression`
Returns
-------
        :class:`.BooleanExpression`
"""
return self._method('endswith', tbool, substr)
@typecheck_method(regex=str)
def first_match_in(self, regex):
"""Returns an array containing the capture groups of the first match of
`regex` in the given character sequence.
Examples
--------
>>> hl.eval(s.first_match_in("The quick (\\w+) fox"))
['brown']
>>> hl.eval(s.first_match_in("The (\\w+) (\\w+) (\\w+)"))
['quick', 'brown', 'fox']
>>> hl.eval(s.first_match_in("(\\w+) (\\w+)"))
['The', 'quick']
Parameters
----------
regex : :class:`.StringExpression`
Returns
-------
:class:`.ArrayExpression` with element type :py:data:`.tstr`
"""
return self._method('firstMatchIn', tarray(tstr), regex)
@typecheck_method(mapping=expr_dict(expr_str, expr_str))
def translate(self, mapping):
"""Translates characters of the string using `mapping`.
Examples
--------
>>> string = hl.literal('ATTTGCA')
>>> hl.eval(string.translate({'T': 'U'}))
'AUUUGCA'
Parameters
----------
mapping : :class:`.DictExpression`
Dictionary of character-character translations.
Returns
-------
:class:`.StringExpression`
See Also
--------
:meth:`.replace`
"""
return self._method('translate', tstr, mapping)
@typecheck_method(regex=expr_str)
def matches(self, regex):
"""Returns ``True`` if the string contains any match for the given regex.
Examples
--------
>>> string = hl.literal('NA12878')
The `regex` parameter does not need to match the entire string:
>>> hl.eval(string.matches('12'))
True
Regex motifs can be used to match sequences of characters:
>>> hl.eval(string.matches(r'NA\\d+'))
True
Notes
-----
The `regex` argument is a
`regular expression <https://en.wikipedia.org/wiki/Regular_expression>`__,
and uses
`Java regex syntax <https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html>`__.
Parameters
----------
regex: :class:`.StringExpression`
Pattern to match.
Returns
-------
:class:`.BooleanExpression`
``True`` if the string contains any match for the regex, otherwise ``False``.
"""
return regex._method("regexMatch", tbool, self)
def reverse(self):
"""Returns the reversed value.
Examples
--------
>>> string = hl.literal('ATGCC')
>>> hl.eval(string.reverse())
'CCGTA'
Returns
-------
:class:`.StringExpression`
"""
return self._method('reverse', tstr)
@typecheck_method(collection=expr_oneof(expr_array(), expr_set()))
def join(self, collection):
"""Returns a string which is the concatenation of the strings in `collection`
separated by the string providing this method. Raises :class:`TypeError` if
the element type of `collection` is not :data:`.tstr`.
Examples
--------
>>> a = ['Bob', 'Charlie', 'Alice', 'Bob', 'Bob']
>>> hl.eval(hl.str(',').join(a))
'Bob,Charlie,Alice,Bob,Bob'
Parameters
----------
collection : :class:`.ArrayExpression` or :class:`.SetExpression`
Collection.
Returns
-------
:class:`.StringExpression`
Joined string expression.
"""
if collection.dtype.element_type != tstr:
raise TypeError(f"Expected str collection, {collection.dtype.element_type} found")
return hl.delimit(collection, self)
def _extra_summary_fields(self, agg_result):
return {
'Min Size': agg_result[0],
'Max Size': agg_result[1],
'Mean Size': agg_result[2],
'Sample Values': agg_result[3],
}
def _summary_aggs(self):
length = hl.len(self)
return hl.tuple((
hl.agg.min(length),
hl.agg.max(length),
hl.agg.mean(length),
hl.agg.filter(hl.is_defined(self), hl.agg.take(self, 5))))
class CallExpression(Expression):
"""Expression of type :py:data:`.tcall`.
>>> call = hl.call(0, 1, phased=False)
"""
def __getitem__(self, item):
"""Get the i*th* allele.
Examples
--------
Index with a single integer:
>>> hl.eval(call[0])
0
>>> hl.eval(call[1])
1
Parameters
----------
item : int or :class:`.Expression` of type :py:data:`.tint32`
Allele index.
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
"""
if isinstance(item, slice):
raise NotImplementedError("CallExpression does not support indexing with a slice.")
else:
item = to_expr(item)
if not item.dtype == tint32:
raise TypeError("Call expects allele index to be an expression of type 'int32', "
"found expression of type '{}'".format(item.dtype))
return self._index(tint32, item)
@property
def ploidy(self):
"""Return the number of alleles of this call.
Examples
--------
>>> hl.eval(call.ploidy)
2
Notes
-----
Currently only ploidy 1 and 2 are supported.
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
"""
return self._method("ploidy", tint32)
@property
def phased(self):
"""True if the call is phased.
Examples
--------
>>> hl.eval(call.phased)
False
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("isPhased", tbool)
def is_haploid(self):
"""True if the call has ploidy equal to 1.
Examples
--------
>>> hl.eval(call.is_haploid())
False
Returns
-------
:class:`.BooleanExpression`
"""
return self.ploidy == 1
def is_diploid(self):
"""True if the call has ploidy equal to 2.
Examples
--------
>>> hl.eval(call.is_diploid())
True
Returns
-------
:class:`.BooleanExpression`
"""
return self.ploidy == 2
def is_non_ref(self):
"""Evaluate whether the call includes one or more non-reference alleles.
Examples
--------
>>> hl.eval(call.is_non_ref())
True
Notes
-----
In the diploid biallelic case, a ``0/0`` call will return ``False``,
and ``0/1`` and ``1/1`` will return ``True``.
Returns
-------
:class:`.BooleanExpression`
``True`` if at least one allele is non-reference, ``False`` otherwise.
"""
return self._method("isNonRef", tbool)
def is_het(self):
"""Evaluate whether the call includes two different alleles.
Examples
--------
>>> hl.eval(call.is_het())
True
Notes
-----
In the diploid biallelic case, a ``0/1`` call will return ``True``,
and ``0/0`` and ``1/1`` will return ``False``.
Returns
-------
:class:`.BooleanExpression`
``True`` if the two alleles are different, ``False`` if they are the same.
"""
return self._method("isHet", tbool)
def is_het_non_ref(self):
"""Evaluate whether the call includes two different alleles, neither of which is reference.
Examples
--------
>>> hl.eval(call.is_het_non_ref())
False
Notes
-----
A biallelic variant may never have a het-non-ref call. Examples of
these calls are ``1/2`` and ``2/4``.
Returns
-------
:class:`.BooleanExpression`
``True`` if the call includes two different alternate alleles, ``False`` otherwise.
"""
return self._method("isHetNonRef", tbool)
def is_het_ref(self):
"""Evaluate whether the call includes two different alleles, one of which is reference.
Examples
--------
>>> hl.eval(call.is_het_ref())
True
Returns
-------
:class:`.BooleanExpression`
``True`` if the call includes one reference and one alternate allele, ``False`` otherwise.
"""
return self._method("isHetRef", tbool)
def is_hom_ref(self):
"""Evaluate whether the call includes two reference alleles.
Examples
--------
>>> hl.eval(call.is_hom_ref())
False
Returns
-------
:class:`.BooleanExpression`
``True`` if the call includes two reference alleles, ``False`` otherwise.
"""
return self._method("isHomRef", tbool)
def is_hom_var(self):
"""Evaluate whether the call includes two identical alternate alleles.
Examples
--------
>>> hl.eval(call.is_hom_var())
False
Returns
-------
:class:`.BooleanExpression`
``True`` if the call includes two identical alternate alleles, ``False`` otherwise.
"""
return self._method("isHomVar", tbool)
def n_alt_alleles(self):
"""Returns the number of non-reference alleles.
Examples
--------
>>> hl.eval(call.n_alt_alleles())
1
Notes
-----
For diploid biallelic calls, this method is equivalent to the alternate
allele dosage. For instance, ``0/0`` will return ``0``, ``0/1`` will
return ``1``, and ``1/1`` will return ``2``.
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
The number of non-reference alleles.
"""
return self._method("nNonRefAlleles", tint32)
@typecheck_method(alleles=oneof(expr_array(expr_str), expr_int32))
def one_hot_alleles(self, alleles):
"""Returns an array containing the summed one-hot encoding of the
alleles.
Examples
--------
Compute one-hot encoding when number of total alleles is 2.
>>> hl.eval(call.one_hot_alleles(2))
[1, 1]
**DEPRECATED**: Compute one-hot encoding based on length of list of alleles.
>>> hl.eval(call.one_hot_alleles(['A', 'T']))
[1, 1]
This one-hot representation is the positional sum of the one-hot
encoding for each called allele. For a biallelic variant, the one-hot
encoding for a reference allele is ``[1, 0]`` and the one-hot encoding
for an alternate allele is ``[0, 1]``. Diploid calls would produce the
following arrays: ``[2, 0]`` for homozygous reference, ``[1, 1]`` for
heterozygous, and ``[0, 2]`` for homozygous alternate.
Parameters
----------
alleles: :class:`.Int32Expression` or :class:`.ArrayExpression` of :obj:`.tstr`.
Number of total alleles, including the reference, or array of variant alleles.
Returns
-------
:class:`.ArrayExpression` of :obj:`.tint32`
An array of summed one-hot encodings of allele indices.
"""
if isinstance(alleles, Int32Expression):
n_alleles = alleles
else:
n_alleles = hl.len(alleles)
return self._method("oneHotAlleles", tarray(tint32), n_alleles)
def unphased_diploid_gt_index(self):
"""Return the genotype index for unphased, diploid calls.
Examples
--------
>>> hl.eval(call.unphased_diploid_gt_index())
1
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
"""
return self._method("unphasedDiploidGtIndex", tint32)
def _extra_summary_fields(self, agg_result):
return {
'Homozygous Reference': agg_result[0],
'Heterozygous': agg_result[1],
'Homozygous Variant': agg_result[2],
'Ploidy': agg_result[3],
'Phased': agg_result[4]
}
def _summary_aggs(self):
return hl.tuple((
hl.agg.count_where(self.is_hom_ref()),
hl.agg.count_where(self.is_het()),
hl.agg.count_where(self.is_hom_var()),
hl.agg.filter(hl.is_defined(self), hl.agg.counter(self.ploidy)),
hl.agg.filter(hl.is_defined(self), hl.agg.counter(self.phased))))
class LocusExpression(Expression):
"""Expression of type :class:`.tlocus`.
>>> locus = hl.locus('1', 1034245)
"""
@property
def contig(self):
"""Returns the chromosome.
Examples
--------
>>> hl.eval(locus.contig)
'1'
Returns
-------
:class:`.StringExpression`
The chromosome for this locus.
"""
return self._method("contig", tstr)
@property
def position(self):
"""Returns the position along the chromosome.
Examples
--------
>>> hl.eval(locus.position)
1034245
Returns
-------
:class:`.Expression` of type :py:data:`.tint32`
This locus's position along its chromosome.
"""
return self._method("position", tint32)
def global_position(self):
"""Returns a zero-indexed absolute position along the reference genome.
The global position is computed as :py:attr:`~position` - 1 plus the sum
of the lengths of all the contigs that precede this locus's :py:attr:`~contig`
in the reference genome's ordering of contigs.
See also :func:`.locus_from_global_position`.
Examples
--------
A locus with position 1 along chromosome 1 will have a global position of 0 along
the reference genome GRCh37.
>>> hl.eval(hl.locus('1', 1).global_position())
0
A locus with position 1 along chromosome 2 will have a global position of (1-1) + 249250621,
where 249250621 is the length of chromosome 1 on GRCh37.
>>> hl.eval(hl.locus('2', 1).global_position())
249250621
A different reference genome than the default results in a different global position.
>>> hl.eval(hl.locus('chr2', 1, 'GRCh38').global_position())
248956422
Returns
-------
:class:`.Expression` of type :py:data:`.tint64`
Global base position of locus along the reference genome.
"""
return self._method('locusToGlobalPos', tint64)
def in_x_nonpar(self):
"""Returns ``True`` if the locus is in a non-pseudoautosomal
region of chromosome X.
Examples
--------
>>> hl.eval(locus.in_x_nonpar())
False
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("inXNonPar", tbool)
def in_x_par(self):
"""Returns ``True`` if the locus is in a pseudoautosomal region
of chromosome X.
Examples
--------
>>> hl.eval(locus.in_x_par())
False
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("inXPar", tbool)
def in_y_nonpar(self):
"""Returns ``True`` if the locus is in a non-pseudoautosomal
region of chromosome Y.
Examples
--------
>>> hl.eval(locus.in_y_nonpar())
False
Note
----
Many variant callers only generate variants on chromosome X for the
pseudoautosomal region. In this case, all loci mapped to chromosome
Y are non-pseudoautosomal.
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("inYNonPar", tbool)
def in_y_par(self):
"""Returns ``True`` if the locus is in a pseudoautosomal region
of chromosome Y.
Examples
--------
>>> hl.eval(locus.in_y_par())
False
Note
----
Many variant callers only generate variants on chromosome X for the
pseudoautosomal region. In this case, all loci mapped to chromosome
Y are non-pseudoautosomal.
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("inYPar", tbool)
def in_autosome(self):
"""Returns ``True`` if the locus is on an autosome.
Notes
-----
All contigs are considered autosomal except those
designated as X, Y, or MT by :class:`.ReferenceGenome`.
Examples
--------
>>> hl.eval(locus.in_autosome())
True
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("isAutosomal", tbool)
def in_autosome_or_par(self):
"""Returns ``True`` if the locus is on an autosome or
a pseudoautosomal region of chromosome X or Y.
Examples
--------
>>> hl.eval(locus.in_autosome_or_par())
True
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("isAutosomalOrPseudoAutosomal", tbool)
def in_mito(self):
"""Returns ``True`` if the locus is on mitochondrial DNA.
Examples
--------
>>> hl.eval(locus.in_mito())
False
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("isMitochondrial", tbool)
@typecheck_method(before=expr_int32, after=expr_int32)
def sequence_context(self, before=0, after=0):
"""Return the reference genome sequence at the locus.
Examples
--------
Get the reference allele at a locus:
>>> hl.eval(locus.sequence_context()) # doctest: +SKIP
"G"
Get the reference sequence at a locus including the previous 5 bases:
>>> hl.eval(locus.sequence_context(before=5)) # doctest: +SKIP
"ACTCGG"
Notes
-----
This function requires that this locus' reference genome has an attached
reference sequence. Use :meth:`.ReferenceGenome.add_sequence` to
load and attach a reference sequence to a reference genome.
Parameters
----------
before : :class:`.Expression` of type :py:data:`.tint32`, optional
Number of bases to include before the locus. Truncates at
contig boundary.
after : :class:`.Expression` of type :py:data:`.tint32`, optional
Number of bases to include after the locus. Truncates at
contig boundary.
Returns
-------
:class:`.StringExpression`
"""
rg = self.dtype.reference_genome
if not rg.has_sequence():
raise TypeError("Reference genome '{}' does not have a sequence loaded. Use 'add_sequence' to load the sequence from a FASTA file.".format(rg.name))
return hl.get_sequence(self.contig, self.position, before, after, rg)
@typecheck_method(before=expr_int32, after=expr_int32)
def window(self, before, after):
"""Returns an interval of a specified number of bases around the locus.
Examples
--------
Create a window of two megabases centered at a locus:
>>> locus = hl.locus('16', 29_500_000)
>>> window = locus.window(1_000_000, 1_000_000)
>>> hl.eval(window)
Interval(start=Locus(contig=16, position=28500000, reference_genome=GRCh37), end=Locus(contig=16, position=30500000, reference_genome=GRCh37), includes_start=True, includes_end=True)
Notes
-----
The returned interval is inclusive of both the `start` and `end`
endpoints.
Parameters
----------
before : :class:`.Expression` of type :py:data:`.tint32`
Number of bases to include before the locus. Truncates at 1.
after : :class:`.Expression` of type :py:data:`.tint32`
Number of bases to include after the locus. Truncates at
contig length.
Returns
-------
:class:`.IntervalExpression`
"""
start_pos = hl.max(1, self.position - before)
rg = self.dtype.reference_genome
end_pos = hl.min(hl.contig_length(self.contig, rg), self.position + after)
return hl.interval(start=hl.locus(self.contig, start_pos, reference_genome=rg),
end=hl.locus(self.contig, end_pos, reference_genome=rg),
includes_start=True,
includes_end=True)
def _extra_summary_fields(self, agg_result):
return {'Contig Counts': agg_result}
def _summary_aggs(self):
return hl.agg.filter(hl.is_defined(self), hl.agg.counter(self.contig))
class IntervalExpression(Expression):
"""Expression of type :class:`.tinterval`.
>>> interval = hl.interval(3, 11)
>>> locus_interval = hl.parse_locus_interval("1:53242-90543")
"""
@typecheck_method(value=expr_any)
def contains(self, value):
"""Tests whether a value is contained in the interval.
Examples
--------
>>> hl.eval(interval.contains(3))
True
>>> hl.eval(interval.contains(11))
False
Parameters
----------
value :
Object with type matching the interval point type.
Returns
-------
:class:`.BooleanExpression`
``True`` if `value` is contained in the interval, ``False`` otherwise.
"""
if self.dtype.point_type != value.dtype:
raise TypeError("expected '{}', found: '{}'".format(self.dtype.point_type, value.dtype))
return self._method("contains", tbool, value)
@typecheck_method(interval=expr_interval(expr_any))
def overlaps(self, interval):
"""True if the the supplied interval contains any value in common with this one.
Examples
--------
>>> hl.eval(interval.overlaps(hl.interval(5, 9)))
True
>>> hl.eval(interval.overlaps(hl.interval(11, 20)))
False
Parameters
----------
interval : :class:`.Expression` with type :class:`.tinterval`
Interval object with the same point type.
Returns
-------
:class:`.BooleanExpression`
"""
if self.dtype.point_type != interval.dtype.point_type:
raise TypeError("expected '{}', found: '{}'".format(self.dtype.point_type, interval.dtype.point_type))
return self._method("overlaps", tbool, interval)
@property
def end(self):
"""Returns the end point.
Examples
--------
>>> hl.eval(interval.end)
11
Returns
-------
:class:`.Expression`
"""
return self._method("end", self.dtype.point_type)
@property
def start(self):
"""Returns the start point.
Examples
--------
>>> hl.eval(interval.start)
3
Returns
-------
:class:`.Expression`
"""
return self._method("start", self.dtype.point_type)
@property
def includes_start(self):
"""True if the interval includes the start point.
Examples
--------
>>> hl.eval(interval.includes_start)
True
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("includesStart", tbool)
@property
def includes_end(self):
"""True if the interval includes the end point.
Examples
--------
>>> hl.eval(interval.includes_end)
False
Returns
-------
:class:`.BooleanExpression`
"""
return self._method("includesEnd", tbool)
class NDArrayExpression(Expression):
"""Expression of type :class:`.tndarray`.
>>> nd = hl.nd.array([[1, 2], [3, 4]])
"""
def _data_array(self):
shape = self.shape
ndims = self.ndim
def f(i, *vars):
if i == ndims:
return self[vars]
else:
return hl.range(0, hl.int32(shape[i])).map(lambda idx: f(i + 1, *vars, idx))
return f(0)
@property
def ndim(self):
"""The number of dimensions of this ndarray.
Examples
--------
>>> nd.ndim
2
Returns
-------
:obj:`int`
"""
return self._type.ndim
@property
def T(self):
"""Reverse the dimensions of this ndarray. For an n-dimensional array `a`,
a[i_0, ..., i_n-1, i_n] = a.T[i_n, i_n-1, ..., i_0].
Same as `self.transpose()`.
See also :meth:`.transpose`.
Returns
-------
:class:`.NDArrayExpression`.
"""
return self.transpose()
@typecheck_method(axes=nullable(tupleof(int)))
def transpose(self, axes=None):
"""
Permute the dimensions of this ndarray according to the ordering of axes. Axis j in the ith index of
axes maps the jth dimension of the ndarray to the ith dimension of the output ndarray.
Parameters
----------
axes : :obj:`tuple` of :obj:`int`, optional
The new ordering of the ndarray's dimensions.
Notes
-----
Does nothing on ndarrays of dimensionality 0 or 1.
Returns
-------
:class:`.NDArrayExpression`.
"""
if axes is None:
axes = list(reversed(range(self.ndim)))
else:
if len(axes) != self.ndim:
raise ValueError(f'Must specify a complete permutation of the dimensions. '
f'Expected {self.ndim} axes, got {len(axes)}')
if len(set(axes)) != len(axes):
raise ValueError(f'Axes cannot contain duplicates: {axes}')
for axis in axes:
if not 0 <= axis < self.ndim:
raise ValueError(f'Invalid axis: {axis}')
if self.ndim < 2:
return self
return construct_expr(ir.NDArrayReindex(self._ir, axes), self._type, self._indices, self._aggregations)
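    # Illustrative sketch (editor's addition): with the 2x2 example array from the
    # class docstring, transposing swaps rows and columns; `nd.T` is shorthand for
    # the same call.
    #
    #     >>> hl.eval(nd.transpose())        # [[1, 3], [2, 4]] as a NumPy array
    #     >>> hl.eval(nd.transpose((1, 0)))  # same result, explicit axis order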
@property
def shape(self):
"""The shape of this ndarray.
Examples
--------
>>> hl.eval(nd.shape)
(2, 2)
Returns
-------
:class:`.TupleExpression`
"""
shape_type = ttuple(*[tint64 for _ in range(self.ndim)])
return construct_expr(ir.NDArrayShape(self._ir), shape_type, self._indices, self._aggregations)
_opt_long_slice = sliceof(nullable(expr_int64), nullable(expr_int64), nullable(expr_int64))
@typecheck_method(item=nullable(oneof(expr_int64, type(...), _opt_long_slice, tupleof(nullable(oneof(expr_int64, type(...), _opt_long_slice))))))
def __getitem__(self, item):
if not isinstance(item, tuple):
item = (item,)
num_ellipses = len([e for e in item if isinstance(e, type(...))])
if num_ellipses > 1:
raise IndexError("an index can only have a single ellipsis (\'...\')")
num_nones = len([x for x in item if x is None])
list_item = list(item)
if num_ellipses == 1:
list_types = [type(e) for e in list_item]
ellipsis_location = list_types.index(type(...))
num_slices_to_add = self.ndim - (len(item) - num_nones) + 1
no_ellipses = list_item[:ellipsis_location] + [slice(None)] * num_slices_to_add + list_item[ellipsis_location + 1:]
else:
no_ellipses = list_item
no_nums = [x for x in no_ellipses if ((x is None) or (isinstance(x, slice)))]
indices_nones = [i for i, x in enumerate(no_nums) if x is None]
formatted_item = [x for x in no_ellipses if x is not None]
if len(formatted_item) > self.ndim:
raise IndexError(f'too many indices for array: array is '
f'{self.ndim}-dimensional, but {len(item)} were indexed')
if len(formatted_item) < self.ndim:
formatted_item += [slice(None, None, None)] * (self.ndim - len(formatted_item))
n_sliced_dims = len([s for s in formatted_item if isinstance(s, slice)])
if n_sliced_dims > 0:
slices = []
for i, s in enumerate(formatted_item):
dlen = self.shape[i]
if isinstance(s, slice):
if s.step is not None:
step = hl.case().when(s.step != 0, s.step) \
.or_error("Slice step cannot be zero")
else:
step = to_expr(1, tint64)
max_bound = hl.if_else(step > 0, dlen, dlen - 1)
min_bound = hl.if_else(step > 0, to_expr(0, tint64), to_expr(-1, tint64))
if s.start is not None:
# python treats start < -dlen as None when step < 0: [0,1][-3:0:-1]
# and 0 otherwise: [0,1][-3::1] == [0,1][0::1]
start = hl.case() \
.when(s.start >= dlen, max_bound) \
.when(s.start >= 0, s.start) \
.when((s.start + dlen) >= 0, dlen + s.start) \
.default(min_bound)
else:
start = hl.if_else(step >= 0, to_expr(0, tint64), dlen - 1)
if s.stop is not None:
# python treats stop < -dlen as None when step < 0: [0,1][0:-3:-1] == [0,1][0::-1]
# and 0 otherwise: [0,1][:-3:1] == [0,1][:0:1]
stop = hl.case() \
.when(s.stop >= dlen, max_bound) \
.when(s.stop >= 0, s.stop) \
.when((s.stop + dlen) >= 0, dlen + s.stop) \
.default(min_bound)
else:
stop = hl.if_else(step > 0, dlen, to_expr(-1, tint64))
slices.append(hl.tuple((start, stop, step)))
else:
adjusted_index = hl.if_else(s < 0, s + dlen, s)
checked_int = hl.case().when((adjusted_index < dlen) & (adjusted_index >= 0), adjusted_index).or_error(
hl.str("Index ") + hl.str(s) + hl.str(f" is out of bounds for axis {i} with size ") + hl.str(dlen)
)
slices.append(checked_int)
product = construct_expr(ir.NDArraySlice(self._ir, hl.tuple(slices)._ir),
tndarray(self._type.element_type, n_sliced_dims),
self._indices,
self._aggregations)
if len(indices_nones) > 0:
reshape_arg = []
index_non_nones = 0
for i in range(n_sliced_dims + num_nones):
if i in indices_nones:
reshape_arg.append(1)
else:
reshape_arg.append(product.shape[index_non_nones])
index_non_nones += 1
product = product.reshape(tuple(reshape_arg))
else:
product = construct_expr(ir.NDArrayRef(self._ir, [idx._ir for idx in formatted_item]),
self._type.element_type,
self._indices,
self._aggregations)
if len(indices_nones) > 0:
reshape_arg = []
for i in indices_nones:
reshape_arg.append(1)
product = hl.nd.array(product).reshape(tuple(reshape_arg))
return product
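    # Illustrative sketch (editor's addition): indexing and slicing follow NumPy
    # semantics, including negative indices and slices.
    #
    #     >>> hl.eval(nd[0, 1])    # element at row 0, column 1 -> 2
    #     >>> hl.eval(nd[1, :])    # second row -> [3, 4]
    #     >>> hl.eval(nd[:, -1])   # last column -> [2, 4]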
@typecheck_method(shape=oneof(expr_int64, tupleof(expr_int64), expr_tuple()))
def reshape(self, *shape):
"""Reshape this ndarray to a new shape.
Parameters
----------
shape : :class:`.Expression` of type :py:data:`.tint64` or
            :obj:`tuple` of :class:`.Expression` of type :py:data:`.tint64`
Examples
--------
>>> v = hl.nd.array([1, 2, 3, 4]) # doctest: +SKIP
>>> m = v.reshape((2, 2)) # doctest: +SKIP
Returns
-------
:class:`.NDArrayExpression`.
"""
# varargs with many ints works, but can't be a mix of ints and tuples.
if len(shape) > 1:
for i, arg in enumerate(shape):
if not isinstance(arg, Int64Expression):
raise TypeError(f"Argument {i} of reshape needs to be of type tint64.")
else:
shape = shape[0]
if isinstance(shape, TupleExpression):
for i, tuple_field_type in enumerate(shape.dtype.types):
if tuple_field_type not in [hl.tint32, hl.tint64]:
raise TypeError(f"Argument {i} of reshape needs to be an integer, got {tuple_field_type}.")
shape_ir = hl.or_missing(hl.is_defined(shape), hl.tuple([hl.int64(i) for i in shape]))._ir
ndim = len(shape)
else:
wrapped_shape = wrap_to_list(shape)
ndim = len(wrapped_shape)
shape_ir = hl.tuple(wrapped_shape)._ir
return construct_expr(ir.NDArrayReshape(self._ir, shape_ir),
tndarray(self._type.element_type, ndim),
self._indices,
self._aggregations)
@typecheck_method(f=func_spec(1, expr_any))
def map(self, f):
"""Applies an element-wise operation on an NDArray.
Parameters
----------
f : function ( (arg) -> :class:`.Expression`)
Function to transform each element of the NDArray.
Returns
-------
:class:`.NDArrayExpression`.
NDArray where each element has been transformed according to `f`.
"""
element_type = self._type.element_type
ndarray_map = self._ir_lambda_method(ir.NDArrayMap, f, element_type, lambda t: tndarray(t, self.ndim))
assert isinstance(self._type, tndarray)
return ndarray_map
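    # Illustrative sketch (editor's addition): element-wise transformation with a
    # Python lambda over the example array from the class docstring.
    #
    #     >>> hl.eval(nd.map(lambda x: x * 10))   # [[10, 20], [30, 40]]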
@typecheck_method(other=oneof(expr_ndarray(), list), f=func_spec(2, expr_any))
def map2(self, other, f):
"""Applies an element-wise binary operation on two NDArrays.
Parameters
----------
other : class:`.NDArrayExpression`, :class:`.ArrayExpression`, numpy NDarray,
or nested python list/tuples. Both NDArrays must be the same shape or
broadcastable into common shape.
f : function ((arg1, arg2)-> :class:`.Expression`)
Function to be applied to each element of both NDArrays.
Returns
-------
:class:`.NDArrayExpression`.
Element-wise result of applying `f` to each index in NDArrays.
"""
if isinstance(other, list) or isinstance(other, np.ndarray):
other = hl.nd.array(other)
self_broadcast, other_broadcast = self._broadcast_to_same_ndim(other)
element_type1 = self_broadcast._type.element_type
element_type2 = other_broadcast._type.element_type
ndarray_map2 = self_broadcast._ir_lambda_method2(other_broadcast, ir.NDArrayMap2, f, element_type1,
element_type2, lambda t: tndarray(t, self_broadcast.ndim))
assert isinstance(self._type, tndarray)
return ndarray_map2
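    # Illustrative sketch (editor's addition): the second operand may be another
    # ndarray expression, a NumPy array, or a nested list; shapes are broadcast
    # if needed.
    #
    #     >>> hl.eval(nd.map2([[10, 20], [30, 40]], lambda a, b: a + b))   # [[11, 22], [33, 44]]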
def _broadcast_to_same_ndim(self, other):
if isinstance(other, NDArrayExpression):
if self.ndim < other.ndim:
return self._broadcast(other.ndim), other
elif self.ndim > other.ndim:
return self, other._broadcast(self.ndim)
return self, other
def _broadcast(self, n_output_dims):
assert self.ndim < n_output_dims
# Right-align existing dimensions and start prepending new ones
# to the left: e.g. [0, 1] -> [3, 2, 0, 1]
# Based off numpy broadcasting with the assumption that everything
# can be thought to have an infinite number of 1-length dimensions
# prepended
old_dims = range(self.ndim)
new_dims = range(self.ndim, n_output_dims)
idx_mapping = list(reversed(new_dims)) + list(old_dims)
return construct_expr(ir.NDArrayReindex(self._ir, idx_mapping),
tndarray(self._type.element_type, n_output_dims),
self._indices, self._aggregations)
class NDArrayNumericExpression(NDArrayExpression):
"""Expression of type :class:`.tndarray` with a numeric element type.
Numeric ndarrays support arithmetic both with scalar values and other
arrays. Arithmetic between two numeric ndarrays requires that the shapes of
each ndarray be either identical or compatible for broadcasting. Operations
are applied positionally (``nd1 * nd2`` will multiply the first element of
``nd1`` by the first element of ``nd2``, the second element of ``nd1`` by
the second element of ``nd2``, and so on). Arithmetic with a scalar will
apply the operation to each element of the ndarray.
"""
def _bin_op_numeric(self, name, other, ret_type_f=None):
if isinstance(other, list) or isinstance(other, np.ndarray):
other = hl.nd.array(other)
self_broadcast, other_broadcast = self._broadcast_to_same_ndim(other)
return super(NDArrayNumericExpression, self_broadcast)._bin_op_numeric(name, other_broadcast, ret_type_f)
def _bin_op_numeric_reverse(self, name, other, ret_type_f=None):
if isinstance(other, list) or isinstance(other, np.ndarray):
other = hl.nd.array(other)
self_broadcast, other_broadcast = self._broadcast_to_same_ndim(other)
return super(NDArrayNumericExpression, self_broadcast)._bin_op_numeric_reverse(name, other_broadcast, ret_type_f)
def __neg__(self):
"""Negate elements of the ndarray.
Returns
-------
:class:`.NDArrayNumericExpression`
Array expression of the same type.
"""
return self * -1
def __add__(self, other):
"""Positionally add an array or a scalar.
Parameters
----------
other : :class:`.NumericExpression` or :class:`.NDArrayNumericExpression`
Value or ndarray to add.
Returns
-------
:class:`.NDArrayNumericExpression`
NDArray of positional sums.
"""
return self._bin_op_numeric("+", other)
def __radd__(self, other):
return self._bin_op_numeric_reverse("+", other)
def __sub__(self, other):
"""Positionally subtract a ndarray or a scalar.
Parameters
----------
other : :class:`.NumericExpression` or :class:`.NDArrayNumericExpression`
Value or ndarray to subtract.
Returns
-------
:class:`.NDArrayNumericExpression`
NDArray of positional differences.
"""
return self._bin_op_numeric("-", other)
def __rsub__(self, other):
return self._bin_op_numeric_reverse("-", other)
def __mul__(self, other):
"""Positionally multiply by a ndarray or a scalar.
Parameters
----------
other : :class:`.NumericExpression` or :class:`.NDArrayNumericExpression`
Value or ndarray to multiply by.
Returns
-------
:class:`.NDArrayNumericExpression`
NDArray of positional products.
"""
return self._bin_op_numeric("*", other)
def __rmul__(self, other):
return self._bin_op_numeric_reverse("*", other)
def __truediv__(self, other):
"""Positionally divide by a ndarray or a scalar.
Parameters
----------
other : :class:`.NumericExpression` or :class:`.NDArrayNumericExpression`
Value or ndarray to divide by.
Returns
-------
:class:`.NDArrayNumericExpression`
NDArray of positional quotients.
"""
return self._bin_op_numeric("/", other, self._div_ret_type_f)
def __rtruediv__(self, other):
return self._bin_op_numeric_reverse("/", other, self._div_ret_type_f)
def __floordiv__(self, other):
"""Positionally divide by a ndarray or a scalar using floor division.
Parameters
----------
other : :class:`.NumericExpression` or :class:`.NDArrayNumericExpression`
Returns
-------
:class:`.NDArrayNumericExpression`
"""
return self._bin_op_numeric('//', other)
def __rfloordiv__(self, other):
return self._bin_op_numeric_reverse('//', other)
def __rmatmul__(self, other):
if not isinstance(other, NDArrayNumericExpression):
other = hl.nd.array(other)
return other.__matmul__(self)
def __matmul__(self, other):
"""Matrix multiplication: `a @ b`, semantically equivalent to `NumPy` matmul. If `a` and `b` are vectors,
the vector dot product is performed, returning a `NumericExpression`. If `a` and `b` are both 2-dimensional
matrices, this performs normal matrix multiplication. If `a` and `b` have more than 2 dimensions, they are
treated as multi-dimensional stacks of 2-dimensional matrices. Matrix multiplication is applied element-wise
across the higher dimensions. E.g. if `a` has shape `(3, 4, 5)` and `b` has shape `(3, 5, 6)`, `a` is treated
as a stack of three matrices of shape `(4, 5)` and `b` as a stack of three matrices of shape `(5, 6)`. `a @ b`
would then have shape `(3, 4, 6)`.
Notes
-----
The last dimension of `a` and the second to last dimension of `b` (or only dimension if `b` is a vector)
must have the same length. The dimensions to the left of the last two dimensions of `a` and `b` (for NDArrays
of dimensionality > 2) must be equal or be compatible for broadcasting.
Number of dimensions of both NDArrays must be at least 1.
Parameters
----------
        other : :class:`numpy.ndarray` or :class:`.NDArrayNumericExpression`
Returns
-------
:class:`.NDArrayNumericExpression` or :class:`.NumericExpression`
"""
if not isinstance(other, NDArrayNumericExpression):
other = hl.nd.array(other)
if self.ndim == 0 or other.ndim == 0:
raise ValueError('MatMul must be between objects of 1 dimension or more. Try * instead')
if self.ndim > 1 and other.ndim > 1:
left, right = self._broadcast_to_same_ndim(other)
else:
left, right = self, other
from hail.linalg.utils.misc import _ndarray_matmul_ndim
result_ndim = _ndarray_matmul_ndim(left.ndim, right.ndim)
elem_type = unify_types(self._type.element_type, other._type.element_type)
ret_type = tndarray(elem_type, result_ndim)
left = left._promote_numeric(ret_type)
right = right._promote_numeric(ret_type)
res = construct_expr(ir.NDArrayMatMul(left._ir, right._ir), ret_type, self._indices, self._aggregations)
return res if result_ndim > 0 else res[()]
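    # Illustrative sketch (editor's addition): vector @ vector gives a scalar dot
    # product; matrix @ matrix gives ordinary matrix multiplication.
    #
    #     >>> v = hl.nd.array([1.0, 2.0])
    #     >>> hl.eval(v @ v)   # 5.0
    #     >>> m = hl.nd.array([[1.0, 2.0], [3.0, 4.0]])
    #     >>> hl.eval(m @ m)   # [[7.0, 10.0], [15.0, 22.0]]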
@typecheck_method(axis=nullable(oneof(int, tupleof(int))))
def sum(self, axis=None):
"""Sum out one or more axes of an ndarray.
Parameters
----------
        axis : :class:`int` or :class:`tuple` of :obj:`int`
The axis or axes to sum out.
Returns
-------
:class:`.NDArrayNumericExpression` or :class:`.NumericExpression`
"""
if axis is None:
axis = tuple(range(self.ndim))
if self._type.element_type is hl.tbool:
return self.map(lambda x: hl.int(x)).sum(axis)
else:
axis = wrap_to_tuple(axis)
res_ir = ir.NDArrayAgg(self._ir, axis)
axes_set = set(axis)
if len(axes_set) < len(axis):
raise ValueError("duplicate value in 'axis'")
for element in axes_set:
if element < 0 or element >= self.ndim:
raise ValueError(f"axis {element} is out of bounds for ndarray of dimension {self.ndim}")
num_axes_deleted = len(axes_set)
result_ndim = self.ndim - num_axes_deleted
result = construct_expr(res_ir, tndarray(self._type.element_type, result_ndim), self._indices, self._aggregations)
if result_ndim == 0:
return result[()]
else:
return result
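    # Illustrative sketch (editor's addition): summing every axis yields a scalar,
    # while summing one axis drops only that dimension (example array [[1, 2], [3, 4]]).
    #
    #     >>> hl.eval(nd.sum())    # 10
    #     >>> hl.eval(nd.sum(0))   # column sums -> [4, 6]
    #     >>> hl.eval(nd.sum(1))   # row sums -> [3, 7]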
scalars = {tbool: BooleanExpression,
tint32: Int32Expression,
tint64: Int64Expression,
tfloat32: Float32Expression,
tfloat64: Float64Expression,
tstr: StringExpression,
tcall: CallExpression}
typ_to_expr = {
tlocus: LocusExpression,
tinterval: IntervalExpression,
tcall: CallExpression,
tdict: DictExpression,
tarray: ArrayExpression,
tset: SetExpression,
tstruct: StructExpression,
ttuple: TupleExpression,
tndarray: NDArrayExpression
}
def apply_expr(f, result_type, *args):
indices, aggregations = unify_all(*args)
ir = f(*[arg._ir for arg in args])
return construct_expr(ir, result_type, indices, aggregations)
@typecheck(x=ir.IR, type=nullable(HailType), indices=Indices, aggregations=LinkedList)
def construct_expr(x: ir.IR,
type: HailType,
indices: Indices = Indices(),
aggregations: LinkedList = LinkedList(Aggregation)):
if type is None:
return Expression(x, None, indices, aggregations)
elif isinstance(type, tarray) and is_numeric(type.element_type):
return ArrayNumericExpression(x, type, indices, aggregations)
elif isinstance(type, tarray):
etype = type.element_type
if isinstance(etype, (hl.tarray, hl.tset)):
while isinstance(etype, (hl.tarray, hl.tset)):
etype = etype.element_type
if isinstance(etype, hl.tstruct):
return ArrayStructExpression(x, type, indices, aggregations)
else:
return typ_to_expr[type.__class__](x, type, indices, aggregations)
elif isinstance(type, tset):
etype = type.element_type
if isinstance(etype, (hl.tarray, hl.tset)):
while isinstance(etype, (hl.tarray, hl.tset)):
etype = etype.element_type
if isinstance(etype, hl.tstruct):
return SetStructExpression(x, type, indices, aggregations)
else:
return typ_to_expr[type.__class__](x, type, indices, aggregations)
elif isinstance(type, tndarray) and is_numeric(type.element_type):
return NDArrayNumericExpression(x, type, indices, aggregations)
elif type in scalars:
return scalars[type](x, type, indices, aggregations)
elif type.__class__ in typ_to_expr:
return typ_to_expr[type.__class__](x, type, indices, aggregations)
else:
raise NotImplementedError(type)
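# Illustrative sketch (editor's addition): `construct_expr` is the dispatch point
# that wraps a typed IR node in the matching expression class above, which is why,
# for example, literals pick up the operators defined in this module.
#
#     >>> type(hl.literal([1, 2, 3])).__name__   # 'ArrayNumericExpression'
#     >>> type(hl.literal('a')).__name__         # 'StringExpression'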
@typecheck(name=str, type=HailType, indices=Indices)
def construct_reference(name, type, indices):
assert isinstance(type, hl.tstruct)
x = ir.SelectFields(ir.TopLevelReference(name), list(type))
return construct_expr(x, type, indices)
@typecheck(name=str, type=HailType, indices=Indices, aggregations=LinkedList)
def construct_variable(name, type,
indices: Indices = Indices(),
aggregations: LinkedList = LinkedList(Aggregation)):
return construct_expr(ir.Ref(name), type, indices, aggregations)
| 30.313393 | 190 | 0.542671 |
7942e7f561335b097cfd8bef8ce06b2245e78368 | 3,464 | py | Python | TemplateDeployment/deployer.py | Azure-Samples/Hybrid-Python-Samples | a30c816fde75856e8b7ed55beff2917cd9e5b100 | ["MIT"] | null | null | null | TemplateDeployment/deployer.py | Azure-Samples/Hybrid-Python-Samples | a30c816fde75856e8b7ed55beff2917cd9e5b100 | ["MIT"] | null | null | null | TemplateDeployment/deployer.py | Azure-Samples/Hybrid-Python-Samples | a30c816fde75856e8b7ed55beff2917cd9e5b100 | ["MIT"] | 2 | 2021-09-14T12:03:24.000Z | 2021-11-15T08:54:12.000Z |
"""A deployer class to deploy a template on Azure"""
import os.path
import json
from haikunator import Haikunator
from azure.profiles import KnownProfiles
from azure.identity import ClientSecretCredential
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode
from msrestazure.azure_cloud import get_cloud_from_metadata_endpoint
from azure.mgmt.resource.resources.models import Deployment
from azure.mgmt.resource.resources.models import DeploymentProperties
class Deployer(object):
""" Initialize the deployer class with config, resource group and public key.
:raises IOError: If the public key path cannot be read (access or not exists)
:raises KeyError: If clientId, clientSecret or tenantId variables are not defined in azureSecretSpConfig.json
"""
name_generator = Haikunator()
def __init__(self, config, resource_group, pub_ssh_key_path='~/id_rsa.pub'):
mystack_cloud = get_cloud_from_metadata_endpoint(config['resourceManagerEndpointUrl'])
credentials = ClientSecretCredential(
client_id = config['clientId'],
client_secret = config['clientSecret'],
tenant_id = config['tenantId'],
authority = mystack_cloud.endpoints.active_directory
)
self.location = config['location']
self.subscription_id = config['subscriptionId']
self.resource_group = resource_group
self.dns_label_prefix = self.name_generator.haikunate()
pub_ssh_key_path = os.path.expanduser(pub_ssh_key_path)
# Will raise if file not exists or not enough permission
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
self.pub_ssh_key = pub_ssh_file_fd.read()
self.credentials = credentials
scope = "openid profile offline_access" + " " + mystack_cloud.endpoints.active_directory_resource_id + "/.default"
self.client = ResourceManagementClient(
credentials , self.subscription_id,
base_url = mystack_cloud.endpoints.resource_manager,
profile=KnownProfiles.v2020_09_01_hybrid,
credential_scopes = [scope])
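    # Illustrative usage sketch (editor's addition; the config keys are assumptions
    # matching the constructor above, e.g. loaded from azureSecretSpConfig.json):
    #
    #     with open('azureSecretSpConfig.json') as f:
    #         config = json.load(f)
    #     deployer = Deployer(config, 'my-resource-group', '~/.ssh/id_rsa.pub')
    #     deployer.deploy()
    #     deployer.destroy()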
def deploy(self):
"""Deploy the template to a resource group."""
resource_group_params = {'location': self.location}
self.client.resource_groups.create_or_update(self.resource_group, resource_group_params)
template_path = os.path.join(os.path.dirname(__file__), 'template.json')
with open(template_path, 'r') as template_file_fd:
template = json.load(template_file_fd)
parameters = {
'sshKeyData': self.pub_ssh_key,
'vmName': 'azure-deployment-sample-vm',
'dnsLabelPrefix': self.dns_label_prefix
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = DeploymentProperties(mode=DeploymentMode.incremental, template=template, parameters=parameters)
deployment_async_operation = self.client.deployments.begin_create_or_update(
self.resource_group,
'azure-sample',
Deployment(properties=deployment_properties)
)
deployment_async_operation.wait()
def destroy(self):
"""Destroy the given resource group"""
self.client.resource_groups.begin_delete(self.resource_group).result()
print("\nDeleted: {}".format(self.resource_group)) | 43.848101 | 127 | 0.707852 |
7942e853a05922acfe50d64faf5a7e759e4ff9ff | 1,326 | py | Python | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/alanimationplayer_tutorial_declarePathForTags.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | ["MIT"] | null | null | null | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/alanimationplayer_tutorial_declarePathForTags.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | ["MIT"] | null | null | null | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/alanimationplayer_tutorial_declarePathForTags.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | ["MIT"] | 1 | 2020-10-06T07:44:12.000Z | 2020-10-06T07:44:12.000Z |
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Use declarePathForTags Method"""
import qi
import argparse
import sys
def main(session):
"""
This example uses the declarePathForTags method.
"""
# Get the service ALAnimationPlayer.
animation_player_service = session.service("ALAnimationPlayer")
# With this command we declare a package of animations having the uid "myanimlib", structured as follow:
# Nao/Stand/...
# Sit/...
# SitOnPod/...
# Pepper/Stand/...
animation_player_service.declarePathForTags("myanimlib/[robot]/[posture]/")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
session = qi.Session()
try:
session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
main(session)
| 30.837209 | 108 | 0.612368 |
7942e88010057daad64937a55a71fabac24ee42f | 3,044 | py | Python | py/g1/devtools/buildtools/g1/devtools/buildtools/capnps.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | ["MIT"] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/g1/devtools/buildtools/g1/devtools/buildtools/capnps.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | ["MIT"] | null | null | null | py/g1/devtools/buildtools/g1/devtools/buildtools/capnps.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | ["MIT"] | null | null | null |
__all__ = [
'make_compile_schemas',
]
import subprocess
import warnings
from distutils import log
from distutils.core import Command
from pathlib import Path
def make_compile_schemas(schemas, *, import_paths=()):
class compile_schemas(Command):
IMPORT_PATH = ':'.join(map(str, import_paths))
description = "compile Cap'n Proto schema files"
user_options = [
('import-path=', None, 'schema file search path'),
]
def initialize_options(self):
self.import_path = self.IMPORT_PATH
def finalize_options(self):
pass
def run(self):
_compile_schemas(schemas, self.import_path.split(':'))
return compile_schemas
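# Illustrative usage sketch (editor's addition; the schema import and output paths
# are made-up examples) for wiring the generated command class into a setup.py:
#
#     setup(
#         ...,
#         cmdclass={
#             'compile_schemas': make_compile_schemas(
#                 schemas={'/foo/bar.capnp': 'build/bar.request'},
#                 import_paths=['src/schemas'],
#             ),
#         },
#     )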
def _compile_schemas(schemas, import_paths):
"""Generate the CodeGeneratorRequest."""
schema_paths = _find_schema_paths(schemas, import_paths)
for import_, output_path in sorted(schemas.items()):
output_path = Path(output_path).absolute()
if not output_path.parent.is_dir():
cmd = ['mkdir', '--parents', str(output_path.parent)]
subprocess.run(cmd, check=True)
_compile(schema_paths[import_], import_paths, output_path)
def _is_absolute_import(import_):
return import_.startswith('/')
def _find_schema_paths(imports, import_paths):
"""Find all imported Cap'n Proto schema files."""
for import_ in imports:
if not _is_absolute_import(import_):
raise ValueError('all input must be absolute: %s' % import_)
import_paths = [Path(p).absolute() for p in import_paths]
for import_path in import_paths:
if not import_path.is_dir():
warnings.warn('not a directory: %s' % import_path)
schema_paths = {}
for import_ in imports:
if import_ not in schema_paths:
schema_paths[import_] = _find_import_path(import_paths, import_)
return schema_paths
def _find_import_path(import_paths, import_):
assert _is_absolute_import(import_)
found = []
for import_path in import_paths:
schema_path = _make_schema_path(import_path, import_)
if schema_path.is_file():
found.append(schema_path)
if not found:
raise FileNotFoundError('no import path for %r' % import_)
if len(found) > 1:
raise RuntimeError(
'find multiple import paths for %r: %s' % (import_, found)
)
return found[0]
def _make_schema_path(import_path, import_):
# import_ must be an absolute path.
assert import_[0] == '/' and import_[1] != '/', import_
return import_path / import_[1:]
def _compile(schema_path, import_paths, output_path):
"""Compile the schema."""
cmd = ['capnp', 'compile', '-o-']
for import_path in import_paths:
cmd.append('--import-path=%s' % Path(import_path).absolute())
cmd.append(str(schema_path))
log.info('execute: %s > %s', ' '.join(cmd), output_path)
with output_path.open('wb') as output:
subprocess.run(cmd, stdout=output, check=True)
| 29.843137 | 76 | 0.658673 |
7942ea1dc20b742e54bbe171e9a323b0caaaf995 | 13,075 | py | Python | sublimerepl.py | tomschenkjr/SublimeREPL | 8cfe65d66ce632f97699ca51f2743ed7e3c8df86 | ["BSD-2-Clause"] | 2 | 2016-02-15T03:13:21.000Z | 2019-08-01T19:19:36.000Z | sublimerepl.py | tomschenkjr/SublimeREPL | 8cfe65d66ce632f97699ca51f2743ed7e3c8df86 | ["BSD-2-Clause"] | null | null | null | sublimerepl.py | tomschenkjr/SublimeREPL | 8cfe65d66ce632f97699ca51f2743ed7e3c8df86 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Wojciech Bederski (wuub.net)
# All rights reserved.
# See LICENSE.txt for details.
import threading
import Queue
import sublime
import sublime_plugin
import repl
import os
import buzhug
repl_views = {}
PLATFORM = sublime.platform().lower()
SUBLIMEREPL_DIR = os.getcwdu()
SETTINGS_FILE = 'SublimeREPL.sublime-settings'
def repl_view(view):
id = view.settings().get("repl_id")
if not repl_views.has_key(id):
return None
rv = repl_views[id]
rv.update_view(view)
return rv
def find_repl(external_id):
for rv in repl_views.values():
if rv.external_id == external_id:
return rv
return None
def _delete_repl(view):
id = view.settings().get("repl_id")
if not repl_views.has_key(id):
return None
del repl_views[id]
def subst_for_translate(window):
""" Return all available substitutions"""
import os.path
import locale
res = {
"packages": sublime.packages_path(),
"installed_packages" : sublime.installed_packages_path()
}
if sublime.platform() == "windows":
res["win_cmd_encoding"] = locale.getdefaultlocale()[1]
av = window.active_view()
if av is None:
return res
filename = av.file_name()
if not filename:
return res
filename = os.path.abspath(filename)
res["file"] = filename
res["file_path"] = os.path.dirname(filename)
res["file_basename"] = os.path.basename(filename)
return res
def translate_string(window, string, subst=None):
#$file, $file_path, $packages
from string import Template
if subst is None:
subst = subst_for_translate(window)
return Template(string).safe_substitute(**subst)
def translate_list(window, list, subst=None):
if subst is None:
subst = subst_for_translate(window)
return [translate(window, x, subst) for x in list]
def translate_dict(window, dictionary, subst=None):
if subst is None:
subst = subst_for_translate(window)
if PLATFORM in dictionary:
return translate(window, dictionary[PLATFORM], subst)
for k, v in dictionary.items():
dictionary[k] = translate(window, v, subst)
return dictionary
def translate(window, obj, subst=None):
if subst is None:
subst = subst_for_translate(window)
if isinstance(obj, dict):
return translate_dict(window, obj, subst)
if isinstance(obj, basestring):
return translate_string(window, obj, subst)
if isinstance(obj, list):
return translate_list(window, obj, subst)
return obj
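# Illustrative example (hypothetical file path, not taken from any real setup):
# with an active view on /home/me/project/script.py, the substitutions above
# expand REPL command definitions roughly like this:
#
#     translate_string(window, "python -i $file")
#     # -> "python -i /home/me/project/script.py"
#     translate(window, ["$file_path", "$packages"])
#     # -> ["/home/me/project", <Sublime Text Packages directory>]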
class ReplReader(threading.Thread):
def __init__(self, repl):
super(ReplReader, self).__init__()
self.repl = repl
self.daemon = True
self.queue = Queue.Queue()
def run(self):
r = self.repl
q = self.queue
while True:
result = r.read()
q.put(result)
if result is None:
break
class HistoryMatchList(object):
def __init__(self, command_prefix, commands):
self._command_prefix = command_prefix
self._commands = commands
self._cur = len(commands) # no '-1' on purpose
def current_command(self):
if not self._commands:
return ""
return self._commands[self._cur]
def prev_command(self):
self._cur = max(0, self._cur - 1)
return self.current_command()
def next_command(self):
self._cur = min(len(self._commands) -1, self._cur + 1)
return self.current_command()
class History(object):
def __init__(self):
self._last = None
def push(self, command):
cmd = command.rstrip()
if not cmd or cmd == self._last:
return
self.append(cmd)
self._last = cmd
def append(self, cmd):
        raise NotImplementedError
def match(self, command_prefix):
        raise NotImplementedError
class MemHistory(History):
def __init__(self):
super(MemHistory, self).__init__()
self._stack = []
def append(self, cmd):
self._stack.append(cmd)
def match(self, command_prefix):
matching_commands = []
for cmd in self._stack:
if cmd.startswith(command_prefix):
matching_commands.append(cmd)
return HistoryMatchList(command_prefix, matching_commands)
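# Behaviour sketch (illustrative only):
#
#     h = MemHistory()
#     h.push("print 1"); h.push("print 2"); h.push("import os")
#     m = h.match("pri")          # HistoryMatchList over ["print 1", "print 2"]
#     m.prev_command()            # -> "print 2"
#     m.prev_command()            # -> "print 1"
#
# The history-navigation commands further below rely on exactly this prefix
# matching.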
class PersistentHistory(History):
def __init__(self, external_id):
import datetime
super(PersistentHistory, self).__init__()
path = os.path.join(sublime.packages_path(), "User", "SublimeREPLHistory")
self._db = buzhug.TS_Base(path)
self._external_id = external_id
self._db.create(("external_id", unicode), ("command", unicode), ("ts", datetime.datetime), mode="open")
def append(self, cmd):
from datetime import datetime
self._db.insert(external_id=self._external_id, command=cmd, ts=datetime.now())
def match(self, command_prefix):
import re
pattern = re.compile("^" + re.escape(command_prefix) + ".*")
        results = self._db.select(None, 'external_id==eid and p.match(command)', eid=self._external_id, p=pattern)
        results.sort_by("+ts")
        return HistoryMatchList(command_prefix, [x.command for x in results])
class ReplView(object):
def __init__(self, view, repl, syntax):
self.repl = repl
self._view = view
if syntax:
view.set_syntax_file(syntax)
self._output_end = view.size()
view.settings().set("repl_id", repl.id)
view.settings().set("repl", True)
view.settings().set("translate_tabs_to_spaces", False)
self._repl_reader = ReplReader(repl)
self._repl_reader.start()
if self.external_id and sublime.load_settings(SETTINGS_FILE).get("presistent_history_enabled"):
self._history = PersistentHistory(self.external_id)
else:
self._history = MemHistory()
self._history_match = None
# begin refreshing attached view
self.update_view_loop()
@property
def external_id(self):
return self.repl.external_id
def update_view(self, view):
"""If projects were switched, a view could be a new instance"""
if self._view is not view:
self._view = view
def user_input(self):
"""Returns text entered by the user"""
region = sublime.Region(self._output_end, self._view.size())
return self._view.substr(region)
def adjust_end(self):
if self.repl.suppress_echo:
v = self._view
vsize = v.size()
self._output_end = min(vsize, self._output_end)
edit = v.begin_edit()
v.erase(edit, sublime.Region(self._output_end, vsize))
v.end_edit(edit)
else:
self._output_end = self._view.size()
def write(self, unistr):
"""Writes output from Repl into this view."""
        # string is assumed to be already correctly encoded
v = self._view
edit = v.begin_edit()
try:
v.insert(edit, self._output_end, unistr)
self._output_end += len(unistr)
finally:
v.end_edit(edit)
self.scroll_to_end()
def scroll_to_end(self):
v = self._view
v.show(v.line(v.size()).begin())
def append_input_text(self, text, edit=None):
e = edit
if not edit:
e = self._view.begin_edit()
self._view.insert(e, self._view.size(), text)
if not edit:
self._view.end_edit(e)
def new_output(self):
"""Returns new data from Repl and bool indicating if Repl is still
working"""
q = self._repl_reader.queue
data = ""
try:
while True:
packet = q.get_nowait()
if packet is None:
return data, False
data += packet
except Queue.Empty:
return data, True
def update_view_loop(self):
(data, is_still_working) = self.new_output()
if data:
self.write(data)
if is_still_working:
sublime.set_timeout(self.update_view_loop, 100)
else:
self.write("\n***Repl Closed***\n""")
self._view.set_read_only(True)
if sublime.load_settings(SETTINGS_FILE).get("view_auto_close"):
window = self._view.window()
window.focus_view(self._view)
window.run_command("close")
def push_history(self, command):
self._history.push(command)
self._history_match = None
def ensure_history_match(self):
user_input = self.user_input()
if self._history_match is not None:
if user_input != self._history_match.current_command():
# user did something! reset
self._history_match = None
if self._history_match is None:
self._history_match = self._history.match(user_input)
def view_previous_command(self, edit):
self.ensure_history_match()
self.replace_current_with_history(edit, self._history_match.prev_command())
def view_next_command(self, edit):
self.ensure_history_match()
self.replace_current_with_history(edit, self._history_match.next_command())
def replace_current_with_history(self, edit, cmd):
if not cmd:
return #don't replace if no match
user_region = sublime.Region(self._output_end, self._view.size())
self._view.erase(edit, user_region)
self._view.insert(edit, user_region.begin(), cmd)
class ReplOpenCommand(sublime_plugin.WindowCommand):
def run(self, encoding, type, syntax=None, **kwds):
try:
window = self.window
kwds = translate(window, kwds)
encoding = translate(window, encoding)
r = repl.Repl.subclass(type)(encoding, **kwds)
view = window.new_file()
rv = ReplView(view, r, syntax)
repl_views[r.id] = rv
view.set_scratch(True)
view.set_name("*REPL* [%s]" % (r.name(),))
return rv
except Exception, e:
sublime.error_message(repr(e))
class ReplEnterCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
if v.sel()[0].begin() != v.size():
v.run_command("insert", {"characters": "\n"})
return
rv = repl_view(v)
rv.push_history(rv.user_input()) # don't include cmd_postfix in history
v.run_command("insert", {"characters": rv.repl.cmd_postfix})
command = rv.user_input()
rv.adjust_end()
rv.repl.write(command)
class ReplViewPreviousCommand(sublime_plugin.TextCommand):
def run(self, edit):
repl_view(self.view).view_previous_command(edit)
class ReplViewNextCommand(sublime_plugin.TextCommand):
def run(self, edit):
repl_view(self.view).view_next_command(edit)
class SublimeReplListener(sublime_plugin.EventListener):
def on_close(self, view):
rv = repl_view(view)
if not rv:
return
rv.repl.close()
_delete_repl(view)
class SubprocessReplSendSignal(sublime_plugin.TextCommand):
def run(self, edit, signal=None):
rv = repl_view(self.view)
subrepl = rv.repl
signals = subrepl.available_signals()
sorted_names = sorted(signals.keys())
if signals.has_key(signal):
#signal given by name
self.safe_send_signal(subrepl, signals[signal])
return
if signal in signals.values():
#signal given by code (correct one!)
self.safe_send_signal(subrepl, signal)
return
# no or incorrect signal given
def signal_selected(num):
if num == -1:
return
signame = sorted_names[num]
sigcode = signals[signame]
self.safe_send_signal(subrepl, sigcode)
self.view.window().show_quick_panel(sorted_names, signal_selected)
def safe_send_signal(self, subrepl, sigcode):
try:
subrepl.send_signal(sigcode)
except Exception, e:
sublime.error_message(str(e))
def is_visible(self):
rv = repl_view(self.view)
return rv and hasattr(rv.repl, "send_signal")
def is_enabled(self):
return self.is_visible()
def description(self):
return "Send SIGNAL"
 | 32.046569 | 115 | 0.594493 |
7942eb97db0a004db663fc056986f310bb984d88 | 54,530 | py | Python | src/pystage/en/sprite.py | pystage/pystage | 4a76e95f6de2df59736de17fe81219485fde1556 | ["MIT"] | 12 | 2021-05-20T12:49:52.000Z | 2022-01-12T02:15:33.000Z | src/pystage/en/sprite.py | pystage/pystage | 4a76e95f6de2df59736de17fe81219485fde1556 | ["MIT"] | 14 | 2021-05-25T09:28:33.000Z | 2021-09-10T07:54:45.000Z | src/pystage/en/sprite.py | pystage/pystage | 4a76e95f6de2df59736de17fe81219485fde1556 | ["MIT"] | 3 | 2021-05-25T12:58:36.000Z | 2022-02-18T04:19:21.000Z |
from pystage.core.sprite import CoreSprite
class Sprite():
def __init__(self, core_sprite):
self._core : CoreSprite = core_sprite
self._core.facade = self
def create_clone_of(self, sprite='_myself_'):
"""create clone of %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
        return self._core.control_create_clone_of(sprite=sprite)
def delete_this_clone(self):
"""delete this clone
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.control_delete_this_clone()
def when_i_start_as_a_clone(self, key, generator_function, name='', no_refresh=False):
"""when I start as a clone
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
key : FILL
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.control_start_as_clone(key, generator_function, name=name, no_refresh=no_refresh)
def stop_all(self):
"""stop all
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.control_stop_all()
def stop_other_scripts_in_sprite(self):
"""stop other scripts in sprite
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.control_stop_other()
def stop_this_script(self):
"""stop this script
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.control_stop_this()
def wait(self, secs):
"""wait %1 seconds
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
secs : FILL
Returns
-------
"""
return self._core.control_wait(secs)
def change_variable_by(self, name, value):
"""change %1 by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
value : FILL
Returns
-------
"""
return self._core.data_changevariableby(name, value)
def hide_variable(self, name):
"""hide variable %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.data_hidevariable(name)
def set_variable(self, name, value):
"""set %1 to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
value : FILL
Returns
-------
"""
return self._core.data_setvariableto(name, value)
def show_variable(self, name):
"""show variable %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.data_showvariable(name)
def get_variable(self, name):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.data_variable(name)
def broadcast(self, message):
"""broadcast %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
message : FILL
Returns
-------
"""
return self._core.event_broadcast(message)
def broadcast_and_wait(self, message):
"""broadcast %1 and wait
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
message : FILL
Returns
-------
"""
return self._core.event_broadcastandwait(message)
def when_backdrop_switches_to(self, backdrop, generator_function, name='', no_refresh=False):
"""when backdrop switches to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
backdrop : FILL
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whenbackdropswitchesto(backdrop, generator_function, name=name, no_refresh=no_refresh)
def when_i_receive_message(self, message, generator_function, name='', no_refresh=False):
"""when I receive %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
message : FILL
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whenbroadcastreceived(message, generator_function, name=name, no_refresh=no_refresh)
def when_program_starts(self, generator_function, name='', no_refresh=False):
"""when <greenflag> clicked
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whenflagclicked(generator_function, name=name, no_refresh=no_refresh)
def when_loudness_greater_than(self, value, generator_function, name='', no_refresh=False):
"""when loudness <greater> %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whengreaterthan_loudness(value, generator_function, name=name, no_refresh=no_refresh)
def when_timer_greater_than(self, value, generator_function, name='', no_refresh=False):
"""when timer <greater> %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whengreaterthan_timer(value, generator_function, name=name, no_refresh=no_refresh)
def when_key_pressed(self, key, generator_function, name='', no_refresh=False):
"""when %1 key pressed
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
key : FILL
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whenkeypressed(key, generator_function, name=name, no_refresh=no_refresh)
def when_this_sprite_clicked(self, generator_function, name='', no_refresh=False):
"""when this sprite clicked
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
generator_function : FILL
name : FILL
no_refresh : FILL
Returns
-------
"""
        return self._core.event_whenthisspriteclicked(generator_function, name=name, no_refresh=no_refresh)
def backdrop_name(self):
"""backdrop name
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_backdropnumbername_name()
def backdrop_number(self):
"""backdrop number
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_backdropnumbername_number()
def change_brightness_effect_by(self, value):
"""change brightness effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_brightness(value)
def change_color_effect_by(self, value):
"""change color effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_color(value)
def change_fisheye_effect_by(self, value):
"""change fisheye effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_fisheye(value)
def change_ghost_effect_by(self, value):
"""change ghost effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_ghost(value)
def change_mosaic_effect_by(self, value):
"""change mosaic effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_mosaic(value)
def change_pixelate_effect_by(self, value):
"""change pixelate effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_pixelate(value)
def change_whirl_effect_by(self, value):
"""change whirl effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_changeeffectby_whirl(value)
def change_size_by(self, percent):
"""change size by %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
percent : FILL
Returns
-------
"""
return self._core.looks_changesizeby(percent)
def clear_graphic_effects(self):
"""clear graphic effects
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_cleargraphiceffects()
def costume_name(self):
"""costume name
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_costumenumbername_name()
def costume_number(self):
"""costume number
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_costumenumbername_number()
def go_backward(self, value):
"""go backward %2 layers
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_goforwardbackwardlayers_backward(value)
def go_forward(self, value):
"""go forward %2 layers
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_goforwardbackwardlayers_forward(value)
def go_to_back_layer(self):
"""go to back layer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_gotofrontback_back()
def go_to_front_layer(self):
"""go to front layer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_gotofrontback_front()
def hide(self):
"""hide
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_hide()
def next_backdrop(self):
"""next backdrop
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_nextbackdrop()
def next_costume(self):
"""next costume
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_nextcostume()
def say(self, text):
"""say %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
text : FILL
Returns
-------
"""
return self._core.looks_say(text)
def say_for_seconds(self, text, secs):
"""say %1 for %2 seconds
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
text : FILL
secs : FILL
Returns
-------
"""
return self._core.looks_sayforsecs(text, secs)
def set_brightness_effect_to(self, value):
"""set brightness effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_brightness(value)
def set_color_effect_to(self, value):
"""set color effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_color(value)
def set_fisheye_effect_to(self, value):
"""set fisheye effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_fisheye(value)
def set_ghost_effect_to(self, value):
"""set ghost effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_ghost(value)
def set_mosaic_effect_to(self, value):
"""set mosaic effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_mosaic(value)
def set_pixelate_effect_to(self, value):
"""set pixelate effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_pixelate(value)
def set_whirl_effect_to(self, value):
"""set whirl effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.looks_seteffectto_whirl(value)
def set_size_to(self, percent):
"""set size to %1 %
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
percent : FILL
Returns
-------
"""
return self._core.looks_setsizeto(percent)
def show(self):
"""show
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_show()
def size(self):
"""size
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.looks_size()
def switch_backdrop_to(self, backdrop):
"""switch backdrop to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
backdrop : FILL
Returns
-------
"""
return self._core.looks_switchbackdropto(backdrop)
def switch_costume_to(self, costume):
"""switch costume to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
costume : FILL
Returns
-------
"""
return self._core.looks_switchcostumeto(costume)
def think(self, text):
"""think %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
text : FILL
Returns
-------
"""
return self._core.looks_think(text)
def think_for_seconds(self, text, secs):
"""think %1 for %2 seconds
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
text : FILL
secs : FILL
Returns
-------
"""
return self._core.looks_thinkforsecs(text, secs)
def change_x_by(self, value):
"""change x by %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.motion_changexby(value)
def change_y_by(self, value):
"""change y by %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.motion_changeyby(value)
def direction(self):
"""direction
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_direction()
def glide_to_x_y(self, secs, x, y):
"""glide %1 secs to x: %2 y: %3
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
secs : FILL
x : FILL
y : FILL
Returns
-------
"""
return self._core.motion_glidesecstoxy(secs, x, y)
def glide_to_mouse_pointer(self, secs):
"""glide %1 secs to mouse-pointer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
secs : FILL
Returns
-------
"""
return self._core.motion_glideto_pointer(secs)
def glide_to_random_position(self, secs):
"""glide %1 secs to random position
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
secs : FILL
Returns
-------
"""
return self._core.motion_glideto_random(secs)
def glide_to_sprite(self, secs, sprite):
"""glide %1 secs to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
secs : FILL
sprite : FILL
Returns
-------
"""
return self._core.motion_glideto_sprite(secs, sprite)
def go_to_mouse_pointer(self):
"""go to mouse-pointer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_goto_pointer()
def go_to_random_position(self):
"""go to random position
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_goto_random()
def go_to_sprite(self, sprite):
"""go to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.motion_goto_sprite(sprite)
def go_to_x_y(self, x, y):
"""go to x: %1 y: %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
x : FILL
y : FILL
Returns
-------
"""
return self._core.motion_gotoxy(x, y)
def if_on_edge_bounce(self):
"""if on edge, bounce
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_ifonedgebounce()
def move(self, steps):
"""move %1 steps
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
steps : FILL
Returns
-------
"""
return self._core.motion_movesteps(steps)
def point_in_direction(self, direction):
"""point in direction %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
direction : FILL
Returns
-------
"""
return self._core.motion_pointindirection(direction)
def point_towards_mouse_pointer(self):
"""point towards mouse-pointer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_pointtowards_pointer()
def point_towards_sprite(self, sprite):
"""point towards %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.motion_pointtowards_sprite(sprite)
def set_rotation_style_none(self):
"""set rotation style %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
"""
return self._core.motion_setrotationstyle_dontrotate()
def set_rotation_style_left_right(self):
"""set rotation style %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
"""
return self._core.motion_setrotationstyle_leftright()
def set_rotation_style_around(self):
"""set rotation style %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
"""
return self._core.motion_setrotationstyle_allaround()
def set_x(self, value):
"""set x to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.motion_setx(value)
def set_y(self, value):
"""set y to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.motion_sety(value)
def turn_left(self, deg):
"""turn left %2 degrees
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
deg : FILL
Returns
-------
"""
return self._core.motion_turnleft(deg)
def turn_right(self, deg):
"""turn right %2 degrees
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
deg : FILL
Returns
-------
"""
return self._core.motion_turnright(deg)
def x_position(self):
"""x position
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_xposition()
def y_position(self):
"""y position
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.motion_yposition()
def calculate(self, operator, number):
"""%1 of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
operator : FILL
number : FILL
Returns
-------
"""
return self._core.operator_mathop(operator, number)
def pick_random(self, start, end):
"""pick random %1 to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
start : FILL
end : FILL
Returns
-------
"""
return self._core.operator_random(start, end)
def change_pen_brightness_by(self, value):
"""change pen brightness by [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_changePenColorParamBy_brightness(value)
def change_pen_hue_by(self, value):
"""change pen color by [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_changePenColorParamBy_color(value)
def change_pen_saturation_by(self, value):
"""change pen saturation by [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_changePenColorParamBy_saturation(value)
def change_pen_transparency_by(self, value):
"""change pen transparency by [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_changePenColorParamBy_transparency(value)
def change_pen_size_by(self, value):
"""change pen size by [SIZE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_changePenSizeBy(value)
def erase_all(self):
"""erase all
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.pen_clear()
def pen_down(self):
"""pen down
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.pen_penDown()
def pen_up(self):
"""pen up
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.pen_penUp()
def set_pen_brightness_to(self, value):
"""set pen brightness to [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_setPenColorParamTo_brightness(value)
def set_pen_hue_to(self, value):
"""set pen color to [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_setPenColorParamTo_color(value)
def set_pen_saturation_to(self, value):
"""set pen saturation to [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_setPenColorParamTo_saturation(value)
def set_pen_transparency_to(self, value):
"""set pen transparency to [VALUE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_setPenColorParamTo_transparency(value)
def set_pen_color(self, color):
"""set pen color to [COLOR]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
color : FILL
Returns
-------
"""
return self._core.pen_setPenColorToColor(color)
def set_pen_size_to(self, value):
"""set pen size to [SIZE]
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.pen_setPenSizeTo(value)
def stamp(self):
"""stamp
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.pen_stamp()
def add_costume(self, name, center_x=None, center_y=None, factor=1):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
center_x : FILL
center_y : FILL
factor : FILL
Returns
-------
"""
return self._core.pystage_addcostume(name, center_x, center_y, factor)
def add_sound(self, name):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.pystage_addsound(name)
def insert_costume(self, index, name, center_x=None, center_y=None, factor=1):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
index : FILL
name : FILL
center_x : FILL
center_y : FILL
Returns
-------
"""
return self._core.pystage_insertcostume(index, name, center_x, center_y, factor)
def create_variable(self, name, all_sprites=True):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
all_sprites : FILL
Returns
-------
"""
return self._core.pystage_makevariable(name, all_sprites=True)
def replace_costume(self, index, name, center_x=None, center_y=None, factor=1):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
index : FILL
name : FILL
center_x : FILL
center_y : FILL
Returns
-------
"""
return self._core.pystage_replacecostume(index, name, center_x, center_y, factor)
def set_monitor_position(self, name, x, y):
"""
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
x : FILL
y : FILL
Returns
-------
"""
return self._core.pystage_setmonitorposition(name, x, y)
def pystage_setmonitorstyle_large(self, name):
"""
Translation string:
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.pystage_setmonitorstyle_large(name)
def pystage_setmonitorstyle_normal(self, name):
"""
Translation string:
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.pystage_setmonitorstyle_normal(name)
def pystage_setmonitorstyle_slider(self, name):
"""
Translation string:
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.pystage_setmonitorstyle_slider(name)
def answer(self):
"""answer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_answer()
def ask_and_wait(self, question):
"""ask %1 and wait
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
question : FILL
Returns
-------
"""
return self._core.sensing_askandwait(question)
def color_is_touching(self, sprite_color, color):
"""color %1 is touching %2?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite_color : FILL
color : FILL
Returns
-------
"""
return self._core.sensing_coloristouchingcolor(sprite_color, color)
def current_date(self):
"""current date
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_date()
def current_day_of_week(self):
"""current day of week
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_dayofweek()
def current_hour(self):
"""current hour
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_hour()
def current_minute(self):
"""current minute
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_minute()
def current_month(self):
"""current month
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_month()
def current_second(self):
"""current second
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_second()
def current_year(self):
"""current year
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_current_year()
def days_since(self):
"""days since 2000
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_dayssince2000()
def distance_to_mouse_pointer(self):
"""distance to mouse-pointer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_distanceto_pointer()
def distance_to_sprite(self, sprite):
"""distance to %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_distanceto_sprite(sprite)
def key_pressed(self, key):
"""key %1 pressed?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
key : FILL
Returns
-------
"""
return self._core.sensing_keypressed(key)
def loudness(self):
"""loudness
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_loudness()
def mouse_down(self):
"""mouse down?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_mousedown()
def mouse_x(self):
"""mouse x
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_mousex()
def mouse_y(self):
"""mouse y
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_mousey()
def backdrop_name_of(self, stage='_stage_'):
"""backdrop name of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
stage : FILL
Returns
-------
"""
        return self._core.sensing_of_backdropname(stage=stage)
def backdrop_of(self, stage='_stage_'):
"""backdrop # of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
stage : FILL
Returns
-------
"""
        return self._core.sensing_of_backdropnumber(stage=stage)
def costume_name_of(self, sprite):
"""costume name of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_of_costumename(sprite)
def costume_of(self, sprite):
"""costume # of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_of_costumenumber(sprite)
def direction_of(self, sprite):
"""direction of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_of_direction(sprite)
def size_of(self, sprite):
"""size of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_of_size(sprite)
def get_variable_of(self, variable, sprite='_stage_'):
"""%1 of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
variable : FILL
sprite : FILL
Returns
-------
"""
        return self._core.sensing_of_variable(variable, sprite=sprite)
def volume_of(self, sprite='_stage_'):
"""volume of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
        return self._core.sensing_of_volume(sprite=sprite)
def x_position_of(self, sprite):
"""x position of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_of_xposition(sprite)
def y_position_of(self, sprite):
"""y position of %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_of_yposition(sprite)
def reset_timer(self):
"""reset timer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_resettimer()
def set_drag_mode_draggable(self):
"""set drag mode draggable
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_setdragmode_draggable()
def set_drag_mode_not_draggable(self):
"""set drag mode not draggable
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_setdragmode_notdraggable()
def timer(self):
"""timer
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_timer()
def touching_color(self, color):
"""touching color %1?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
color : FILL
Returns
-------
"""
return self._core.sensing_touchingcolor(color)
def touching_edge(self):
"""touching edge?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_touchingobject_edge()
def touching_mouse_pointer(self):
"""touching mouse-pointer?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_touchingobject_pointer()
def touching(self, sprite):
"""touching %1?
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
sprite : FILL
Returns
-------
"""
return self._core.sensing_touchingobject_sprite(sprite)
def username(self):
"""username
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sensing_username()
def change_pan_left_right_effect_by(self, value):
"""change pan left/right effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.sound_changeeffectby_pan(value)
def change_pitch_effect_by(self, value):
"""change pitch effect by %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.sound_changeeffectby_pitch(value)
def change_volume_by(self, value):
"""change volume by %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.sound_changevolumeby(value)
def clear_sound_effects(self):
"""clear sound effects
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sound_cleareffects()
def start_sound(self, name, loop=0):
"""start sound %1
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
loop : FILL
Returns
-------
"""
        return self._core.sound_play(name, loop=loop)
def play_sound_until_done(self, name):
"""play sound %1 until done
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
name : FILL
Returns
-------
"""
return self._core.sound_playuntildone(name)
def set_pan_left_right_effect_to(self, value):
"""set pan left/right effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.sound_seteffectto_pan(value)
def set_pitch_effect_to(self, value):
"""set pitch effect to %2
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.sound_seteffectto_pitch(value)
def set_volume_to(self, value):
"""set volume to %1%
Engl. Translation for your reference: ...
Engl. Documentation when available...
Parameters
----------
value : FILL
Returns
-------
"""
return self._core.sound_setvolumeto(value)
def stop_all_sounds(self):
"""stop all sounds
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sound_stopallsounds()
def volume(self):
"""volume
Engl. Translation for your reference: ...
Engl. Documentation when available...
Returns
-------
"""
return self._core.sound_volume()
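# --- Illustrative usage sketch (assumptions, not part of pyStage itself) ---
# The class above is a thin English-language facade over CoreSprite.  In a
# typical pyStage program a sprite obtained from the stage is wired to a
# generator function; how stages and sprites are created lives in other
# modules, so the names below are placeholders:
#
#     def dance(sprite):
#         while True:
#             sprite.move(10)
#             sprite.turn_right(15)
#             yield          # hand control back to the scheduler
#
#     zombie = stage.add_sprite()          # hypothetical helper
#     zombie.add_costume("zombie_idle")
#     zombie.when_program_starts(dance)
#     zombie.say_for_seconds("Hello!", 2)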
 | 20.624054 | 111 | 0.487566 |
7942eba2ee095d3f8c18b9f9b7ee0f1e58f11a4d | 4,626 | py | Python | demisto_sdk/commands/format/update_incidentfields.py | SergeBakharev/demisto-sdk | 17d00942a1bd33039a8aba9ddffecfd81008d275 | ["MIT"] | null | null | null | demisto_sdk/commands/format/update_incidentfields.py | SergeBakharev/demisto-sdk | 17d00942a1bd33039a8aba9ddffecfd81008d275 | ["MIT"] | null | null | null | demisto_sdk/commands/format/update_incidentfields.py | SergeBakharev/demisto-sdk | 17d00942a1bd33039a8aba9ddffecfd81008d275 | ["MIT"] | null | null | null |
from typing import List, Tuple
import click
import ujson
from demisto_sdk.commands.common.tools import (get_dict_from_file,
get_item_marketplaces,
open_id_set_file)
from demisto_sdk.commands.format.format_constants import (ERROR_RETURN_CODE,
SKIP_RETURN_CODE,
SUCCESS_RETURN_CODE)
from demisto_sdk.commands.format.update_generic_json import BaseUpdateJSON
class IncidentFieldJSONFormat(BaseUpdateJSON):
"""IncidentFieldJSONFormat class is designed to update incident fields JSON file according to Demisto's convention.
Attributes:
input (str): the path to the file we are updating at the moment.
output (str): the desired file name to save the updated version of the JSON to.
"""
def __init__(self,
input: str = '',
output: str = '',
path: str = '',
from_version: str = '',
no_validate: bool = False,
verbose: bool = False,
**kwargs):
super().__init__(input=input, output=output, path=path, from_version=from_version, no_validate=no_validate,
verbose=verbose, **kwargs)
self.id_set_path = kwargs.get('id_set_path')
def run_format(self) -> int:
try:
click.secho(f'\n================= Updating file {self.source_file} =================', fg='bright_blue')
super().update_json()
self.format_marketplaces_field_of_aliases()
self.set_default_values_as_needed()
self.save_json_to_destination_file()
return SUCCESS_RETURN_CODE
except Exception as err:
if self.verbose:
click.secho(f'\nFailed to update file {self.source_file}. Error: {err}', fg='red')
return ERROR_RETURN_CODE
def format_marketplaces_field_of_aliases(self):
"""
When formatting incident field with aliases,
the function will update the marketplaces in the fields mapped by the aliases to be XSOAR marketplace only.
"""
        if not self.id_set_path:
            click.secho('Skipping "Aliases" formatting as id_set_path argument is missing', fg='yellow')
            return
aliases = self.data.get('Aliases', {})
if aliases:
for alias_field, alias_field_file_path in self._get_incident_fields_by_aliases(aliases):
marketplaces = get_item_marketplaces(item_path=alias_field_file_path, item_data=alias_field)
if len(marketplaces) != 1 or marketplaces[0] != 'xsoar':
alias_field['marketplaces'] = ['xsoar']
click.secho(f'\n================= Updating file {alias_field_file_path} =================', fg='bright_blue')
self._save_alias_field_file(dest_file_path=alias_field_file_path, field_data=alias_field)
def _get_incident_fields_by_aliases(self, aliases: List[dict]):
"""Get from the id_set the actual fields for the given aliases
Args:
aliases (list): The alias list.
Returns:
            A generator that generates a tuple with the incident field and its path for each alias in the given list.
"""
alias_ids: set = {f'incident_{alias.get("cliName")}' for alias in aliases}
id_set = open_id_set_file(self.id_set_path)
incident_field_list: list = id_set.get('IncidentFields')
for incident_field in incident_field_list:
field_id = list(incident_field.keys())[0]
if field_id in alias_ids:
alias_data = incident_field[field_id]
alias_file_path = alias_data.get('file_path')
aliased_field, _ = get_dict_from_file(path=alias_file_path)
yield aliased_field, alias_file_path
def _save_alias_field_file(self, dest_file_path, field_data):
"""Save formatted JSON data to destination file."""
with open(dest_file_path, 'w') as file:
ujson.dump(field_data, file, indent=4, encode_html_chars=True, escape_forward_slashes=False,
ensure_ascii=False)
def format_file(self) -> Tuple[int, int]:
"""Manager function for the incident fields JSON updater."""
format_res = self.run_format()
if format_res:
return format_res, SKIP_RETURN_CODE
else:
return format_res, self.initiate_file_validator()
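# Illustrative usage sketch (paths below are placeholders, not real pack
# content):
#
#     formatter = IncidentFieldJSONFormat(
#         input='Packs/MyPack/IncidentFields/incidentfield-example.json',
#         id_set_path='Tests/id_set.json',
#     )
#     format_result, validation_result = formatter.format_file()
#
# On a formatting failure format_file() returns (ERROR_RETURN_CODE,
# SKIP_RETURN_CODE); otherwise the second element is the validator result.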
 | 44.912621 | 129 | 0.608949 |
7942eba4e426f4afff6029b93719088b1ee1786a | 186 | py | Python | w/lib/__init__.py | propername/w | bbf253b5f2428e4ee1580140dca15331b8355da2 | ["CC0-1.0"] | null | null | null | w/lib/__init__.py | propername/w | bbf253b5f2428e4ee1580140dca15331b8355da2 | ["CC0-1.0"] | null | null | null | w/lib/__init__.py | propername/w | bbf253b5f2428e4ee1580140dca15331b8355da2 | ["CC0-1.0"] | null | null | null |
from w.lib.config import get  # noqa
from w.lib.config import put # noqa
from w.lib.config import root # noqa
from w.lib.config import env # noqa
from w.lib.config import var # noqa
| 31 | 37 | 0.731183 |
7942ecdd9280c425ce10a813551741c3fac9f795 | 4,199 | py | Python | tests/refencode.py | mejedi/libordpath | 0c70c81b5816881ece0d5b66a5cf1bf1f07b2ebe | [
"Apache-1.1"
] | 1 | 2019-06-05T09:11:18.000Z | 2019-06-05T09:11:18.000Z | tests/refencode.py | mejedi/libordpath | 0c70c81b5816881ece0d5b66a5cf1bf1f07b2ebe | [
"Apache-1.1"
] | null | null | null | tests/refencode.py | mejedi/libordpath | 0c70c81b5816881ece0d5b66a5cf1bf1f07b2ebe | [
"Apache-1.1"
] | null | null | null | #! /usr/bin/python
# RANDLABEL.py
#
# Generates random ORDPATH label in the format compatible with
# ordpath-test.
#
# -l, --lenght=<integer> length of generated label
# --setup=<filename> read ORDPATH codec setup from the file
# specified; each generated component has
# equal probability to hit any of the
# intervals specified by setup
# --clamp=<integer> discard some intervals from setup before
# generation; intervals are ordered by
# proximity to the "sweet spot"; positive
# clamp value limits the length of intervals
# list; non-positive clamp value K is
# interpreted as a request to discard first
# |K| elements from the list
# REFENCODE.py
#
# Reference implementation of the ORDPATH codec. Reads label from stdin
# and writes results to stdout. Input and output formats are compatible
# with ordpath-test.
#
# --setup=<filename> read ORDPATH codec setup from the file
# specified
import sys, os, math, re, random, getopt
def parseSetup(s = None):
s = s or """
0000001 : 48
0000010 : 32
0000011 : 16
000010 : 12
000011 : 8
00010 : 6
00011 : 4
001 : 3
01 : 3 : 0
100 : 4
101 : 6
1100 : 8
1101 : 12
11100 : 16
11101 : 32
11110 : 48"""
acc = []
offset = 0
cpos = 0
sweet = 0
for ind, m in enumerate(re.finditer(
r'([01]+)\s*:\s*(\d+)(?:\s*:\s*(-?\d+))?', s)):
pfx, w, orig = m.groups()
w = int(w)
sz = 1 << w
if orig:
orig = int(orig)
sweet = ind
offset = orig - cpos
acc.append((cpos, cpos + sz, pfx, w))
cpos += sz
l = [(abs(i-sweet), b+offset, e+offset, pfx, width)
for (i, (b, e, pfx, width)) in enumerate(acc)]
l.sort(lambda x, y: cmp(x[0], y[0]))
return [val[1:] for val in l]
def inputOutput(args):
if len(args) > 2:
raise Exception('Excess arguments given, expecting at most 2')
args += ['-'] * (2 - len(args))
return (sys.stdin if args[0] == '-' else open(args[0], 'rb')), (
sys.stdout if args[1] == '-' else open(args[1], 'wb'))
def randLabel(setup, l):
return [random.randrange(*random.choice(setup)[:2]) for i in xrange(l)]
def randlabelMain(opts = []):
length = 10
setupstr = None
clamp = 0
optlist, args = getopt.getopt(
opts, 'l:', ['length=', 'setup=', 'clamp='])
for o, a in optlist:
if o in ['-l', '--length']: length = int(a)
elif o in ['--setup']:
with open(a) as f:
setupstr = f.read()
elif o in ['--clamp']: clamp = int(a)
input, output = inputOutput(args)
setup = parseSetup(setupstr)
clamped = setup[-clamp : ] if clamp <= 0 else setup[ : clamp]
data = randLabel(clamped, length)
output.write('\n'.join(map(str, data)) + '\n')
def refEncode(setup, label):
return ''.join([(lambda (b, e, pfx, width):
pfx + str.format("{0:0{1}b}", c-b, width)) (
next((i for i in setup if c >= i[0] and c < i[1])))
for c in label])
def refencodeMain(opts = []):
setupstr = None
optlist, args = getopt.getopt(opts, '', ['setup='])
for o, a in optlist:
if o in ['--setup']:
with open(a) as f:
setupstr = f.read()
input, output = inputOutput(args)
setup = parseSetup(setupstr)
label = map(int, input.read().split())
elabel = refEncode(setup, label)
output.write("%-15d\n" % len(elabel))
t = elabel + '0' * 7
encoded = ''.join((chr(int(t[i:i+8], 2)) for i in range(0, len(elabel),8)))
output.write(encoded)
if __name__ == '__main__':
{
'refencode.py': refencodeMain,
'randlabel.py': randlabelMain
} [os.path.basename(sys.argv[0])] (sys.argv[1:])
| 30.427536 | 79 | 0.511312 |
7942ed2a03adf47a78a317a767e16adff9a7bca3 | 385 | py | Python | cats/cats/asgi.py | Xamaneone/test-repository | 273b187d1b4c1aa692a802452b5ddde676300d7a | [
"MIT"
] | null | null | null | cats/cats/asgi.py | Xamaneone/test-repository | 273b187d1b4c1aa692a802452b5ddde676300d7a | [
"MIT"
] | null | null | null | cats/cats/asgi.py | Xamaneone/test-repository | 273b187d1b4c1aa692a802452b5ddde676300d7a | [
"MIT"
] | null | null | null | """
ASGI config for cats project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cats.settings')
application = get_asgi_application()
| 22.647059 | 78 | 0.781818 |
7942ed5ce37b089cbf36e7407b3154324af879f3 | 189,100 | py | Python | test/scenarios/datafactory/output/src/datafactory-preview/azext_datafactory_preview/vendored_sdks/azure_mgmt_datafactory/models/_models.py | kairu-ms/autorest.az | c3370f3d4d394e580615d8d97df05515533b035e | [
"MIT"
] | null | null | null | test/scenarios/datafactory/output/src/datafactory-preview/azext_datafactory_preview/vendored_sdks/azure_mgmt_datafactory/models/_models.py | kairu-ms/autorest.az | c3370f3d4d394e580615d8d97df05515533b035e | [
"MIT"
] | null | null | null | test/scenarios/datafactory/output/src/datafactory-preview/azext_datafactory_preview/vendored_sdks/azure_mgmt_datafactory/models/_models.py | kairu-ms/autorest.az | c3370f3d4d394e580615d8d97df05515533b035e | [
"MIT"
] | 1 | 2021-03-21T03:59:29.000Z | 2021-03-21T03:59:29.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class AccessPolicyResponse(msrest.serialization.Model):
"""Get Data Plane read only token response definition.
:param policy: The user access policy.
:type policy: ~dfaz_management_client.models.UserAccessPolicy
:param access_token: Data Plane read only access token.
:type access_token: str
:param data_plane_url: Data Plane service base URL.
:type data_plane_url: str
"""
_attribute_map = {
'policy': {'key': 'policy', 'type': 'UserAccessPolicy'},
'access_token': {'key': 'accessToken', 'type': 'str'},
'data_plane_url': {'key': 'dataPlaneUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyResponse, self).__init__(**kwargs)
self.policy = kwargs.get('policy', None)
self.access_token = kwargs.get('access_token', None)
self.data_plane_url = kwargs.get('data_plane_url', None)
class JobBase(msrest.serialization.Model):
"""Job base definition.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ComputeJobBase.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_type: Required. Specifies the type of job.Constant filled by server. Possible
values include: "Command", "Sweep", "Labeling", "Pipeline", "Data", "AutoML".
:type job_type: str or ~dfaz_management_client.models.JobType
:ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated JobEndpoints, can
be added, removed or updated.
:vartype interaction_endpoints: ~dfaz_management_client.models.JobBaseInteractionEndpoints
:param description: The asset description text.
:type description: str
:param tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
"""
_validation = {
'job_type': {'required': True},
'interaction_endpoints': {'readonly': True},
}
_attribute_map = {
'job_type': {'key': 'jobType', 'type': 'str'},
'interaction_endpoints': {'key': 'interactionEndpoints', 'type': 'JobBaseInteractionEndpoints'},
'description': {'key': 'description', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
_subtype_map = {
'job_type': {'ComputeJobBase': 'ComputeJobBase'}
}
def __init__(
self,
**kwargs
):
super(JobBase, self).__init__(**kwargs)
self.job_type = None # type: Optional[str]
self.interaction_endpoints = None
self.description = kwargs.get('description', None)
self.tags = kwargs.get('tags', None)
class ComputeJobBase(JobBase):
"""Compute job base definition.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CommandJob.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_type: Required. Specifies the type of job.Constant filled by server. Possible
values include: "Command", "Sweep", "Labeling", "Pipeline", "Data", "AutoML".
:type job_type: str or ~dfaz_management_client.models.JobType
:ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated JobEndpoints, can
be added, removed or updated.
:vartype interaction_endpoints: ~dfaz_management_client.models.JobBaseInteractionEndpoints
:param description: The asset description text.
:type description: str
:param tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:type experiment_name: str
:param compute_binding: Required. computeBinding of the job.
:type compute_binding: str
"""
_validation = {
'job_type': {'required': True},
'interaction_endpoints': {'readonly': True},
'compute_binding': {'required': True},
}
_attribute_map = {
'job_type': {'key': 'jobType', 'type': 'str'},
'interaction_endpoints': {'key': 'interactionEndpoints', 'type': 'JobBaseInteractionEndpoints'},
'description': {'key': 'description', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'compute_binding': {'key': 'computeBinding', 'type': 'str'},
}
_subtype_map = {
'job_type': {'CommandJob': 'CommandJob'}
}
def __init__(
self,
**kwargs
):
super(ComputeJobBase, self).__init__(**kwargs)
self.job_type = 'ComputeJobBase' # type: str
self.experiment_name = kwargs.get('experiment_name', None)
self.compute_binding = kwargs['compute_binding']
class CommandJob(ComputeJobBase):
"""Code Job definition.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutomlJob.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_type: Required. Specifies the type of job.Constant filled by server. Possible
values include: "Command", "Sweep", "Labeling", "Pipeline", "Data", "AutoML".
:type job_type: str or ~dfaz_management_client.models.JobType
:ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated JobEndpoints, can
be added, removed or updated.
:vartype interaction_endpoints: ~dfaz_management_client.models.JobBaseInteractionEndpoints
:param description: The asset description text.
:type description: str
:param tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:type experiment_name: str
:param compute_binding: Required. computeBinding of the job.
:type compute_binding: str
:param status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused".
:type status: str or ~dfaz_management_client.models.JobStatus
:param max_run_duration_seconds: The max run duration in seconds, after which the job will be
cancelled.
:type max_run_duration_seconds: long
:param code_configuration: Required. Code Configuration of the job.
:type code_configuration: str
:param environment_id: Environment specification of the job.
:type environment_id: str
:param data_bindings: Mapping of data bindings used in the job.
:type data_bindings: object
:param distribution_configuration: Distribution configuration of the job. This should be one of
MpiConfiguration, TensorflowConfiguration, or PyTorchConfiguration.
:type distribution_configuration: object
"""
_validation = {
'job_type': {'required': True},
'interaction_endpoints': {'readonly': True},
'compute_binding': {'required': True},
'code_configuration': {'required': True},
}
_attribute_map = {
'job_type': {'key': 'jobType', 'type': 'str'},
'interaction_endpoints': {'key': 'interactionEndpoints', 'type': 'JobBaseInteractionEndpoints'},
'description': {'key': 'description', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'compute_binding': {'key': 'computeBinding', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'max_run_duration_seconds': {'key': 'maxRunDurationSeconds', 'type': 'long'},
'code_configuration': {'key': 'codeConfiguration', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'data_bindings': {'key': 'dataBindings', 'type': 'object'},
'distribution_configuration': {'key': 'distributionConfiguration', 'type': 'object'},
}
_subtype_map = {
'job_type': {'AutoML': 'AutomlJob'}
}
def __init__(
self,
**kwargs
):
super(CommandJob, self).__init__(**kwargs)
self.job_type = 'CommandJob' # type: str
self.status = kwargs.get('status', None)
self.max_run_duration_seconds = kwargs.get('max_run_duration_seconds', None)
self.code_configuration = kwargs['code_configuration']
self.environment_id = kwargs.get('environment_id', None)
self.data_bindings = kwargs.get('data_bindings', None)
self.distribution_configuration = kwargs.get('distribution_configuration', None)
class AutomlJob(CommandJob):
"""Automl Job definition.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TestJob.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_type: Required. Specifies the type of job.Constant filled by server. Possible
values include: "Command", "Sweep", "Labeling", "Pipeline", "Data", "AutoML".
:type job_type: str or ~dfaz_management_client.models.JobType
:ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated JobEndpoints, can
be added, removed or updated.
:vartype interaction_endpoints: ~dfaz_management_client.models.JobBaseInteractionEndpoints
:param description: The asset description text.
:type description: str
:param tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:type experiment_name: str
:param compute_binding: Required. computeBinding of the job.
:type compute_binding: str
:param status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused".
:type status: str or ~dfaz_management_client.models.JobStatus
:param max_run_duration_seconds: The max run duration in seconds, after which the job will be
cancelled.
:type max_run_duration_seconds: long
:param code_configuration: Required. Code Configuration of the job.
:type code_configuration: str
:param environment_id: Environment specification of the job.
:type environment_id: str
:param data_bindings: Mapping of data bindings used in the job.
:type data_bindings: object
:param distribution_configuration: Distribution configuration of the job. This should be one of
MpiConfiguration, TensorflowConfiguration, or PyTorchConfiguration.
:type distribution_configuration: object
:param run_type: Run type.
:type run_type: str
:param run_source: Run source is used by services calling AutoML CreateParentRun;
if none is provided, it defaults to "AutoML".
This value is used for RootAttribution.
:type run_source: str
:param num_iterations: Number of iterations.
:type num_iterations: int
:param training_type: Training type. Possible values include: "TrainFull", "TrainAndValidate",
"CrossValidate", "MeanCrossValidate".
:type training_type: str or ~dfaz_management_client.models.TrainingType
:param acquisition_function: Acquisition function. Possible values include: "EI", "PI", "UCB".
:type acquisition_function: str or ~dfaz_management_client.models.AcquisitionFunction
:param metrics: Optimization metrics.
:type metrics: list[str or ~dfaz_management_client.models.OptimizationMetric]
:param primary_metric: Primary optimization metric. Possible values include: "AUC_weighted",
"Accuracy", "Norm_macro_recall", "Average_precision_score_weighted",
"Precision_score_weighted", "Spearman_correlation", "Normalized_root_mean_squared_error",
"R2_score", "Normalized_mean_absolute_error", "Normalized_root_mean_squared_log_error",
"Mean_average_precision", "Iou".
:type primary_metric: str or ~dfaz_management_client.models.OptimizationMetric
:param train_split: Train split percentage.
:type train_split: float
:param acquisition_parameter: Acquisition parameter.
:type acquisition_parameter: float
:param num_cross_validation: Num cross validation.
:type num_cross_validation: int
:param target: Target.
:type target: str
:param aml_settings_json_string: AMLSettings Json string.
:type aml_settings_json_string: str
:param data_prep_json_string: Serialized DataPrep dataflow object.
:type data_prep_json_string: str
:param enable_subsampling: Enable subsampling.
:type enable_subsampling: bool
:param scenario: Which scenario is being used to mapping to a curated environment.
:type scenario: str
:param parent_run_id: The parent run id for the current parent run dto.
:type parent_run_id: str
"""
_validation = {
'job_type': {'required': True},
'interaction_endpoints': {'readonly': True},
'compute_binding': {'required': True},
'code_configuration': {'required': True},
}
_attribute_map = {
'job_type': {'key': 'jobType', 'type': 'str'},
'interaction_endpoints': {'key': 'interactionEndpoints', 'type': 'JobBaseInteractionEndpoints'},
'description': {'key': 'description', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'compute_binding': {'key': 'computeBinding', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'max_run_duration_seconds': {'key': 'maxRunDurationSeconds', 'type': 'long'},
'code_configuration': {'key': 'codeConfiguration', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'data_bindings': {'key': 'dataBindings', 'type': 'object'},
'distribution_configuration': {'key': 'distributionConfiguration', 'type': 'object'},
'run_type': {'key': 'runType', 'type': 'str'},
'run_source': {'key': 'runSource', 'type': 'str'},
'num_iterations': {'key': 'numIterations', 'type': 'int'},
'training_type': {'key': 'trainingType', 'type': 'str'},
'acquisition_function': {'key': 'acquisitionFunction', 'type': 'str'},
'metrics': {'key': 'metrics', 'type': '[str]'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
'train_split': {'key': 'trainSplit', 'type': 'float'},
'acquisition_parameter': {'key': 'acquisitionParameter', 'type': 'float'},
'num_cross_validation': {'key': 'numCrossValidation', 'type': 'int'},
'target': {'key': 'target', 'type': 'str'},
'aml_settings_json_string': {'key': 'amlSettingsJsonString', 'type': 'str'},
'data_prep_json_string': {'key': 'dataPrepJsonString', 'type': 'str'},
'enable_subsampling': {'key': 'enableSubsampling', 'type': 'bool'},
'scenario': {'key': 'scenario', 'type': 'str'},
'parent_run_id': {'key': 'parentRunId', 'type': 'str'},
}
_subtype_map = {
'job_type': {'TestJob': 'TestJob'}
}
def __init__(
self,
**kwargs
):
super(AutomlJob, self).__init__(**kwargs)
self.job_type = 'AutoML' # type: str
self.run_type = kwargs.get('run_type', None)
self.run_source = kwargs.get('run_source', None)
self.num_iterations = kwargs.get('num_iterations', None)
self.training_type = kwargs.get('training_type', None)
self.acquisition_function = kwargs.get('acquisition_function', None)
self.metrics = kwargs.get('metrics', None)
self.primary_metric = kwargs.get('primary_metric', None)
self.train_split = kwargs.get('train_split', None)
self.acquisition_parameter = kwargs.get('acquisition_parameter', None)
self.num_cross_validation = kwargs.get('num_cross_validation', None)
self.target = kwargs.get('target', None)
self.aml_settings_json_string = kwargs.get('aml_settings_json_string', None)
self.data_prep_json_string = kwargs.get('data_prep_json_string', None)
self.enable_subsampling = kwargs.get('enable_subsampling', None)
self.scenario = kwargs.get('scenario', None)
self.parent_run_id = kwargs.get('parent_run_id', None)
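# Illustrative sketch (not part of the generated code): the job hierarchy is
# discriminated on `job_type` (JobBase -> ComputeJobBase -> CommandJob -> AutomlJob),
# so only the required fields need to be supplied; the values below are hypothetical.
#
#   job = AutomlJob(
#       compute_binding='/subscriptions/<sub-id>/.../computes/cpu-cluster',
#       code_configuration='train.py',
#       experiment_name='my-experiment',
#   )
#   # job.job_type == 'AutoML'  (set by the subclass constructor)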
class Trigger(msrest.serialization.Model):
"""Azure data factory nested object which contains information about creating pipeline run.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ChainingTrigger, MultiplePipelineTrigger, RerunTumblingWindowTrigger, TumblingWindowTrigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
}
_subtype_map = {
'type': {'ChainingTrigger': 'ChainingTrigger', 'MultiplePipelineTrigger': 'MultiplePipelineTrigger', 'RerunTumblingWindowTrigger': 'RerunTumblingWindowTrigger', 'TumblingWindowTrigger': 'TumblingWindowTrigger'}
}
def __init__(
self,
**kwargs
):
super(Trigger, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'Trigger' # type: str
self.description = kwargs.get('description', None)
self.runtime_state = None
self.annotations = kwargs.get('annotations', None)
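# Illustrative note (not part of the generated code): Trigger is polymorphic; msrest
# resolves the concrete class from the `type` discriminator via `_subtype_map` during
# deserialization. A hedged sketch using the generic msrest Deserializer (the exact
# call shape follows standard msrest usage and is an assumption here):
#
#   from msrest import Deserializer
#   models = {'Trigger': Trigger, 'MultiplePipelineTrigger': MultiplePipelineTrigger}
#   raw = {'type': 'MultiplePipelineTrigger', 'description': 'fan-out trigger'}
#   obj = Deserializer(models)('Trigger', raw)
#   # isinstance(obj, MultiplePipelineTrigger) -> True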
class MultiplePipelineTrigger(Trigger):
"""Base class for all triggers that support one to many model for trigger to pipeline.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: BlobEventsTrigger, BlobTrigger, ScheduleTrigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
}
_subtype_map = {
'type': {'BlobEventsTrigger': 'BlobEventsTrigger', 'BlobTrigger': 'BlobTrigger', 'ScheduleTrigger': 'ScheduleTrigger'}
}
def __init__(
self,
**kwargs
):
super(MultiplePipelineTrigger, self).__init__(**kwargs)
self.type = 'MultiplePipelineTrigger' # type: str
self.pipelines = kwargs.get('pipelines', None)
class BlobEventsTrigger(MultiplePipelineTrigger):
"""Trigger that runs every time a Blob event occurs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
:param blob_path_begins_with: The blob path must begin with the pattern provided for the trigger to
fire. For example, '/records/blobs/december/' will only fire the trigger for blobs in the
december folder under the records container. At least one of these must be provided:
blobPathBeginsWith, blobPathEndsWith.
:type blob_path_begins_with: str
:param blob_path_ends_with: The blob path must end with the pattern provided for the trigger to
fire. For example, 'december/boxes.csv' will only fire the trigger for blobs named boxes in a
december folder. At least one of these must be provided: blobPathBeginsWith, blobPathEndsWith.
:type blob_path_ends_with: str
:param ignore_empty_blobs: If set to true, blobs with zero bytes will be ignored.
:type ignore_empty_blobs: bool
:param events: Required. The type of events that cause this trigger to fire.
:type events: list[str or ~dfaz_management_client.models.BlobEventTypes]
:param scope: Required. The ARM resource ID of the Storage Account.
:type scope: str
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'events': {'required': True},
'scope': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'blob_path_begins_with': {'key': 'typeProperties.blobPathBeginsWith', 'type': 'str'},
'blob_path_ends_with': {'key': 'typeProperties.blobPathEndsWith', 'type': 'str'},
'ignore_empty_blobs': {'key': 'typeProperties.ignoreEmptyBlobs', 'type': 'bool'},
'events': {'key': 'typeProperties.events', 'type': '[str]'},
'scope': {'key': 'typeProperties.scope', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BlobEventsTrigger, self).__init__(**kwargs)
self.type = 'BlobEventsTrigger' # type: str
self.blob_path_begins_with = kwargs.get('blob_path_begins_with', None)
self.blob_path_ends_with = kwargs.get('blob_path_ends_with', None)
self.ignore_empty_blobs = kwargs.get('ignore_empty_blobs', None)
self.events = kwargs['events']
self.scope = kwargs['scope']
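# Illustrative construction sketch (values are hypothetical; the event name is an
# assumed BlobEventTypes value):
#
#   blob_events_trigger = BlobEventsTrigger(
#       events=['Microsoft.Storage.BlobCreated'],
#       scope='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/'
#             'Microsoft.Storage/storageAccounts/<account>',
#       blob_path_begins_with='/records/blobs/december/',
#       ignore_empty_blobs=True,
#   )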
class BlobTrigger(MultiplePipelineTrigger):
"""Trigger that runs every time the selected Blob container changes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
:param folder_path: Required. The path of the container/folder that will trigger the pipeline.
:type folder_path: str
:param max_concurrency: Required. The max number of parallel files to handle when it is
triggered.
:type max_concurrency: int
:param linked_service: Required. The Azure Storage linked service reference.
:type linked_service: ~dfaz_management_client.models.LinkedServiceReference
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'folder_path': {'required': True},
'max_concurrency': {'required': True},
'linked_service': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'folder_path': {'key': 'typeProperties.folderPath', 'type': 'str'},
'max_concurrency': {'key': 'typeProperties.maxConcurrency', 'type': 'int'},
'linked_service': {'key': 'typeProperties.linkedService', 'type': 'LinkedServiceReference'},
}
def __init__(
self,
**kwargs
):
super(BlobTrigger, self).__init__(**kwargs)
self.type = 'BlobTrigger' # type: str
self.folder_path = kwargs['folder_path']
self.max_concurrency = kwargs['max_concurrency']
self.linked_service = kwargs['linked_service']
class ChainingTrigger(Trigger):
"""Trigger that allows the referenced pipeline to depend on other pipeline runs based on runDimension Name/Value pairs. Upstream pipelines should declare the same runDimension Name and their runs should have the values for those runDimensions. The referenced pipeline run would be triggered if the values for the runDimension match for all upstream pipeline runs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipeline: Required. Pipeline for which runs are created when all upstream pipelines
complete successfully.
:type pipeline: ~dfaz_management_client.models.TriggerPipelineReference
:param depends_on: Required. Upstream Pipelines.
:type depends_on: list[~dfaz_management_client.models.PipelineReference]
:param run_dimension: Required. Run Dimension property that needs to be emitted by upstream
pipelines.
:type run_dimension: str
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'pipeline': {'required': True},
'depends_on': {'required': True},
'run_dimension': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipeline': {'key': 'pipeline', 'type': 'TriggerPipelineReference'},
'depends_on': {'key': 'typeProperties.dependsOn', 'type': '[PipelineReference]'},
'run_dimension': {'key': 'typeProperties.runDimension', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ChainingTrigger, self).__init__(**kwargs)
self.type = 'ChainingTrigger' # type: str
self.pipeline = kwargs['pipeline']
self.depends_on = kwargs['depends_on']
self.run_dimension = kwargs['run_dimension']
class CloudError(msrest.serialization.Model):
"""The object that defines the structure of an Azure Data Factory error response.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
:param target: Property name/path in request associated with error.
:type target: str
:param details: Array with additional error details.
:type details: list[~dfaz_management_client.models.CloudError]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'error.code', 'type': 'str'},
'message': {'key': 'error.message', 'type': 'str'},
'target': {'key': 'error.target', 'type': 'str'},
'details': {'key': 'error.details', 'type': '[CloudError]'},
}
def __init__(
self,
**kwargs
):
super(CloudError, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class CustomSetupBase(msrest.serialization.Model):
"""The base definition of the custom setup.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CmdkeySetup, ComponentSetup, EnvironmentVariableSetup.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'CmdkeySetup': 'CmdkeySetup', 'ComponentSetup': 'ComponentSetup', 'EnvironmentVariableSetup': 'EnvironmentVariableSetup'}
}
def __init__(
self,
**kwargs
):
super(CustomSetupBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CmdkeySetup(CustomSetupBase):
"""The custom setup of running cmdkey commands.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
:param target_name: Required. The server name of data source access.
:type target_name: object
:param user_name: Required. The user name of data source access.
:type user_name: object
:param password: Required. The password of data source access.
:type password: ~dfaz_management_client.models.SecretBase
"""
_validation = {
'type': {'required': True},
'target_name': {'required': True},
'user_name': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'target_name': {'key': 'typeProperties.targetName', 'type': 'object'},
'user_name': {'key': 'typeProperties.userName', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
}
def __init__(
self,
**kwargs
):
super(CmdkeySetup, self).__init__(**kwargs)
self.type = 'CmdkeySetup' # type: str
self.target_name = kwargs['target_name']
self.user_name = kwargs['user_name']
self.password = kwargs['password']
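# Illustrative construction sketch (values are hypothetical; SecureString is assumed
# to be the SecretBase implementation defined elsewhere in this module):
#
#   cmdkey = CmdkeySetup(
#       target_name='mydatasource.example.com',
#       user_name='svc-account',
#       password=SecureString(value='<secret>'),
#   )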
class ComponentSetup(CustomSetupBase):
"""The custom setup of installing 3rd party components.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
:param component_name: Required. The name of the 3rd party component.
:type component_name: str
:param license_key: The license key to activate the component.
:type license_key: ~dfaz_management_client.models.SecretBase
"""
_validation = {
'type': {'required': True},
'component_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'component_name': {'key': 'typeProperties.componentName', 'type': 'str'},
'license_key': {'key': 'typeProperties.licenseKey', 'type': 'SecretBase'},
}
def __init__(
self,
**kwargs
):
super(ComponentSetup, self).__init__(**kwargs)
self.type = 'ComponentSetup' # type: str
self.component_name = kwargs['component_name']
self.license_key = kwargs.get('license_key', None)
class CreateLinkedIntegrationRuntimeRequest(msrest.serialization.Model):
"""The linked integration runtime information.
:param name: The name of the linked integration runtime.
:type name: str
:param subscription_id: The ID of the subscription that the linked integration runtime belongs
to.
:type subscription_id: str
:param data_factory_name: The name of the data factory that the linked integration runtime
belongs to.
:type data_factory_name: str
:param data_factory_location: The location of the data factory that the linked integration
runtime belongs to.
:type data_factory_location: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'data_factory_location': {'key': 'dataFactoryLocation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CreateLinkedIntegrationRuntimeRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.subscription_id = kwargs.get('subscription_id', None)
self.data_factory_name = kwargs.get('data_factory_name', None)
self.data_factory_location = kwargs.get('data_factory_location', None)
class DependencyReference(msrest.serialization.Model):
"""Referenced dependency.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SelfDependencyTumblingWindowTriggerReference, TriggerDependencyReference.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of dependency reference.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'SelfDependencyTumblingWindowTriggerReference': 'SelfDependencyTumblingWindowTriggerReference', 'TriggerDependencyReference': 'TriggerDependencyReference'}
}
def __init__(
self,
**kwargs
):
super(DependencyReference, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class Resource(msrest.serialization.Model):
"""Azure Data Factory top-level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar e_tag: Etag identifies change in the resource.
:vartype e_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'e_tag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'e_tag': {'key': 'eTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.e_tag = None
class DomainService(Resource):
"""Domain service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar e_tag: Etag identifies change in the resource.
:vartype e_tag: str
:ivar version: Data Model Version.
:vartype version: int
:ivar tenant_id: Azure Active Directory Tenant Id.
:vartype tenant_id: str
:param domain_name: The name of the Azure domain that the user would like to deploy Domain
Services to.
:type domain_name: str
:ivar deployment_id: Deployment Id.
:vartype deployment_id: str
:ivar sync_owner: SyncOwner ReplicaSet Id.
:vartype sync_owner: str
:param replica_sets: List of ReplicaSets.
:type replica_sets: list[~dfaz_management_client.models.ReplicaSet]
:param domain_configuration_type: Domain Configuration Type. Possible values include:
"FullySynced", "ResourceTrusting".
:type domain_configuration_type: str or
~dfaz_management_client.models.DomainServicePropertiesDomainConfigurationType
:param sku: Sku Type. Possible values include: "Standard", "Enterprise", "Premium".
:type sku: str or ~dfaz_management_client.models.DomainServicePropertiesSku
:param filtered_sync: Enabled or Disabled flag to turn on Group-based filtered sync. Possible
values include: "Enabled", "Disabled".
:type filtered_sync: str or ~dfaz_management_client.models.FilteredSync
:ivar provisioning_state: the current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'e_tag': {'readonly': True},
'version': {'readonly': True},
'tenant_id': {'readonly': True},
'deployment_id': {'readonly': True},
'sync_owner': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'int'},
'tenant_id': {'key': 'properties.tenantId', 'type': 'str'},
'domain_name': {'key': 'properties.domainName', 'type': 'str'},
'deployment_id': {'key': 'properties.deploymentId', 'type': 'str'},
'sync_owner': {'key': 'properties.syncOwner', 'type': 'str'},
'replica_sets': {'key': 'properties.replicaSets', 'type': '[ReplicaSet]'},
'domain_configuration_type': {'key': 'properties.domainConfigurationType', 'type': 'str'},
'sku': {'key': 'properties.sku', 'type': 'str'},
'filtered_sync': {'key': 'properties.filteredSync', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DomainService, self).__init__(**kwargs)
self.version = None
self.tenant_id = None
self.domain_name = kwargs.get('domain_name', None)
self.deployment_id = None
self.sync_owner = None
self.replica_sets = kwargs.get('replica_sets', None)
self.domain_configuration_type = kwargs.get('domain_configuration_type', None)
self.sku = kwargs.get('sku', None)
self.filtered_sync = kwargs.get('filtered_sync', None)
self.provisioning_state = None
class EntityReference(msrest.serialization.Model):
"""The entity reference.
:param type: The type of this referenced entity. Possible values include:
"IntegrationRuntimeReference", "LinkedServiceReference".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeEntityReferenceType
:param reference_name: The name of this referenced entity.
:type reference_name: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EntityReference, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.reference_name = kwargs.get('reference_name', None)
class EnvironmentVariableSetup(CustomSetupBase):
"""The custom setup of setting environment variable.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of custom setup.Constant filled by server.
:type type: str
:param variable_name: Required. The name of the environment variable.
:type variable_name: str
:param variable_value: Required. The value of the environment variable.
:type variable_value: str
"""
_validation = {
'type': {'required': True},
'variable_name': {'required': True},
'variable_value': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'variable_name': {'key': 'typeProperties.variableName', 'type': 'str'},
'variable_value': {'key': 'typeProperties.variableValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnvironmentVariableSetup, self).__init__(**kwargs)
self.type = 'EnvironmentVariableSetup' # type: str
self.variable_name = kwargs['variable_name']
self.variable_value = kwargs['variable_value']
class Factory(Resource):
"""Factory resource type.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:ivar e_tag: Etag identifies change in the resource.
:vartype e_tag: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param identity: Managed service identity of the factory.
:type identity: ~dfaz_management_client.models.FactoryIdentity
:param test_inherit: Test Job Base.
:type test_inherit: ~dfaz_management_client.models.JobBase
:ivar provisioning_state: Factory provisioning state, example Succeeded.
:vartype provisioning_state: str
:ivar create_time: Time the factory was created in ISO8601 format.
:vartype create_time: ~datetime.datetime
:ivar version: Version of the factory.
:vartype version: str
:param repo_configuration: Git repo information of the factory.
:type repo_configuration: ~dfaz_management_client.models.FactoryRepoConfiguration
:param fake_identity: This is only for az test.
:type fake_identity: ~dfaz_management_client.models.FakeFactoryIdentity
:param zones: This is only for az test.
:type zones: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'e_tag': {'readonly': True},
'provisioning_state': {'readonly': True},
'create_time': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'identity': {'key': 'identity', 'type': 'FactoryIdentity'},
'test_inherit': {'key': 'testInherit', 'type': 'JobBase'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'create_time': {'key': 'properties.createTime', 'type': 'iso-8601'},
'version': {'key': 'properties.version', 'type': 'str'},
'repo_configuration': {'key': 'properties.repoConfiguration', 'type': 'FactoryRepoConfiguration'},
'fake_identity': {'key': 'properties.fakeIdentity', 'type': 'FakeFactoryIdentity'},
'zones': {'key': 'properties.zones', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(Factory, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.identity = kwargs.get('identity', None)
self.test_inherit = kwargs.get('test_inherit', None)
self.provisioning_state = None
self.create_time = None
self.version = None
self.repo_configuration = kwargs.get('repo_configuration', None)
self.fake_identity = kwargs.get('fake_identity', None)
self.zones = kwargs.get('zones', None)
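# Illustrative construction sketch (values are hypothetical): only writable fields are
# supplied; read-only fields such as provisioning_state and create_time are populated
# by the service. FactoryIdentity is defined further down in this module.
#
#   factory = Factory(
#       location='eastus',
#       identity=FactoryIdentity(type='SystemAssigned'),
#       tags={'env': 'test'},
#   )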
class FactoryRepoConfiguration(msrest.serialization.Model):
"""Factory's git repo information.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: FactoryGitHubConfiguration, FactoryVstsConfiguration.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of repo configuration.Constant filled by server.
:type type: str
:param account_name: Required. Account name.
:type account_name: str
:param repository_name: Required. Repository name.
:type repository_name: str
:param collaboration_branch: Required. Collaboration branch.
:type collaboration_branch: str
:param root_folder: Required. Root folder.
:type root_folder: str
:param last_commit_id: Last commit id.
:type last_commit_id: str
"""
_validation = {
'type': {'required': True},
'account_name': {'required': True},
'repository_name': {'required': True},
'collaboration_branch': {'required': True},
'root_folder': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'repository_name': {'key': 'repositoryName', 'type': 'str'},
'collaboration_branch': {'key': 'collaborationBranch', 'type': 'str'},
'root_folder': {'key': 'rootFolder', 'type': 'str'},
'last_commit_id': {'key': 'lastCommitId', 'type': 'str'},
}
_subtype_map = {
'type': {'FactoryGitHubConfiguration': 'FactoryGitHubConfiguration', 'FactoryVSTSConfiguration': 'FactoryVstsConfiguration'}
}
def __init__(
self,
**kwargs
):
super(FactoryRepoConfiguration, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.account_name = kwargs['account_name']
self.repository_name = kwargs['repository_name']
self.collaboration_branch = kwargs['collaboration_branch']
self.root_folder = kwargs['root_folder']
self.last_commit_id = kwargs.get('last_commit_id', None)
class FactoryGitHubConfiguration(FactoryRepoConfiguration):
"""Factory's GitHub repo information.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of repo configuration.Constant filled by server.
:type type: str
:param account_name: Required. Account name.
:type account_name: str
:param repository_name: Required. Repository name.
:type repository_name: str
:param collaboration_branch: Required. Collaboration branch.
:type collaboration_branch: str
:param root_folder: Required. Root folder.
:type root_folder: str
:param last_commit_id: Last commit id.
:type last_commit_id: str
:param host_name: GitHub Enterprise host name. For example: https://github.mydomain.com.
:type host_name: str
"""
_validation = {
'type': {'required': True},
'account_name': {'required': True},
'repository_name': {'required': True},
'collaboration_branch': {'required': True},
'root_folder': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'repository_name': {'key': 'repositoryName', 'type': 'str'},
'collaboration_branch': {'key': 'collaborationBranch', 'type': 'str'},
'root_folder': {'key': 'rootFolder', 'type': 'str'},
'last_commit_id': {'key': 'lastCommitId', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryGitHubConfiguration, self).__init__(**kwargs)
self.type = 'FactoryGitHubConfiguration' # type: str
self.host_name = kwargs.get('host_name', None)
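# Illustrative construction sketch (values are hypothetical; host_name is only needed
# for GitHub Enterprise, per the docstring above):
#
#   repo = FactoryGitHubConfiguration(
#       account_name='contoso',
#       repository_name='adf-pipelines',
#       collaboration_branch='main',
#       root_folder='/',
#   )
#   # repo.type is fixed to 'FactoryGitHubConfiguration' by the subclass.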
class FactoryIdentity(msrest.serialization.Model):
"""Identity properties of the factory resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type. Currently the only supported type is
'SystemAssigned'. Possible values include: "SystemAssigned".
:type type: str or ~dfaz_management_client.models.FactoryIdentityType
:ivar principal_id: The principal id of the identity.
:vartype principal_id: str
:ivar tenant_id: The client tenant id of the identity.
:vartype tenant_id: str
"""
_validation = {
'type': {'required': True},
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.principal_id = None
self.tenant_id = None
class FactoryListResponse(msrest.serialization.Model):
"""A list of factory resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of factories.
:type value: list[~dfaz_management_client.models.Factory]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Factory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class FactoryRepoUpdate(msrest.serialization.Model):
"""Factory's git repo information.
:param factory_resource_id: The factory resource id.
:type factory_resource_id: str
:param repo_configuration: Git repo information of the factory.
:type repo_configuration: ~dfaz_management_client.models.FactoryRepoConfiguration
"""
_attribute_map = {
'factory_resource_id': {'key': 'factoryResourceId', 'type': 'str'},
'repo_configuration': {'key': 'repoConfiguration', 'type': 'FactoryRepoConfiguration'},
}
def __init__(
self,
**kwargs
):
super(FactoryRepoUpdate, self).__init__(**kwargs)
self.factory_resource_id = kwargs.get('factory_resource_id', None)
self.repo_configuration = kwargs.get('repo_configuration', None)
class FactoryUpdateParameters(msrest.serialization.Model):
"""Parameters for updating a factory resource.
All required parameters must be populated in order to send to Azure.
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param identity: Required. Managed service identity of the factory.
:type identity: ~dfaz_management_client.models.FactoryIdentity
"""
_validation = {
'identity': {'required': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'FactoryIdentity'},
}
def __init__(
self,
**kwargs
):
super(FactoryUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs['identity']
class FactoryVstsConfiguration(FactoryRepoConfiguration):
"""Factory's VSTS repo information.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of repo configuration.Constant filled by server.
:type type: str
:param account_name: Required. Account name.
:type account_name: str
:param repository_name: Required. Repository name.
:type repository_name: str
:param collaboration_branch: Required. Collaboration branch.
:type collaboration_branch: str
:param root_folder: Required. Root folder.
:type root_folder: str
:param last_commit_id: Last commit id.
:type last_commit_id: str
:param project_name: Required. VSTS project name.
:type project_name: str
:param tenant_id: VSTS tenant id.
:type tenant_id: str
"""
_validation = {
'type': {'required': True},
'account_name': {'required': True},
'repository_name': {'required': True},
'collaboration_branch': {'required': True},
'root_folder': {'required': True},
'project_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'repository_name': {'key': 'repositoryName', 'type': 'str'},
'collaboration_branch': {'key': 'collaborationBranch', 'type': 'str'},
'root_folder': {'key': 'rootFolder', 'type': 'str'},
'last_commit_id': {'key': 'lastCommitId', 'type': 'str'},
'project_name': {'key': 'projectName', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FactoryVstsConfiguration, self).__init__(**kwargs)
self.type = 'FactoryVSTSConfiguration' # type: str
self.project_name = kwargs['project_name']
self.tenant_id = kwargs.get('tenant_id', None)
class FakeFactoryIdentity(msrest.serialization.Model):
"""This is only for az test.
All required parameters must be populated in order to send to Azure.
:param name: Required. ..
:type name: str
:param zones_inside: sample of simple array.
:type zones_inside: list[str]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'zones_inside': {'key': 'zonesInside', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(FakeFactoryIdentity, self).__init__(**kwargs)
self.name = kwargs['name']
self.zones_inside = kwargs.get('zones_inside', None)
class GitHubAccessTokenRequest(msrest.serialization.Model):
"""Get GitHub access token request definition.
All required parameters must be populated in order to send to Azure.
:param git_hub_access_code: Required. GitHub access code.
:type git_hub_access_code: str
:param git_hub_client_id: GitHub application client ID.
:type git_hub_client_id: str
:param git_hub_access_token_base_url: Required. GitHub access token base URL.
:type git_hub_access_token_base_url: str
"""
_validation = {
'git_hub_access_code': {'required': True},
'git_hub_access_token_base_url': {'required': True},
}
_attribute_map = {
'git_hub_access_code': {'key': 'gitHubAccessCode', 'type': 'str'},
'git_hub_client_id': {'key': 'gitHubClientId', 'type': 'str'},
'git_hub_access_token_base_url': {'key': 'gitHubAccessTokenBaseUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GitHubAccessTokenRequest, self).__init__(**kwargs)
self.git_hub_access_code = kwargs['git_hub_access_code']
self.git_hub_client_id = kwargs.get('git_hub_client_id', None)
self.git_hub_access_token_base_url = kwargs['git_hub_access_token_base_url']
class GitHubAccessTokenResponse(msrest.serialization.Model):
"""Get GitHub access token response definition.
:param git_hub_access_token: GitHub access token.
:type git_hub_access_token: str
"""
_attribute_map = {
'git_hub_access_token': {'key': 'gitHubAccessToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GitHubAccessTokenResponse, self).__init__(**kwargs)
self.git_hub_access_token = kwargs.get('git_hub_access_token', None)
class Group(msrest.serialization.Model):
"""A group created in a Migration project.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Path reference to this group.
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Migrate/assessmentProjects/{projectName}/groups/{groupName}.
:vartype id: str
:ivar name: Name of the group.
:vartype name: str
:param e_tag: For optimistic concurrency control.
:type e_tag: str
:ivar type: Type of the object = [Microsoft.Migrate/assessmentProjects/groups].
:vartype type: str
:param properties: Required. Properties of the group.
:type properties: ~dfaz_management_client.models.GroupProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'GroupProperties'},
}
def __init__(
self,
**kwargs
):
super(Group, self).__init__(**kwargs)
self.id = None
self.name = None
self.e_tag = kwargs.get('e_tag', None)
self.type = None
self.properties = kwargs['properties']
class GroupProperties(msrest.serialization.Model):
"""Properties of group resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_status: Whether the group has been created and is valid. Possible values include:
"Created", "Updated", "Running", "Completed", "Invalid".
:vartype group_status: str or ~dfaz_management_client.models.GroupStatus
:ivar machine_count: Number of machines part of this group.
:vartype machine_count: int
:ivar assessments: List of References to Assessments created on this group.
:vartype assessments: list[str]
:ivar are_assessments_running: If the assessments are in running state.
:vartype are_assessments_running: bool
:ivar created_timestamp: Time when this group was created. Date-Time represented in ISO-8601
format.
:vartype created_timestamp: ~datetime.datetime
:ivar updated_timestamp: Time when this group was last updated. Date-Time represented in
ISO-8601 format.
:vartype updated_timestamp: ~datetime.datetime
"""
_validation = {
'group_status': {'readonly': True},
'machine_count': {'readonly': True},
'assessments': {'readonly': True},
'are_assessments_running': {'readonly': True},
'created_timestamp': {'readonly': True},
'updated_timestamp': {'readonly': True},
}
_attribute_map = {
'group_status': {'key': 'groupStatus', 'type': 'str'},
'machine_count': {'key': 'machineCount', 'type': 'int'},
'assessments': {'key': 'assessments', 'type': '[str]'},
'are_assessments_running': {'key': 'areAssessmentsRunning', 'type': 'bool'},
'created_timestamp': {'key': 'createdTimestamp', 'type': 'iso-8601'},
'updated_timestamp': {'key': 'updatedTimestamp', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(GroupProperties, self).__init__(**kwargs)
self.group_status = None
self.machine_count = None
self.assessments = None
self.are_assessments_running = None
self.created_timestamp = None
self.updated_timestamp = None
class HealthAlert(msrest.serialization.Model):
"""Health Alert Description.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Health Alert Id.
:vartype id: str
:ivar name: Health Alert Name.
:vartype name: str
:ivar issue: Health Alert Issue.
:vartype issue: str
:ivar severity: Health Alert Severity.
:vartype severity: str
:ivar raised: Health Alert Raised DateTime.
:vartype raised: ~datetime.datetime
:ivar last_detected: Health Alert Last Detected DateTime.
:vartype last_detected: ~datetime.datetime
:ivar resolution_uri: Health Alert TSG Link.
:vartype resolution_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'issue': {'readonly': True},
'severity': {'readonly': True},
'raised': {'readonly': True},
'last_detected': {'readonly': True},
'resolution_uri': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'issue': {'key': 'issue', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'raised': {'key': 'raised', 'type': 'iso-8601'},
'last_detected': {'key': 'lastDetected', 'type': 'iso-8601'},
'resolution_uri': {'key': 'resolutionUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(HealthAlert, self).__init__(**kwargs)
self.id = None
self.name = None
self.issue = None
self.severity = None
self.raised = None
self.last_detected = None
self.resolution_uri = None
class HealthMonitor(msrest.serialization.Model):
"""Health Monitor Description.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Health Monitor Id.
:vartype id: str
:ivar name: Health Monitor Name.
:vartype name: str
:ivar details: Health Monitor Details.
:vartype details: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(HealthMonitor, self).__init__(**kwargs)
self.id = None
self.name = None
self.details = None
class IntegrationRuntime(msrest.serialization.Model):
"""Azure Data Factory nested object which serves as a compute resource for activities.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ManagedIntegrationRuntime, SelfHostedIntegrationRuntime.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:param description: Integration runtime description.
:type description: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
_subtype_map = {
'type': {'Managed': 'ManagedIntegrationRuntime', 'SelfHosted': 'SelfHostedIntegrationRuntime'}
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntime, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'IntegrationRuntime' # type: str
self.description = kwargs.get('description', None)
class IntegrationRuntimeAuthKeys(msrest.serialization.Model):
"""The integration runtime authentication keys.
:param auth_key1: The primary integration runtime authentication key.
:type auth_key1: str
:param auth_key2: The secondary integration runtime authentication key.
:type auth_key2: str
"""
_attribute_map = {
'auth_key1': {'key': 'authKey1', 'type': 'str'},
'auth_key2': {'key': 'authKey2', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeAuthKeys, self).__init__(**kwargs)
self.auth_key1 = kwargs.get('auth_key1', None)
self.auth_key2 = kwargs.get('auth_key2', None)
class IntegrationRuntimeComputeProperties(msrest.serialization.Model):
"""The compute resource properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param location: The location for managed integration runtime. The supported regions could be
found on
https://docs.microsoft.com/en-us/azure/data-factory/data-factory-data-movement-activities.
:type location: str
:param node_size: The node size requirement to managed integration runtime.
:type node_size: str
:param number_of_nodes: The required number of nodes for managed integration runtime.
:type number_of_nodes: int
:param max_parallel_executions_per_node: Maximum parallel executions count per node for managed
integration runtime.
:type max_parallel_executions_per_node: int
:param data_flow_properties: Data flow properties for managed integration runtime.
:type data_flow_properties: ~dfaz_management_client.models.IntegrationRuntimeDataFlowProperties
:param v_net_properties: VNet properties for managed integration runtime.
:type v_net_properties: ~dfaz_management_client.models.IntegrationRuntimeVNetProperties
"""
_validation = {
'number_of_nodes': {'minimum': 1},
'max_parallel_executions_per_node': {'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'location': {'key': 'location', 'type': 'str'},
'node_size': {'key': 'nodeSize', 'type': 'str'},
'number_of_nodes': {'key': 'numberOfNodes', 'type': 'int'},
'max_parallel_executions_per_node': {'key': 'maxParallelExecutionsPerNode', 'type': 'int'},
'data_flow_properties': {'key': 'dataFlowProperties', 'type': 'IntegrationRuntimeDataFlowProperties'},
'v_net_properties': {'key': 'vNetProperties', 'type': 'IntegrationRuntimeVNetProperties'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeComputeProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.location = kwargs.get('location', None)
self.node_size = kwargs.get('node_size', None)
self.number_of_nodes = kwargs.get('number_of_nodes', None)
self.max_parallel_executions_per_node = kwargs.get('max_parallel_executions_per_node', None)
self.data_flow_properties = kwargs.get('data_flow_properties', None)
self.v_net_properties = kwargs.get('v_net_properties', None)
class IntegrationRuntimeConnectionInfo(msrest.serialization.Model):
"""Connection information for encrypting the on-premises data source credentials.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar service_token: The token generated in the service. Callers use this token to authenticate
to the integration runtime.
:vartype service_token: str
:ivar identity_cert_thumbprint: The integration runtime SSL certificate thumbprint. Click-Once
application uses it to do server validation.
:vartype identity_cert_thumbprint: str
:ivar host_service_uri: The on-premises integration runtime host URL.
:vartype host_service_uri: str
:ivar version: The integration runtime version.
:vartype version: str
:ivar public_key: The public key for encrypting a credential when transferring the credential
to the integration runtime.
:vartype public_key: str
:ivar is_identity_cert_exprired: Whether the identity certificate is expired.
:vartype is_identity_cert_exprired: bool
"""
_validation = {
'service_token': {'readonly': True},
'identity_cert_thumbprint': {'readonly': True},
'host_service_uri': {'readonly': True},
'version': {'readonly': True},
'public_key': {'readonly': True},
'is_identity_cert_exprired': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'service_token': {'key': 'serviceToken', 'type': 'str'},
'identity_cert_thumbprint': {'key': 'identityCertThumbprint', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'public_key': {'key': 'publicKey', 'type': 'str'},
'is_identity_cert_exprired': {'key': 'isIdentityCertExprired', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeConnectionInfo, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.service_token = None
self.identity_cert_thumbprint = None
self.host_service_uri = None
self.version = None
self.public_key = None
self.is_identity_cert_exprired = None
class IntegrationRuntimeCustomSetupScriptProperties(msrest.serialization.Model):
"""Custom setup script properties for a managed dedicated integration runtime.
:param blob_container_uri: The URI of the Azure blob container that contains the custom setup
script.
:type blob_container_uri: str
:param sas_token: The SAS token of the Azure blob container.
:type sas_token: ~dfaz_management_client.models.SecureString
"""
_attribute_map = {
'blob_container_uri': {'key': 'blobContainerUri', 'type': 'str'},
'sas_token': {'key': 'sasToken', 'type': 'SecureString'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeCustomSetupScriptProperties, self).__init__(**kwargs)
self.blob_container_uri = kwargs.get('blob_container_uri', None)
self.sas_token = kwargs.get('sas_token', None)
class IntegrationRuntimeDataFlowProperties(msrest.serialization.Model):
"""Data flow properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param compute_type: Compute type of the cluster that will execute the data flow job. Possible
values include: "General", "MemoryOptimized", "ComputeOptimized".
:type compute_type: str or ~dfaz_management_client.models.DataFlowComputeType
:param core_count: Core count of the cluster that will execute the data flow job. Supported values
are: 8, 16, 32, 48, 80, 144 and 272.
:type core_count: int
:param time_to_live: Time to live (in minutes) setting of the cluster that will execute the data
flow job.
:type time_to_live: int
"""
_validation = {
'time_to_live': {'minimum': 0},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'compute_type': {'key': 'computeType', 'type': 'str'},
'core_count': {'key': 'coreCount', 'type': 'int'},
'time_to_live': {'key': 'timeToLive', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeDataFlowProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.compute_type = kwargs.get('compute_type', None)
self.core_count = kwargs.get('core_count', None)
self.time_to_live = kwargs.get('time_to_live', None)
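# --- Illustrative sketch (not part of the generated models) -------------------
# How compute properties for a managed integration runtime might be assembled.
# Location, node size and counts below are placeholders, not service defaults.
def _example_compute_properties():
    data_flow = IntegrationRuntimeDataFlowProperties(
        compute_type="General",  # one of the documented DataFlowComputeType values
        core_count=8,
        time_to_live=10,
    )
    return IntegrationRuntimeComputeProperties(
        location="WestUS",
        node_size="Standard_D2_v3",
        number_of_nodes=1,
        max_parallel_executions_per_node=2,
        data_flow_properties=data_flow,
    )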
class IntegrationRuntimeDataProxyProperties(msrest.serialization.Model):
"""Data proxy properties for a managed dedicated integration runtime.
:param connect_via: The self-hosted integration runtime reference.
:type connect_via: ~dfaz_management_client.models.EntityReference
:param staging_linked_service: The staging linked service reference.
:type staging_linked_service: ~dfaz_management_client.models.EntityReference
:param path: The path to contain the staged data in the Blob storage.
:type path: str
"""
_attribute_map = {
'connect_via': {'key': 'connectVia', 'type': 'EntityReference'},
'staging_linked_service': {'key': 'stagingLinkedService', 'type': 'EntityReference'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeDataProxyProperties, self).__init__(**kwargs)
self.connect_via = kwargs.get('connect_via', None)
self.staging_linked_service = kwargs.get('staging_linked_service', None)
self.path = kwargs.get('path', None)
class IntegrationRuntimeListResponse(msrest.serialization.Model):
"""A list of integration runtime resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of integration runtimes.
:type value: list[~dfaz_management_client.models.IntegrationRuntimeResource]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationRuntimeResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class IntegrationRuntimeMonitoringData(msrest.serialization.Model):
"""Get monitoring data response.
:param name: Integration runtime name.
:type name: str
:param nodes: Integration runtime node monitoring data.
:type nodes: list[~dfaz_management_client.models.IntegrationRuntimeNodeMonitoringData]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'nodes': {'key': 'nodes', 'type': '[IntegrationRuntimeNodeMonitoringData]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeMonitoringData, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.nodes = kwargs.get('nodes', None)
class IntegrationRuntimeNodeIpAddress(msrest.serialization.Model):
"""The IP address of self-hosted integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ip_address: The IP address of self-hosted integration runtime node.
:vartype ip_address: str
"""
_validation = {
'ip_address': {'readonly': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeNodeIpAddress, self).__init__(**kwargs)
self.ip_address = None
class IntegrationRuntimeNodeMonitoringData(msrest.serialization.Model):
"""Monitoring data for integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar node_name: Name of the integration runtime node.
:vartype node_name: str
:ivar available_memory_in_mb: Available memory (MB) on the integration runtime node.
:vartype available_memory_in_mb: int
:ivar cpu_utilization: CPU percentage on the integration runtime node.
:vartype cpu_utilization: int
:ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration runtime node.
:vartype concurrent_jobs_limit: int
:ivar concurrent_jobs_running: The number of jobs currently running on the integration runtime
node.
:vartype concurrent_jobs_running: int
:ivar max_concurrent_jobs: The maximum concurrent jobs in this integration runtime.
:vartype max_concurrent_jobs: int
:ivar sent_bytes: Sent bytes on the integration runtime node.
:vartype sent_bytes: float
:ivar received_bytes: Received bytes on the integration runtime node.
:vartype received_bytes: float
"""
_validation = {
'node_name': {'readonly': True},
'available_memory_in_mb': {'readonly': True},
'cpu_utilization': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'concurrent_jobs_running': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
'sent_bytes': {'readonly': True},
'received_bytes': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_name': {'key': 'nodeName', 'type': 'str'},
'available_memory_in_mb': {'key': 'availableMemoryInMB', 'type': 'int'},
'cpu_utilization': {'key': 'cpuUtilization', 'type': 'int'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'concurrent_jobs_running': {'key': 'concurrentJobsRunning', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
'sent_bytes': {'key': 'sentBytes', 'type': 'float'},
'received_bytes': {'key': 'receivedBytes', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeNodeMonitoringData, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.node_name = None
self.available_memory_in_mb = None
self.cpu_utilization = None
self.concurrent_jobs_limit = None
self.concurrent_jobs_running = None
self.max_concurrent_jobs = None
self.sent_bytes = None
self.received_bytes = None
class IntegrationRuntimeReference(msrest.serialization.Model):
"""Integration runtime reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Type of integration runtime. Default value:
"IntegrationRuntimeReference".
:vartype type: str
:param reference_name: Required. Reference integration runtime name.
:type reference_name: str
:param parameters: Arguments for integration runtime.
:type parameters: dict[str, object]
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
type = "IntegrationRuntimeReference"
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
self.parameters = kwargs.get('parameters', None)
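# --- Illustrative sketch (not part of the generated models) -------------------
# 'type' is a class-level constant ("IntegrationRuntimeReference"), so only the
# reference name (a placeholder here) needs to be supplied.
def _example_runtime_reference():
    return IntegrationRuntimeReference(reference_name="exampleIntegrationRuntime")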
class IntegrationRuntimeRegenerateKeyParameters(msrest.serialization.Model):
"""Parameters to regenerate the authentication key.
:param key_name: The name of the authentication key to regenerate. Possible values include:
"authKey1", "authKey2".
:type key_name: str or ~dfaz_management_client.models.IntegrationRuntimeAuthKeyName
"""
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeRegenerateKeyParameters, self).__init__(**kwargs)
self.key_name = kwargs.get('key_name', None)
class SubResource(msrest.serialization.Model):
"""Azure Data Factory nested resource, which belongs to a factory.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.etag = None
class IntegrationRuntimeResource(SubResource):
"""Integration runtime resource type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
:param properties: Required. Integration runtime properties.
:type properties: ~dfaz_management_client.models.IntegrationRuntime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IntegrationRuntime'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeResource, self).__init__(**kwargs)
self.properties = kwargs['properties']
class IntegrationRuntimeSsisCatalogInfo(msrest.serialization.Model):
"""Catalog information for managed dedicated integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param catalog_server_endpoint: The catalog database server URL.
:type catalog_server_endpoint: str
:param catalog_admin_user_name: The administrator user name of catalog database.
:type catalog_admin_user_name: str
:param catalog_admin_password: The password of the administrator user account of the catalog
database.
:type catalog_admin_password: ~dfaz_management_client.models.SecureString
:param catalog_pricing_tier: The pricing tier for the catalog database. The valid values could
be found in https://azure.microsoft.com/en-us/pricing/details/sql-database/. Possible values
include: "Basic", "Standard", "Premium", "PremiumRS".
:type catalog_pricing_tier: str or
~dfaz_management_client.models.IntegrationRuntimeSsisCatalogPricingTier
"""
_validation = {
'catalog_admin_user_name': {'max_length': 128, 'min_length': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'catalog_server_endpoint': {'key': 'catalogServerEndpoint', 'type': 'str'},
'catalog_admin_user_name': {'key': 'catalogAdminUserName', 'type': 'str'},
'catalog_admin_password': {'key': 'catalogAdminPassword', 'type': 'SecureString'},
'catalog_pricing_tier': {'key': 'catalogPricingTier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeSsisCatalogInfo, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.catalog_server_endpoint = kwargs.get('catalog_server_endpoint', None)
self.catalog_admin_user_name = kwargs.get('catalog_admin_user_name', None)
self.catalog_admin_password = kwargs.get('catalog_admin_password', None)
self.catalog_pricing_tier = kwargs.get('catalog_pricing_tier', None)
class IntegrationRuntimeSsisProperties(msrest.serialization.Model):
"""SSIS properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param catalog_info: Catalog information for managed dedicated integration runtime.
:type catalog_info: ~dfaz_management_client.models.IntegrationRuntimeSsisCatalogInfo
:param license_type: License type for bringing your own license scenario. Possible values
include: "BasePrice", "LicenseIncluded".
:type license_type: str or ~dfaz_management_client.models.IntegrationRuntimeLicenseType
:param custom_setup_script_properties: Custom setup script properties for a managed dedicated
integration runtime.
:type custom_setup_script_properties:
~dfaz_management_client.models.IntegrationRuntimeCustomSetupScriptProperties
:param data_proxy_properties: Data proxy properties for a managed dedicated integration
runtime.
:type data_proxy_properties:
~dfaz_management_client.models.IntegrationRuntimeDataProxyProperties
:param edition: The edition for the SSIS Integration Runtime. Possible values include:
"Standard", "Enterprise".
:type edition: str or ~dfaz_management_client.models.IntegrationRuntimeEdition
:param express_custom_setup_properties: Custom setup without script properties for a SSIS
integration runtime.
:type express_custom_setup_properties: list[~dfaz_management_client.models.CustomSetupBase]
:param package_stores: Package stores for the SSIS Integration Runtime.
:type package_stores: list[~dfaz_management_client.models.PackageStore]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'catalog_info': {'key': 'catalogInfo', 'type': 'IntegrationRuntimeSsisCatalogInfo'},
'license_type': {'key': 'licenseType', 'type': 'str'},
'custom_setup_script_properties': {'key': 'customSetupScriptProperties', 'type': 'IntegrationRuntimeCustomSetupScriptProperties'},
'data_proxy_properties': {'key': 'dataProxyProperties', 'type': 'IntegrationRuntimeDataProxyProperties'},
'edition': {'key': 'edition', 'type': 'str'},
'express_custom_setup_properties': {'key': 'expressCustomSetupProperties', 'type': '[CustomSetupBase]'},
'package_stores': {'key': 'packageStores', 'type': '[PackageStore]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeSsisProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.catalog_info = kwargs.get('catalog_info', None)
self.license_type = kwargs.get('license_type', None)
self.custom_setup_script_properties = kwargs.get('custom_setup_script_properties', None)
self.data_proxy_properties = kwargs.get('data_proxy_properties', None)
self.edition = kwargs.get('edition', None)
self.express_custom_setup_properties = kwargs.get('express_custom_setup_properties', None)
self.package_stores = kwargs.get('package_stores', None)
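# --- Illustrative sketch (not part of the generated models) -------------------
# A minimal SSIS configuration for a managed integration runtime. The endpoint,
# user name and tiers are placeholders; a real catalog would also carry
# 'catalog_admin_password' as a SecureString.
def _example_ssis_properties():
    catalog = IntegrationRuntimeSsisCatalogInfo(
        catalog_server_endpoint="example-sql-server.database.windows.net",
        catalog_admin_user_name="ssisadmin",
        catalog_pricing_tier="Basic",
    )
    return IntegrationRuntimeSsisProperties(
        catalog_info=catalog,
        license_type="LicenseIncluded",
        edition="Standard",
    )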
class IntegrationRuntimeStatus(msrest.serialization.Model):
"""Integration runtime status.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ManagedIntegrationRuntimeStatus, SelfHostedIntegrationRuntimeStatus.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:ivar data_factory_name: The data factory name which the integration runtime belongs to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include: "Initial", "Stopped",
"Started", "Starting", "Stopping", "NeedRegistration", "Online", "Limited", "Offline",
"AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
"""
_validation = {
'type': {'required': True},
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
}
_subtype_map = {
'type': {'Managed': 'ManagedIntegrationRuntimeStatus', 'SelfHosted': 'SelfHostedIntegrationRuntimeStatus'}
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeStatus, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = 'IntegrationRuntimeStatus' # type: str
self.data_factory_name = None
self.state = None
class IntegrationRuntimeStatusListResponse(msrest.serialization.Model):
"""A list of integration runtime status.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of integration runtime status.
:type value: list[~dfaz_management_client.models.IntegrationRuntimeStatusResponse]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IntegrationRuntimeStatusResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeStatusListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class IntegrationRuntimeStatusResponse(msrest.serialization.Model):
"""Integration runtime status response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: The integration runtime name.
:vartype name: str
:param properties: Required. Integration runtime properties.
:type properties: ~dfaz_management_client.models.IntegrationRuntimeStatus
"""
_validation = {
'name': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IntegrationRuntimeStatus'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeStatusResponse, self).__init__(**kwargs)
self.name = None
self.properties = kwargs['properties']
class IntegrationRuntimeVNetProperties(msrest.serialization.Model):
"""VNet properties for managed integration runtime.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param v_net_id: The ID of the VNet that this integration runtime will join.
:type v_net_id: str
:param subnet: The name of the subnet this integration runtime will join.
:type subnet: str
:param public_i_ps: Resource IDs of the public IP addresses that this integration runtime will
use.
:type public_i_ps: list[str]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'v_net_id': {'key': 'vNetId', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
'public_i_ps': {'key': 'publicIPs', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(IntegrationRuntimeVNetProperties, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.v_net_id = kwargs.get('v_net_id', None)
self.subnet = kwargs.get('subnet', None)
self.public_i_ps = kwargs.get('public_i_ps', None)
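# --- Illustrative sketch (not part of the generated models) -------------------
# VNet settings a managed integration runtime could join; both identifiers are
# placeholders.
def _example_vnet_properties():
    return IntegrationRuntimeVNetProperties(
        v_net_id="00000000-0000-0000-0000-000000000000",
        subnet="default",
    )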
class JobBaseInteractionEndpoints(msrest.serialization.Model):
"""Dictonary of endpoint URIs, keyed by enumerated JobEndpoints, can be added, removed or updated.
:param tracking:
:type tracking: str
:param studio:
:type studio: str
:param grafana:
:type grafana: str
:param tensorboard:
:type tensorboard: str
"""
_attribute_map = {
'tracking': {'key': 'Tracking', 'type': 'str'},
'studio': {'key': 'Studio', 'type': 'str'},
'grafana': {'key': 'Grafana', 'type': 'str'},
'tensorboard': {'key': 'Tensorboard', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobBaseInteractionEndpoints, self).__init__(**kwargs)
self.tracking = kwargs.get('tracking', None)
self.studio = kwargs.get('studio', None)
self.grafana = kwargs.get('grafana', None)
self.tensorboard = kwargs.get('tensorboard', None)
class LinkedIntegrationRuntime(msrest.serialization.Model):
"""The linked integration runtime information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the linked integration runtime.
:vartype name: str
:ivar subscription_id: The subscription ID to which the linked integration runtime belongs.
:vartype subscription_id: str
:ivar data_factory_name: The name of the data factory to which the linked integration runtime
belongs.
:vartype data_factory_name: str
:ivar data_factory_location: The location of the data factory to which the linked integration
runtime belongs.
:vartype data_factory_location: str
:ivar create_time: The time when the linked integration runtime was created.
:vartype create_time: ~datetime.datetime
"""
_validation = {
'name': {'readonly': True},
'subscription_id': {'readonly': True},
'data_factory_name': {'readonly': True},
'data_factory_location': {'readonly': True},
'create_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'data_factory_location': {'key': 'dataFactoryLocation', 'type': 'str'},
'create_time': {'key': 'createTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntime, self).__init__(**kwargs)
self.name = None
self.subscription_id = None
self.data_factory_name = None
self.data_factory_location = None
self.create_time = None
class LinkedIntegrationRuntimeType(msrest.serialization.Model):
"""The base definition of a linked integration runtime.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: LinkedIntegrationRuntimeKeyAuthorization, LinkedIntegrationRuntimeRbacAuthorization.
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. The authorization type for integration runtime
sharing. Constant filled by server.
:type authorization_type: str
"""
_validation = {
'authorization_type': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
}
_subtype_map = {
'authorization_type': {'Key': 'LinkedIntegrationRuntimeKeyAuthorization', 'RBAC': 'LinkedIntegrationRuntimeRbacAuthorization'}
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeType, self).__init__(**kwargs)
self.authorization_type = None # type: Optional[str]
class LinkedIntegrationRuntimeKeyAuthorization(LinkedIntegrationRuntimeType):
"""The key authorization type integration runtime.
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. The authorization type for integration runtime
sharing. Constant filled by server.
:type authorization_type: str
:param key: Required. The key used for authorization.
:type key: ~dfaz_management_client.models.SecureString
"""
_validation = {
'authorization_type': {'required': True},
'key': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
'key': {'key': 'key', 'type': 'SecureString'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeKeyAuthorization, self).__init__(**kwargs)
self.authorization_type = 'Key' # type: str
self.key = kwargs['key']
class LinkedIntegrationRuntimeRbacAuthorization(LinkedIntegrationRuntimeType):
"""The role based access control (RBAC) authorization type integration runtime.
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. The authorization type for integration runtime
sharing. Constant filled by server.
:type authorization_type: str
:param resource_id: Required. The resource identifier of the integration runtime to be shared.
:type resource_id: str
"""
_validation = {
'authorization_type': {'required': True},
'resource_id': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeRbacAuthorization, self).__init__(**kwargs)
self.authorization_type = 'RBAC' # type: str
self.resource_id = kwargs['resource_id']
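# --- Illustrative sketch (not part of the generated models) -------------------
# Sharing an integration runtime via RBAC authorization; the resource id below
# is a placeholder. The key-based variant would instead pass 'key' as a
# SecureString.
def _example_linked_runtime_sharing():
    return LinkedIntegrationRuntimeRbacAuthorization(
        resource_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/exampleRG/providers/Microsoft.DataFactory"
            "/factories/exampleFactory/integrationruntimes/exampleIR"
        ),
    )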
class LinkedIntegrationRuntimeRequest(msrest.serialization.Model):
"""Data factory name for linked integration runtime request.
All required parameters must be populated in order to send to Azure.
:param linked_factory_name: Required. The data factory name for linked integration runtime.
:type linked_factory_name: str
"""
_validation = {
'linked_factory_name': {'required': True},
}
_attribute_map = {
'linked_factory_name': {'key': 'factoryName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LinkedIntegrationRuntimeRequest, self).__init__(**kwargs)
self.linked_factory_name = kwargs['linked_factory_name']
class LinkedServiceReference(msrest.serialization.Model):
"""Linked service reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Linked service reference type. Default value: "LinkedServiceReference".
:vartype type: str
:param reference_name: Required. Reference LinkedService name.
:type reference_name: str
:param parameters: Arguments for LinkedService.
:type parameters: dict[str, object]
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
type = "LinkedServiceReference"
def __init__(
self,
**kwargs
):
super(LinkedServiceReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
self.parameters = kwargs.get('parameters', None)
class ManagedIntegrationRuntime(IntegrationRuntime):
"""Managed integration runtime, including managed elastic and managed dedicated integration runtimes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:param description: Integration runtime description.
:type description: str
:ivar state: Integration runtime state, only valid for managed dedicated integration runtime.
Possible values include: "Initial", "Stopped", "Started", "Starting", "Stopping",
"NeedRegistration", "Online", "Limited", "Offline", "AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
:param repo_configuration: Git repo information of the factory.
:type repo_configuration: ~dfaz_management_client.models.FactoryRepoConfiguration
:param fake_identity: This is only for az test.
:type fake_identity: ~dfaz_management_client.models.FakeFactoryIdentity
:param zones: This is only for az test.
:type zones: list[str]
:param compute_properties: The compute resource for managed integration runtime.
:type compute_properties: ~dfaz_management_client.models.IntegrationRuntimeComputeProperties
:param ssis_properties: SSIS properties for managed integration runtime.
:type ssis_properties: ~dfaz_management_client.models.IntegrationRuntimeSsisProperties
"""
_validation = {
'type': {'required': True},
'state': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'repo_configuration': {'key': 'repoConfiguration', 'type': 'FactoryRepoConfiguration'},
'fake_identity': {'key': 'fakeIdentity', 'type': 'FakeFactoryIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
'compute_properties': {'key': 'typeProperties.computeProperties', 'type': 'IntegrationRuntimeComputeProperties'},
'ssis_properties': {'key': 'typeProperties.ssisProperties', 'type': 'IntegrationRuntimeSsisProperties'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntime, self).__init__(**kwargs)
self.type = 'Managed' # type: str
self.state = None
self.repo_configuration = kwargs.get('repo_configuration', None)
self.fake_identity = kwargs.get('fake_identity', None)
self.zones = kwargs.get('zones', None)
self.compute_properties = kwargs.get('compute_properties', None)
self.ssis_properties = kwargs.get('ssis_properties', None)
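# --- Illustrative sketch (not part of the generated models) -------------------
# Wraps a minimal managed integration runtime in its resource envelope; id,
# name, etag and state are read-only and left to the server. Values below are
# placeholders.
def _example_managed_runtime_resource():
    runtime = ManagedIntegrationRuntime(
        description="example managed runtime",
        compute_properties=IntegrationRuntimeComputeProperties(
            location="WestUS",
            number_of_nodes=1,
        ),
    )
    return IntegrationRuntimeResource(properties=runtime)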
class ManagedIntegrationRuntimeError(msrest.serialization.Model):
"""Error definition for managed integration runtime.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar time: The time when the error occurred.
:vartype time: ~datetime.datetime
:ivar code: Error code.
:vartype code: str
:ivar parameters: Managed integration runtime error parameters.
:vartype parameters: list[str]
:ivar message: Error message.
:vartype message: str
"""
_validation = {
'time': {'readonly': True},
'code': {'readonly': True},
'parameters': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'time': {'key': 'time', 'type': 'iso-8601'},
'code': {'key': 'code', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeError, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.time = None
self.code = None
self.parameters = None
self.message = None
class ManagedIntegrationRuntimeNode(msrest.serialization.Model):
"""Properties of integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar node_id: The managed integration runtime node id.
:vartype node_id: str
:ivar status: The managed integration runtime node status. Possible values include: "Starting",
"Available", "Recycling", "Unavailable".
:vartype status: str or ~dfaz_management_client.models.ManagedIntegrationRuntimeNodeStatus
:param errors: The errors that occurred on this integration runtime node.
:type errors: list[~dfaz_management_client.models.ManagedIntegrationRuntimeError]
"""
_validation = {
'node_id': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_id': {'key': 'nodeId', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ManagedIntegrationRuntimeError]'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeNode, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.node_id = None
self.status = None
self.errors = kwargs.get('errors', None)
class ManagedIntegrationRuntimeOperationResult(msrest.serialization.Model):
"""Properties of managed integration runtime operation result.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar type: The operation type. Could be start or stop.
:vartype type: str
:ivar start_time: The start time of the operation.
:vartype start_time: ~datetime.datetime
:ivar result: The operation result.
:vartype result: str
:ivar error_code: The error code.
:vartype error_code: str
:ivar parameters: Managed integration runtime error parameters.
:vartype parameters: list[str]
:ivar activity_id: The activity id for the operation request.
:vartype activity_id: str
"""
_validation = {
'type': {'readonly': True},
'start_time': {'readonly': True},
'result': {'readonly': True},
'error_code': {'readonly': True},
'parameters': {'readonly': True},
'activity_id': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'result': {'key': 'result', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'activity_id': {'key': 'activityId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeOperationResult, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.type = None
self.start_time = None
self.result = None
self.error_code = None
self.parameters = None
self.activity_id = None
class ManagedIntegrationRuntimeStatus(IntegrationRuntimeStatus):
"""Managed integration runtime status.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime. Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:ivar data_factory_name: The data factory name which the integration runtime belongs to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include: "Initial", "Stopped",
"Started", "Starting", "Stopping", "NeedRegistration", "Online", "Limited", "Offline",
"AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
:ivar create_time: The time at which the integration runtime was created, in ISO8601 format.
:vartype create_time: ~datetime.datetime
:ivar nodes: The list of nodes for managed integration runtime.
:vartype nodes: list[~dfaz_management_client.models.ManagedIntegrationRuntimeNode]
:ivar other_errors: The errors that occurred on this integration runtime.
:vartype other_errors: list[~dfaz_management_client.models.ManagedIntegrationRuntimeError]
:ivar last_operation: The last operation result that occurred on this integration runtime.
:vartype last_operation:
~dfaz_management_client.models.ManagedIntegrationRuntimeOperationResult
"""
_validation = {
'type': {'required': True},
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
'create_time': {'readonly': True},
'nodes': {'readonly': True},
'other_errors': {'readonly': True},
'last_operation': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'create_time': {'key': 'typeProperties.createTime', 'type': 'iso-8601'},
'nodes': {'key': 'typeProperties.nodes', 'type': '[ManagedIntegrationRuntimeNode]'},
'other_errors': {'key': 'typeProperties.otherErrors', 'type': '[ManagedIntegrationRuntimeError]'},
'last_operation': {'key': 'typeProperties.lastOperation', 'type': 'ManagedIntegrationRuntimeOperationResult'},
}
def __init__(
self,
**kwargs
):
super(ManagedIntegrationRuntimeStatus, self).__init__(**kwargs)
self.type = 'Managed' # type: str
self.create_time = None
self.nodes = None
self.other_errors = None
self.last_operation = None
class PackageStore(msrest.serialization.Model):
"""Package store for the SSIS integration runtime.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the package store.
:type name: str
:param package_store_linked_service: Required. The package store linked service reference.
:type package_store_linked_service: ~dfaz_management_client.models.EntityReference
"""
_validation = {
'name': {'required': True},
'package_store_linked_service': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'package_store_linked_service': {'key': 'packageStoreLinkedService', 'type': 'EntityReference'},
}
def __init__(
self,
**kwargs
):
super(PackageStore, self).__init__(**kwargs)
self.name = kwargs['name']
self.package_store_linked_service = kwargs['package_store_linked_service']
class PipelineReference(msrest.serialization.Model):
"""Pipeline reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Pipeline reference type. Default value: "PipelineReference".
:vartype type: str
:param reference_name: Required. Reference pipeline name.
:type reference_name: str
:param name: Reference name.
:type name: str
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
type = "PipelineReference"
def __init__(
self,
**kwargs
):
super(PipelineReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
self.name = kwargs.get('name', None)
class RecurrenceSchedule(msrest.serialization.Model):
"""The recurrence schedule.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param minutes: The minutes.
:type minutes: list[int]
:param hours: The hours.
:type hours: list[int]
:param week_days: The days of the week.
:type week_days: list[str or ~dfaz_management_client.models.DaysOfWeek]
:param month_days: The month days.
:type month_days: list[int]
:param monthly_occurrences: The monthly occurrences.
:type monthly_occurrences: list[~dfaz_management_client.models.RecurrenceScheduleOccurrence]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'minutes': {'key': 'minutes', 'type': '[int]'},
'hours': {'key': 'hours', 'type': '[int]'},
'week_days': {'key': 'weekDays', 'type': '[str]'},
'month_days': {'key': 'monthDays', 'type': '[int]'},
'monthly_occurrences': {'key': 'monthlyOccurrences', 'type': '[RecurrenceScheduleOccurrence]'},
}
def __init__(
self,
**kwargs
):
super(RecurrenceSchedule, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.minutes = kwargs.get('minutes', None)
self.hours = kwargs.get('hours', None)
self.week_days = kwargs.get('week_days', None)
self.month_days = kwargs.get('month_days', None)
self.monthly_occurrences = kwargs.get('monthly_occurrences', None)
class RecurrenceScheduleOccurrence(msrest.serialization.Model):
"""The recurrence schedule occurrence.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param day: The day of the week. Possible values include: "Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday", "Saturday".
:type day: str or ~dfaz_management_client.models.DayOfWeek
:param occurrence: The occurrence.
:type occurrence: int
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'day': {'key': 'day', 'type': 'str'},
'occurrence': {'key': 'occurrence', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RecurrenceScheduleOccurrence, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.day = kwargs.get('day', None)
self.occurrence = kwargs.get('occurrence', None)
class ReplicaSet(msrest.serialization.Model):
"""Replica Set Definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar replica_set_id: ReplicaSet Id.
:vartype replica_set_id: str
:param location: Virtual network location.
:type location: str
:ivar vnet_site_id: Virtual network site id.
:vartype vnet_site_id: str
:param subnet_id: The id of the subnet that Domain Services will be deployed on, in the form
/virtualNetwork/vnetName/subnets/subnetName.
:type subnet_id: str
:ivar domain_controller_ip_address: List of Domain Controller IP Address.
:vartype domain_controller_ip_address: list[str]
:ivar external_access_ip_address: External access ip address.
:vartype external_access_ip_address: str
:ivar service_status: Status of Domain Service instance.
:vartype service_status: str
:ivar health_last_evaluated: Last domain evaluation run DateTime.
:vartype health_last_evaluated: ~datetime.datetime
:ivar health_monitors: List of Domain Health Monitors.
:vartype health_monitors: list[~dfaz_management_client.models.HealthMonitor]
:ivar health_alerts: List of Domain Health Alerts.
:vartype health_alerts: list[~dfaz_management_client.models.HealthAlert]
"""
_validation = {
'replica_set_id': {'readonly': True},
'vnet_site_id': {'readonly': True},
'domain_controller_ip_address': {'readonly': True},
'external_access_ip_address': {'readonly': True},
'service_status': {'readonly': True},
'health_last_evaluated': {'readonly': True},
'health_monitors': {'readonly': True},
'health_alerts': {'readonly': True},
}
_attribute_map = {
'replica_set_id': {'key': 'replicaSetId', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'vnet_site_id': {'key': 'vnetSiteId', 'type': 'str'},
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'domain_controller_ip_address': {'key': 'domainControllerIpAddress', 'type': '[str]'},
'external_access_ip_address': {'key': 'externalAccessIpAddress', 'type': 'str'},
'service_status': {'key': 'serviceStatus', 'type': 'str'},
'health_last_evaluated': {'key': 'healthLastEvaluated', 'type': 'rfc-1123'},
'health_monitors': {'key': 'healthMonitors', 'type': '[HealthMonitor]'},
'health_alerts': {'key': 'healthAlerts', 'type': '[HealthAlert]'},
}
def __init__(
self,
**kwargs
):
super(ReplicaSet, self).__init__(**kwargs)
self.replica_set_id = None
self.location = kwargs.get('location', None)
self.vnet_site_id = None
self.subnet_id = kwargs.get('subnet_id', None)
self.domain_controller_ip_address = None
self.external_access_ip_address = None
self.service_status = None
self.health_last_evaluated = None
self.health_monitors = None
self.health_alerts = None
class RerunTumblingWindowTrigger(Trigger):
"""Trigger that schedules pipeline reruns for all fixed time interval windows from a requested start time to requested end time.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type. Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param parent_trigger: Required. The parent trigger reference.
:type parent_trigger: object
:param requested_start_time: Required. The start time for the time period for which restatement
is initiated. Only UTC time is currently supported.
:type requested_start_time: ~datetime.datetime
:param requested_end_time: Required. The end time for the time period for which restatement is
initiated. Only UTC time is currently supported.
:type requested_end_time: ~datetime.datetime
:param rerun_concurrency: Required. The max number of parallel time windows (ready for
execution) for which a rerun is triggered.
:type rerun_concurrency: int
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'parent_trigger': {'required': True},
'requested_start_time': {'required': True},
'requested_end_time': {'required': True},
'rerun_concurrency': {'required': True, 'maximum': 50, 'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'parent_trigger': {'key': 'typeProperties.parentTrigger', 'type': 'object'},
'requested_start_time': {'key': 'typeProperties.requestedStartTime', 'type': 'iso-8601'},
'requested_end_time': {'key': 'typeProperties.requestedEndTime', 'type': 'iso-8601'},
'rerun_concurrency': {'key': 'typeProperties.rerunConcurrency', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RerunTumblingWindowTrigger, self).__init__(**kwargs)
self.type = 'RerunTumblingWindowTrigger' # type: str
self.parent_trigger = kwargs['parent_trigger']
self.requested_start_time = kwargs['requested_start_time']
self.requested_end_time = kwargs['requested_end_time']
self.rerun_concurrency = kwargs['rerun_concurrency']
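# --- Illustrative sketch (not part of the generated models) -------------------
# Requests reruns of a tumbling window trigger over a one-day UTC window. The
# parent trigger reference and dates are placeholders; 'parent_trigger' is
# untyped (object) in this model, so a plain dict is used here.
def _example_rerun_trigger():
    import datetime
    return RerunTumblingWindowTrigger(
        parent_trigger={"referenceName": "exampleTumblingWindowTrigger",
                        "type": "TriggerReference"},
        requested_start_time=datetime.datetime(2021, 1, 1),
        requested_end_time=datetime.datetime(2021, 1, 2),
        rerun_concurrency=4,
    )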
class RetryPolicy(msrest.serialization.Model):
"""Execution policy for an activity.
:param count: Maximum ordinary retry attempts. Default is 0. Type: integer (or Expression with
resultType integer), minimum: 0.
:type count: object
:param interval_in_seconds: Interval between retries in seconds. Default is 30.
:type interval_in_seconds: int
"""
_validation = {
'interval_in_seconds': {'maximum': 86400, 'minimum': 30},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'object'},
'interval_in_seconds': {'key': 'intervalInSeconds', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RetryPolicy, self).__init__(**kwargs)
self.count = kwargs.get('count', None)
self.interval_in_seconds = kwargs.get('interval_in_seconds', None)
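# --- Illustrative sketch (not part of the generated models) -------------------
# Three retries, sixty seconds apart (placeholder values within the documented
# 30-86400 second range).
def _example_retry_policy():
    return RetryPolicy(count=3, interval_in_seconds=60)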
class ScheduleTrigger(MultiplePipelineTrigger):
"""Trigger that creates pipeline runs periodically, on schedule.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipelines: Pipelines that need to be started.
:type pipelines: list[~dfaz_management_client.models.TriggerPipelineReference]
:param recurrence: Required. Recurrence schedule configuration.
:type recurrence: ~dfaz_management_client.models.ScheduleTriggerRecurrence
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'recurrence': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipelines': {'key': 'pipelines', 'type': '[TriggerPipelineReference]'},
'recurrence': {'key': 'typeProperties.recurrence', 'type': 'ScheduleTriggerRecurrence'},
}
def __init__(
self,
**kwargs
):
super(ScheduleTrigger, self).__init__(**kwargs)
self.type = 'ScheduleTrigger' # type: str
self.recurrence = kwargs['recurrence']
class ScheduleTriggerRecurrence(msrest.serialization.Model):
"""The workflow trigger recurrence.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param frequency: The frequency. Possible values include: "NotSpecified", "Minute", "Hour",
"Day", "Week", "Month", "Year".
:type frequency: str or ~dfaz_management_client.models.RecurrenceFrequency
:param interval: The interval.
:type interval: int
:param start_time: The start time.
:type start_time: ~datetime.datetime
:param end_time: The end time.
:type end_time: ~datetime.datetime
:param time_zone: The time zone.
:type time_zone: str
:param schedule: The recurrence schedule.
:type schedule: ~dfaz_management_client.models.RecurrenceSchedule
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'frequency': {'key': 'frequency', 'type': 'str'},
'interval': {'key': 'interval', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'RecurrenceSchedule'},
}
def __init__(
self,
**kwargs
):
super(ScheduleTriggerRecurrence, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.frequency = kwargs.get('frequency', None)
self.interval = kwargs.get('interval', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.time_zone = kwargs.get('time_zone', None)
self.schedule = kwargs.get('schedule', None)
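# Illustrative sketch (not part of the generated models): the ScheduleTrigger
# class above requires a recurrence object; the frequency/interval values and
# description here are hypothetical.
def _example_schedule_trigger():
    recurrence = ScheduleTriggerRecurrence(frequency="Minute", interval=15)
    return ScheduleTrigger(description="Fires every 15 minutes", recurrence=recurrence)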
class SecretBase(msrest.serialization.Model):
"""The base definition of a secret type.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SecureString.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of the secret.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'SecureString': 'SecureString'}
}
def __init__(
self,
**kwargs
):
super(SecretBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class SecureString(SecretBase):
"""Azure Data Factory secure string definition. The string value will be masked with asterisks '*' during Get or List API calls.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of the secret.Constant filled by server.
:type type: str
:param value: Required. Value of secure string.
:type value: str
"""
_validation = {
'type': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecureString, self).__init__(**kwargs)
self.type = 'SecureString' # type: str
self.value = kwargs['value']
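# Illustrative sketch (not part of the generated models): secrets are wrapped
# in SecureString so the service masks the value on Get/List calls; the value
# below is a placeholder.
def _example_secure_string():
    return SecureString(value="placeholder-secret-value")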
class SelfDependencyTumblingWindowTriggerReference(DependencyReference):
"""Self referenced tumbling window trigger dependency.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of dependency reference.Constant filled by server.
:type type: str
:param offset: Required. Timespan applied to the start time of a tumbling window when
evaluating dependency.
:type offset: str
:param size: The size of the window when evaluating the dependency. If undefined the frequency
of the tumbling window will be used.
:type size: str
"""
_validation = {
'type': {'required': True},
'offset': {'required': True, 'max_length': 15, 'min_length': 8, 'pattern': r'-((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
'size': {'max_length': 15, 'min_length': 8, 'pattern': r'((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelfDependencyTumblingWindowTriggerReference, self).__init__(**kwargs)
self.type = 'SelfDependencyTumblingWindowTriggerReference' # type: str
self.offset = kwargs['offset']
self.size = kwargs.get('size', None)
class SelfHostedIntegrationRuntime(IntegrationRuntime):
"""Self-hosted integration runtime.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime.Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:param description: Integration runtime description.
:type description: str
:param linked_info: The base definition of a linked integration runtime.
:type linked_info: ~dfaz_management_client.models.LinkedIntegrationRuntimeType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'linked_info': {'key': 'typeProperties.linkedInfo', 'type': 'LinkedIntegrationRuntimeType'},
}
def __init__(
self,
**kwargs
):
super(SelfHostedIntegrationRuntime, self).__init__(**kwargs)
self.type = 'SelfHosted' # type: str
self.linked_info = kwargs.get('linked_info', None)
class SelfHostedIntegrationRuntimeNode(msrest.serialization.Model):
"""Properties of Self-hosted integration runtime node.
Variables are only populated by the server, and will be ignored when sending a request.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:ivar node_name: Name of the integration runtime node.
:vartype node_name: str
:ivar machine_name: Machine name of the integration runtime node.
:vartype machine_name: str
:ivar host_service_uri: URI for the host machine of the integration runtime.
:vartype host_service_uri: str
:ivar status: Status of the integration runtime node. Possible values include:
"NeedRegistration", "Online", "Limited", "Offline", "Upgrading", "Initializing",
"InitializeFailed".
:vartype status: str or ~dfaz_management_client.models.SelfHostedIntegrationRuntimeNodeStatus
:ivar capabilities: The integration runtime capabilities dictionary.
:vartype capabilities: dict[str, str]
:ivar version_status: Status of the integration runtime node version.
:vartype version_status: str
:ivar version: Version of the integration runtime node.
:vartype version: str
:ivar register_time: The time at which the integration runtime node was registered in ISO8601
format.
:vartype register_time: ~datetime.datetime
:ivar last_connect_time: The most recent time at which the integration runtime was connected in
ISO8601 format.
:vartype last_connect_time: ~datetime.datetime
:ivar expiry_time: The time at which the integration runtime will expire in ISO8601 format.
:vartype expiry_time: ~datetime.datetime
:ivar last_start_time: The time the node last started up.
:vartype last_start_time: ~datetime.datetime
:ivar last_stop_time: The integration runtime node last stop time.
:vartype last_stop_time: ~datetime.datetime
:ivar last_update_result: The result of the last integration runtime node update. Possible
values include: "None", "Succeed", "Fail".
:vartype last_update_result: str or
~dfaz_management_client.models.IntegrationRuntimeUpdateResult
:ivar last_start_update_time: The last time for the integration runtime node update start.
:vartype last_start_update_time: ~datetime.datetime
:ivar last_end_update_time: The last time for the integration runtime node update end.
:vartype last_end_update_time: ~datetime.datetime
:ivar is_active_dispatcher: Indicates whether this node is the active dispatcher for
integration runtime requests.
:vartype is_active_dispatcher: bool
:ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration runtime node.
:vartype concurrent_jobs_limit: int
:ivar max_concurrent_jobs: The maximum concurrent jobs in this integration runtime.
:vartype max_concurrent_jobs: int
"""
_validation = {
'node_name': {'readonly': True},
'machine_name': {'readonly': True},
'host_service_uri': {'readonly': True},
'status': {'readonly': True},
'capabilities': {'readonly': True},
'version_status': {'readonly': True},
'version': {'readonly': True},
'register_time': {'readonly': True},
'last_connect_time': {'readonly': True},
'expiry_time': {'readonly': True},
'last_start_time': {'readonly': True},
'last_stop_time': {'readonly': True},
'last_update_result': {'readonly': True},
'last_start_update_time': {'readonly': True},
'last_end_update_time': {'readonly': True},
'is_active_dispatcher': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'node_name': {'key': 'nodeName', 'type': 'str'},
'machine_name': {'key': 'machineName', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '{str}'},
'version_status': {'key': 'versionStatus', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'register_time': {'key': 'registerTime', 'type': 'iso-8601'},
'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'},
'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'},
'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'},
'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'},
'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'},
'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(SelfHostedIntegrationRuntimeNode, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.node_name = None
self.machine_name = None
self.host_service_uri = None
self.status = None
self.capabilities = None
self.version_status = None
self.version = None
self.register_time = None
self.last_connect_time = None
self.expiry_time = None
self.last_start_time = None
self.last_stop_time = None
self.last_update_result = None
self.last_start_update_time = None
self.last_end_update_time = None
self.is_active_dispatcher = None
self.concurrent_jobs_limit = None
self.max_concurrent_jobs = None
class SelfHostedIntegrationRuntimeStatus(IntegrationRuntimeStatus):
"""Self-hosted integration runtime status.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Type of integration runtime.Constant filled by server. Possible values
include: "Managed", "SelfHosted".
:type type: str or ~dfaz_management_client.models.IntegrationRuntimeType
:ivar data_factory_name: The data factory name which the integration runtime belong to.
:vartype data_factory_name: str
:ivar state: The state of integration runtime. Possible values include: "Initial", "Stopped",
"Started", "Starting", "Stopping", "NeedRegistration", "Online", "Limited", "Offline",
"AccessDenied".
:vartype state: str or ~dfaz_management_client.models.IntegrationRuntimeState
:ivar create_time: The time at which the integration runtime was created, in ISO8601 format.
:vartype create_time: ~datetime.datetime
:ivar task_queue_id: The task queue id of the integration runtime.
:vartype task_queue_id: str
:ivar internal_channel_encryption: It is used to set the encryption mode for node-node
communication channel (when more than 2 self-hosted integration runtime nodes exist). Possible
values include: "NotSet", "SslEncrypted", "NotEncrypted".
:vartype internal_channel_encryption: str or
~dfaz_management_client.models.IntegrationRuntimeInternalChannelEncryptionMode
:ivar version: Version of the integration runtime.
:vartype version: str
:param nodes: The list of nodes for this integration runtime.
:type nodes: list[~dfaz_management_client.models.SelfHostedIntegrationRuntimeNode]
:ivar scheduled_update_date: The date at which the integration runtime will be scheduled to
update, in ISO8601 format.
:vartype scheduled_update_date: ~datetime.datetime
:ivar update_delay_offset: The time in the date scheduled by service to update the integration
runtime, e.g., PT03H is 3 hours.
:vartype update_delay_offset: str
:ivar local_time_zone_offset: The local time zone offset in hours.
:vartype local_time_zone_offset: str
:ivar capabilities: Object with additional information about integration runtime capabilities.
:vartype capabilities: dict[str, str]
:ivar service_urls: The URLs for the services used in integration runtime backend service.
:vartype service_urls: list[str]
:ivar auto_update: Whether Self-hosted integration runtime auto update has been turned on.
Possible values include: "On", "Off", "fakeValue1", "fakeValue2", "fakeValue3", "fakeValue4",
"fakeValue5", "fakeValue6".
:vartype auto_update: str or ~dfaz_management_client.models.IntegrationRuntimeAutoUpdate
:ivar version_status: Status of the integration runtime version.
:vartype version_status: str
:param links: The list of linked integration runtimes that are created to share with this
integration runtime.
:type links: list[~dfaz_management_client.models.LinkedIntegrationRuntime]
:ivar pushed_version: The version that the integration runtime is going to update to.
:vartype pushed_version: str
:ivar latest_version: The latest version on download center.
:vartype latest_version: str
:ivar auto_update_eta: The estimated time when the self-hosted integration runtime will be
updated.
:vartype auto_update_eta: ~datetime.datetime
"""
_validation = {
'type': {'required': True},
'data_factory_name': {'readonly': True},
'state': {'readonly': True},
'create_time': {'readonly': True},
'task_queue_id': {'readonly': True},
'internal_channel_encryption': {'readonly': True},
'version': {'readonly': True},
'scheduled_update_date': {'readonly': True},
'update_delay_offset': {'readonly': True},
'local_time_zone_offset': {'readonly': True},
'capabilities': {'readonly': True},
'service_urls': {'readonly': True},
'auto_update': {'readonly': True},
'version_status': {'readonly': True},
'pushed_version': {'readonly': True},
'latest_version': {'readonly': True},
'auto_update_eta': {'readonly': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'data_factory_name': {'key': 'dataFactoryName', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'create_time': {'key': 'typeProperties.createTime', 'type': 'iso-8601'},
'task_queue_id': {'key': 'typeProperties.taskQueueId', 'type': 'str'},
'internal_channel_encryption': {'key': 'typeProperties.internalChannelEncryption', 'type': 'str'},
'version': {'key': 'typeProperties.version', 'type': 'str'},
'nodes': {'key': 'typeProperties.nodes', 'type': '[SelfHostedIntegrationRuntimeNode]'},
'scheduled_update_date': {'key': 'typeProperties.scheduledUpdateDate', 'type': 'iso-8601'},
'update_delay_offset': {'key': 'typeProperties.updateDelayOffset', 'type': 'str'},
'local_time_zone_offset': {'key': 'typeProperties.localTimeZoneOffset', 'type': 'str'},
'capabilities': {'key': 'typeProperties.capabilities', 'type': '{str}'},
'service_urls': {'key': 'typeProperties.serviceUrls', 'type': '[str]'},
'auto_update': {'key': 'typeProperties.autoUpdate', 'type': 'str'},
'version_status': {'key': 'typeProperties.versionStatus', 'type': 'str'},
'links': {'key': 'typeProperties.links', 'type': '[LinkedIntegrationRuntime]'},
'pushed_version': {'key': 'typeProperties.pushedVersion', 'type': 'str'},
'latest_version': {'key': 'typeProperties.latestVersion', 'type': 'str'},
'auto_update_eta': {'key': 'typeProperties.autoUpdateETA', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SelfHostedIntegrationRuntimeStatus, self).__init__(**kwargs)
self.type = 'SelfHosted' # type: str
self.create_time = None
self.task_queue_id = None
self.internal_channel_encryption = None
self.version = None
self.nodes = kwargs.get('nodes', None)
self.scheduled_update_date = None
self.update_delay_offset = None
self.local_time_zone_offset = None
self.capabilities = None
self.service_urls = None
self.auto_update = None
self.version_status = None
self.links = kwargs.get('links', None)
self.pushed_version = None
self.latest_version = None
self.auto_update_eta = None
class SsisObjectMetadata(msrest.serialization.Model):
"""SSIS object metadata.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SsisEnvironment, SsisFolder, SsisPackage, SsisProject.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata.Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
_subtype_map = {
'type': {'Environment': 'SsisEnvironment', 'Folder': 'SsisFolder', 'Package': 'SsisPackage', 'Project': 'SsisProject'}
}
def __init__(
self,
**kwargs
):
super(SsisObjectMetadata, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
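# Note: SsisObjectMetadata is a polymorphic base class. _subtype_map ties the
# wire value of 'type' to a concrete subclass for deserialization, and each
# subclass constructor pins self.type (e.g. SsisFolder sets 'Folder'), so
# callers normally work with the subclasses rather than this base directly.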
class SsisEnvironment(SsisObjectMetadata):
"""Ssis environment.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata.Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
:param folder_id: Folder id which contains environment.
:type folder_id: long
:param variables: Variable in environment.
:type variables: list[~dfaz_management_client.models.SsisVariable]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'folder_id': {'key': 'folderId', 'type': 'long'},
'variables': {'key': 'variables', 'type': '[SsisVariable]'},
}
def __init__(
self,
**kwargs
):
super(SsisEnvironment, self).__init__(**kwargs)
self.type = 'Environment' # type: str
self.folder_id = kwargs.get('folder_id', None)
self.variables = kwargs.get('variables', None)
class SsisEnvironmentReference(msrest.serialization.Model):
"""Ssis environment reference.
:param id: Environment reference id.
:type id: long
:param environment_folder_name: Environment folder name.
:type environment_folder_name: str
:param environment_name: Environment name.
:type environment_name: str
:param reference_type: Reference type.
:type reference_type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'environment_folder_name': {'key': 'environmentFolderName', 'type': 'str'},
'environment_name': {'key': 'environmentName', 'type': 'str'},
'reference_type': {'key': 'referenceType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisEnvironmentReference, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.environment_folder_name = kwargs.get('environment_folder_name', None)
self.environment_name = kwargs.get('environment_name', None)
self.reference_type = kwargs.get('reference_type', None)
class SsisFolder(SsisObjectMetadata):
"""Ssis folder.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata.Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisFolder, self).__init__(**kwargs)
self.type = 'Folder' # type: str
class SsisObjectMetadataListResponse(msrest.serialization.Model):
"""A list of SSIS object metadata.
:param value: List of SSIS object metadata.
:type value: list[~dfaz_management_client.models.SsisObjectMetadata]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[SsisObjectMetadata]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisObjectMetadataListResponse, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class SsisPackage(SsisObjectMetadata):
"""Ssis Package.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata.Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
:param folder_id: Folder id which contains package.
:type folder_id: long
:param project_version: Project version which contains package.
:type project_version: long
:param project_id: Project id which contains package.
:type project_id: long
:param parameters: Parameters in package.
:type parameters: list[~dfaz_management_client.models.SsisParameter]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'folder_id': {'key': 'folderId', 'type': 'long'},
'project_version': {'key': 'projectVersion', 'type': 'long'},
'project_id': {'key': 'projectId', 'type': 'long'},
'parameters': {'key': 'parameters', 'type': '[SsisParameter]'},
}
def __init__(
self,
**kwargs
):
super(SsisPackage, self).__init__(**kwargs)
self.type = 'Package' # type: str
self.folder_id = kwargs.get('folder_id', None)
self.project_version = kwargs.get('project_version', None)
self.project_id = kwargs.get('project_id', None)
self.parameters = kwargs.get('parameters', None)
class SsisParameter(msrest.serialization.Model):
"""Ssis parameter.
:param id: Parameter id.
:type id: long
:param name: Parameter name.
:type name: str
:param description: Parameter description.
:type description: str
:param data_type: Parameter type.
:type data_type: str
:param required: Whether parameter is required.
:type required: bool
:param sensitive: Whether parameter is sensitive.
:type sensitive: bool
:param design_default_value: Design default value of parameter.
:type design_default_value: str
:param default_value: Default value of parameter.
:type default_value: str
:param sensitive_default_value: Default sensitive value of parameter.
:type sensitive_default_value: str
:param value_type: Parameter value type.
:type value_type: str
:param value_set: Parameter value set.
:type value_set: bool
:param variable: Parameter reference variable.
:type variable: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'required': {'key': 'required', 'type': 'bool'},
'sensitive': {'key': 'sensitive', 'type': 'bool'},
'design_default_value': {'key': 'designDefaultValue', 'type': 'str'},
'default_value': {'key': 'defaultValue', 'type': 'str'},
'sensitive_default_value': {'key': 'sensitiveDefaultValue', 'type': 'str'},
'value_type': {'key': 'valueType', 'type': 'str'},
'value_set': {'key': 'valueSet', 'type': 'bool'},
'variable': {'key': 'variable', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisParameter, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.data_type = kwargs.get('data_type', None)
self.required = kwargs.get('required', None)
self.sensitive = kwargs.get('sensitive', None)
self.design_default_value = kwargs.get('design_default_value', None)
self.default_value = kwargs.get('default_value', None)
self.sensitive_default_value = kwargs.get('sensitive_default_value', None)
self.value_type = kwargs.get('value_type', None)
self.value_set = kwargs.get('value_set', None)
self.variable = kwargs.get('variable', None)
class SsisProject(SsisObjectMetadata):
"""Ssis project.
All required parameters must be populated in order to send to Azure.
:param type: Required. Type of metadata.Constant filled by server. Possible values include:
"Folder", "Project", "Package", "Environment".
:type type: str or ~dfaz_management_client.models.SsisObjectMetadataType
:param id: Metadata id.
:type id: long
:param name: Metadata name.
:type name: str
:param description: Metadata description.
:type description: str
:param folder_id: Folder id which contains project.
:type folder_id: long
:param version: Project version.
:type version: long
:param environment_refs: Environment reference in project.
:type environment_refs: list[~dfaz_management_client.models.SsisEnvironmentReference]
:param parameters: Parameters in project.
:type parameters: list[~dfaz_management_client.models.SsisParameter]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'folder_id': {'key': 'folderId', 'type': 'long'},
'version': {'key': 'version', 'type': 'long'},
'environment_refs': {'key': 'environmentRefs', 'type': '[SsisEnvironmentReference]'},
'parameters': {'key': 'parameters', 'type': '[SsisParameter]'},
}
def __init__(
self,
**kwargs
):
super(SsisProject, self).__init__(**kwargs)
self.type = 'Project' # type: str
self.folder_id = kwargs.get('folder_id', None)
self.version = kwargs.get('version', None)
self.environment_refs = kwargs.get('environment_refs', None)
self.parameters = kwargs.get('parameters', None)
class SsisVariable(msrest.serialization.Model):
"""Ssis variable.
:param id: Variable id.
:type id: long
:param name: Variable name.
:type name: str
:param description: Variable description.
:type description: str
:param data_type: Variable type.
:type data_type: str
:param sensitive: Whether variable is sensitive.
:type sensitive: bool
:param value: Variable value.
:type value: str
:param sensitive_value: Variable sensitive value.
:type sensitive_value: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'long'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'sensitive': {'key': 'sensitive', 'type': 'bool'},
'value': {'key': 'value', 'type': 'str'},
'sensitive_value': {'key': 'sensitiveValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SsisVariable, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.data_type = kwargs.get('data_type', None)
self.sensitive = kwargs.get('sensitive', None)
self.value = kwargs.get('value', None)
self.sensitive_value = kwargs.get('sensitive_value', None)
class TestJob(AutomlJob):
"""Automl Job definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_type: Required. Specifies the type of job.Constant filled by server. Possible
values include: "Command", "Sweep", "Labeling", "Pipeline", "Data", "AutoML".
:type job_type: str or ~dfaz_management_client.models.JobType
:ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated JobEndpoints, can

be added, removed or updated.
:vartype interaction_endpoints: ~dfaz_management_client.models.JobBaseInteractionEndpoints
:param description: The asset description text.
:type description: str
:param tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:type experiment_name: str
:param compute_binding: Required. computeBinding of the job.
:type compute_binding: str
:param status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused".
:type status: str or ~dfaz_management_client.models.JobStatus
:param max_run_duration_seconds: The max run duration in seconds, after which the job will be
cancelled.
:type max_run_duration_seconds: long
:param code_configuration: Required. Code Configuration of the job.
:type code_configuration: str
:param environment_id: Environment specification of the job.
:type environment_id: str
:param data_bindings: Mapping of data bindings used in the job.
:type data_bindings: object
:param distribution_configuration: Distribution configuration of the job. This should be one of
MpiConfiguration, TensorflowConfiguration, or PyTorchConfiguration.
:type distribution_configuration: object
:param run_type: Run type.
:type run_type: str
:param run_source: Run source used by services calling AutoML CreateParentRun;
if none is provided it defaults to "AutoML".
This value is used for RootAttribution.
:type run_source: str
:param num_iterations: Number of iterations.
:type num_iterations: int
:param training_type: Training type. Possible values include: "TrainFull", "TrainAndValidate",
"CrossValidate", "MeanCrossValidate".
:type training_type: str or ~dfaz_management_client.models.TrainingType
:param acquisition_function: Acquisition function. Possible values include: "EI", "PI", "UCB".
:type acquisition_function: str or ~dfaz_management_client.models.AcquisitionFunction
:param metrics: Optimization metrics.
:type metrics: list[str or ~dfaz_management_client.models.OptimizationMetric]
:param primary_metric: Primary optimization metric. Possible values include: "AUC_weighted",
"Accuracy", "Norm_macro_recall", "Average_precision_score_weighted",
"Precision_score_weighted", "Spearman_correlation", "Normalized_root_mean_squared_error",
"R2_score", "Normalized_mean_absolute_error", "Normalized_root_mean_squared_log_error",
"Mean_average_precision", "Iou".
:type primary_metric: str or ~dfaz_management_client.models.OptimizationMetric
:param train_split: Train split percentage.
:type train_split: float
:param acquisition_parameter: Acquisition parameter.
:type acquisition_parameter: float
:param num_cross_validation: Num cross validation.
:type num_cross_validation: int
:param target: Target.
:type target: str
:param aml_settings_json_string: AMLSettings Json string.
:type aml_settings_json_string: str
:param data_prep_json_string: Serialized DataPrep dataflow object.
:type data_prep_json_string: str
:param enable_subsampling: Enable subsampling.
:type enable_subsampling: bool
:param scenario: Which scenario is being used to mapping to a curated environment.
:type scenario: str
:param parent_run_id: The parent run id for the current parent run dto.
:type parent_run_id: str
:param test_type: Test type.
:type test_type: str
"""
_validation = {
'job_type': {'required': True},
'interaction_endpoints': {'readonly': True},
'compute_binding': {'required': True},
'code_configuration': {'required': True},
}
_attribute_map = {
'job_type': {'key': 'jobType', 'type': 'str'},
'interaction_endpoints': {'key': 'interactionEndpoints', 'type': 'JobBaseInteractionEndpoints'},
'description': {'key': 'description', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'compute_binding': {'key': 'computeBinding', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'max_run_duration_seconds': {'key': 'maxRunDurationSeconds', 'type': 'long'},
'code_configuration': {'key': 'codeConfiguration', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'data_bindings': {'key': 'dataBindings', 'type': 'object'},
'distribution_configuration': {'key': 'distributionConfiguration', 'type': 'object'},
'run_type': {'key': 'runType', 'type': 'str'},
'run_source': {'key': 'runSource', 'type': 'str'},
'num_iterations': {'key': 'numIterations', 'type': 'int'},
'training_type': {'key': 'trainingType', 'type': 'str'},
'acquisition_function': {'key': 'acquisitionFunction', 'type': 'str'},
'metrics': {'key': 'metrics', 'type': '[str]'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
'train_split': {'key': 'trainSplit', 'type': 'float'},
'acquisition_parameter': {'key': 'acquisitionParameter', 'type': 'float'},
'num_cross_validation': {'key': 'numCrossValidation', 'type': 'int'},
'target': {'key': 'target', 'type': 'str'},
'aml_settings_json_string': {'key': 'amlSettingsJsonString', 'type': 'str'},
'data_prep_json_string': {'key': 'dataPrepJsonString', 'type': 'str'},
'enable_subsampling': {'key': 'enableSubsampling', 'type': 'bool'},
'scenario': {'key': 'scenario', 'type': 'str'},
'parent_run_id': {'key': 'parentRunId', 'type': 'str'},
'test_type': {'key': 'testType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TestJob, self).__init__(**kwargs)
self.job_type = 'TestJob' # type: str
self.test_type = kwargs.get('test_type', None)
class TriggerDependencyReference(DependencyReference):
"""Trigger referenced dependency.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TumblingWindowTriggerDependencyReference.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of dependency reference.Constant filled by server.
:type type: str
:param reference_trigger: Required. Referenced trigger.
:type reference_trigger: ~dfaz_management_client.models.TriggerReference
"""
_validation = {
'type': {'required': True},
'reference_trigger': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_trigger': {'key': 'referenceTrigger', 'type': 'TriggerReference'},
}
_subtype_map = {
'type': {'TumblingWindowTriggerDependencyReference': 'TumblingWindowTriggerDependencyReference'}
}
def __init__(
self,
**kwargs
):
super(TriggerDependencyReference, self).__init__(**kwargs)
self.type = 'TriggerDependencyReference' # type: str
self.reference_trigger = kwargs['reference_trigger']
class TriggerFilterParameters(msrest.serialization.Model):
"""Query parameters for triggers.
:param continuation_token: The continuation token for getting the next page of results. Null
for first page.
:type continuation_token: str
:param parent_trigger_name: The name of the parent TumblingWindowTrigger to get the child rerun
triggers.
:type parent_trigger_name: str
"""
_attribute_map = {
'continuation_token': {'key': 'continuationToken', 'type': 'str'},
'parent_trigger_name': {'key': 'parentTriggerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerFilterParameters, self).__init__(**kwargs)
self.continuation_token = kwargs.get('continuation_token', None)
self.parent_trigger_name = kwargs.get('parent_trigger_name', None)
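# Illustrative sketch (not part of the generated models): paging is driven by
# feeding the continuation token from one query response into the next filter;
# the previous_response argument and the parent trigger name are hypothetical.
def _example_next_page_filter(previous_response):
    return TriggerFilterParameters(
        continuation_token=previous_response.continuation_token,
        parent_trigger_name="DailyWindowTrigger",
    )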
class TriggerListResponse(msrest.serialization.Model):
"""A list of trigger resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of triggers.
:type value: list[~dfaz_management_client.models.TriggerResource]
:param next_link: The link to the next page of results, if any remaining results exist.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggerResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerListResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class TriggerPipelineReference(msrest.serialization.Model):
"""Pipeline that needs to be triggered with the given parameters.
:param pipeline_reference: Pipeline reference.
:type pipeline_reference: ~dfaz_management_client.models.PipelineReference
:param parameters: Pipeline parameters.
:type parameters: dict[str, object]
"""
_attribute_map = {
'pipeline_reference': {'key': 'pipelineReference', 'type': 'PipelineReference'},
'parameters': {'key': 'parameters', 'type': '{object}'},
}
def __init__(
self,
**kwargs
):
super(TriggerPipelineReference, self).__init__(**kwargs)
self.pipeline_reference = kwargs.get('pipeline_reference', None)
self.parameters = kwargs.get('parameters', None)
class TriggerQueryResponse(msrest.serialization.Model):
"""A query of triggers.
All required parameters must be populated in order to send to Azure.
:param value: Required. List of triggers.
:type value: list[~dfaz_management_client.models.TriggerResource]
:param continuation_token: The continuation token for getting the next page of results, if any
remaining results exist, null otherwise.
:type continuation_token: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggerResource]'},
'continuation_token': {'key': 'continuationToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerQueryResponse, self).__init__(**kwargs)
self.value = kwargs['value']
self.continuation_token = kwargs.get('continuation_token', None)
class TriggerReference(msrest.serialization.Model):
"""Trigger reference type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Trigger reference type. Default value: "TriggerReference".
:vartype type: str
:param reference_name: Required. Reference trigger name.
:type reference_name: str
"""
_validation = {
'type': {'required': True, 'constant': True},
'reference_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
}
type = "TriggerReference"
def __init__(
self,
**kwargs
):
super(TriggerReference, self).__init__(**kwargs)
self.reference_name = kwargs['reference_name']
class TriggerResource(SubResource):
"""Trigger resource type.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar etag: Etag identifies change in the resource.
:vartype etag: str
:param properties: Required. Properties of the trigger.
:type properties: ~dfaz_management_client.models.Trigger
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Trigger'},
}
def __init__(
self,
**kwargs
):
super(TriggerResource, self).__init__(**kwargs)
self.properties = kwargs['properties']
class TriggerSubscriptionOperationStatus(msrest.serialization.Model):
"""Defines the response of a trigger subscription operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar trigger_name: Trigger name.
:vartype trigger_name: str
:ivar status: Event Subscription Status. Possible values include: "Enabled", "Provisioning",
"Deprovisioning", "Disabled", "Unknown".
:vartype status: str or ~dfaz_management_client.models.EventSubscriptionStatus
"""
_validation = {
'trigger_name': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'trigger_name': {'key': 'triggerName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerSubscriptionOperationStatus, self).__init__(**kwargs)
self.trigger_name = None
self.status = None
class TumblingWindowTrigger(Trigger):
"""Trigger that schedules pipeline runs for all fixed time interval windows from a start time without gaps and also supports backfill scenarios (when start time is in the past).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param type: Required. Trigger type.Constant filled by server.
:type type: str
:param description: Trigger description.
:type description: str
:ivar runtime_state: Indicates if trigger is running or not. Updated when Start/Stop APIs are
called on the Trigger. Possible values include: "Started", "Stopped", "Disabled".
:vartype runtime_state: str or ~dfaz_management_client.models.TriggerRuntimeState
:param annotations: List of tags that can be used for describing the trigger.
:type annotations: list[object]
:param pipeline: Required. Pipeline for which runs are created when an event is fired for
trigger window that is ready.
:type pipeline: ~dfaz_management_client.models.TriggerPipelineReference
:param frequency: Required. The frequency of the time windows. Possible values include:
"Minute", "Hour".
:type frequency: str or ~dfaz_management_client.models.TumblingWindowFrequency
:param interval: Required. The interval of the time windows. The minimum interval allowed is 15
Minutes.
:type interval: int
:param start_time: Required. The start time for the time period for the trigger during which
events are fired for windows that are ready. Only UTC time is currently supported.
:type start_time: ~datetime.datetime
:param end_time: The end time for the time period for the trigger during which events are fired
for windows that are ready. Only UTC time is currently supported.
:type end_time: ~datetime.datetime
:param delay: Specifies how long the trigger waits past due time before triggering new run. It
doesn't alter window start and end time. The default is 0. Type: string (or Expression with
resultType string), pattern: ((\d+).)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type delay: object
:param max_concurrency: Required. The max number of parallel time windows (ready for execution)
for which a new run is triggered.
:type max_concurrency: int
:param retry_policy: Retry policy that will be applied for failed pipeline runs.
:type retry_policy: ~dfaz_management_client.models.RetryPolicy
:param depends_on: Triggers that this trigger depends on. Only tumbling window triggers are
supported.
:type depends_on: list[~dfaz_management_client.models.DependencyReference]
"""
_validation = {
'type': {'required': True},
'runtime_state': {'readonly': True},
'pipeline': {'required': True},
'frequency': {'required': True},
'interval': {'required': True},
'start_time': {'required': True},
'max_concurrency': {'required': True, 'maximum': 50, 'minimum': 1},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'runtime_state': {'key': 'runtimeState', 'type': 'str'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'pipeline': {'key': 'pipeline', 'type': 'TriggerPipelineReference'},
'frequency': {'key': 'typeProperties.frequency', 'type': 'str'},
'interval': {'key': 'typeProperties.interval', 'type': 'int'},
'start_time': {'key': 'typeProperties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'typeProperties.endTime', 'type': 'iso-8601'},
'delay': {'key': 'typeProperties.delay', 'type': 'object'},
'max_concurrency': {'key': 'typeProperties.maxConcurrency', 'type': 'int'},
'retry_policy': {'key': 'typeProperties.retryPolicy', 'type': 'RetryPolicy'},
'depends_on': {'key': 'typeProperties.dependsOn', 'type': '[DependencyReference]'},
}
def __init__(
self,
**kwargs
):
super(TumblingWindowTrigger, self).__init__(**kwargs)
self.type = 'TumblingWindowTrigger' # type: str
self.pipeline = kwargs['pipeline']
self.frequency = kwargs['frequency']
self.interval = kwargs['interval']
self.start_time = kwargs['start_time']
self.end_time = kwargs.get('end_time', None)
self.delay = kwargs.get('delay', None)
self.max_concurrency = kwargs['max_concurrency']
self.retry_policy = kwargs.get('retry_policy', None)
self.depends_on = kwargs.get('depends_on', None)
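# Illustrative sketch (not part of the generated models): a tumbling window
# trigger needs pipeline, frequency, interval, start_time and max_concurrency;
# every value below, including the window-start expression, is hypothetical.
def _example_tumbling_window_trigger():
    import datetime
    pipeline_ref = TriggerPipelineReference(
        parameters={"windowStart": "@trigger().outputs.windowStartTime"},
    )
    return TumblingWindowTrigger(
        pipeline=pipeline_ref,
        frequency="Hour",
        interval=24,
        start_time=datetime.datetime(2021, 1, 1),
        max_concurrency=10,
        retry_policy=RetryPolicy(count=3, interval_in_seconds=30),
    )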
class TumblingWindowTriggerDependencyReference(TriggerDependencyReference):
"""Referenced tumbling window trigger dependency.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of dependency reference.Constant filled by server.
:type type: str
:param reference_trigger: Required. Referenced trigger.
:type reference_trigger: ~dfaz_management_client.models.TriggerReference
:param offset: Timespan applied to the start time of a tumbling window when evaluating
dependency.
:type offset: str
:param size: The size of the window when evaluating the dependency. If undefined the frequency
of the tumbling window will be used.
:type size: str
"""
_validation = {
'type': {'required': True},
'reference_trigger': {'required': True},
'offset': {'max_length': 15, 'min_length': 8, 'pattern': r'-?((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
'size': {'max_length': 15, 'min_length': 8, 'pattern': r'((\d+)\.)?(\d\d):(60|([0-5][0-9])):(60|([0-5][0-9]))'},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'reference_trigger': {'key': 'referenceTrigger', 'type': 'TriggerReference'},
'offset': {'key': 'offset', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TumblingWindowTriggerDependencyReference, self).__init__(**kwargs)
self.type = 'TumblingWindowTriggerDependencyReference' # type: str
self.offset = kwargs.get('offset', None)
self.size = kwargs.get('size', None)
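# Illustrative sketch (not part of the generated models): offset and size
# follow the d.hh:mm:ss timespan pattern in _validation above, e.g. "-01:00:00"
# (one hour earlier) or "7.00:00:00" (seven days); the referenced trigger name
# is hypothetical.
def _example_window_dependency():
    return TumblingWindowTriggerDependencyReference(
        reference_trigger=TriggerReference(reference_name="DailyWindowTrigger"),
        offset="-01:00:00",
        size="01:00:00",
    )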
class UpdateIntegrationRuntimeRequest(msrest.serialization.Model):
"""Update integration runtime request.
:param auto_update: Enables or disables the auto-update feature of the self-hosted integration
runtime. See https://go.microsoft.com/fwlink/?linkid=854189. Possible values include: "On",
"Off", "fakeValue1", "fakeValue2", "fakeValue3", "fakeValue4", "fakeValue5", "fakeValue6".
:type auto_update: str or ~dfaz_management_client.models.IntegrationRuntimeAutoUpdate
:param update_delay_offset: The time offset (in hours) in the day, e.g., PT03H is 3 hours. The
integration runtime auto update will happen at that time.
:type update_delay_offset: str
"""
_attribute_map = {
'auto_update': {'key': 'autoUpdate', 'type': 'str'},
'update_delay_offset': {'key': 'updateDelayOffset', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UpdateIntegrationRuntimeRequest, self).__init__(**kwargs)
self.auto_update = kwargs.get('auto_update', None)
self.update_delay_offset = kwargs.get('update_delay_offset', None)
class UserAccessPolicy(msrest.serialization.Model):
"""Get Data Plane read only token request definition.
:param permissions: The string with permissions for Data Plane access. Currently only 'r' is
supported which grants read only access.
:type permissions: str
:param access_resource_path: The resource path to get access relative to factory. Currently
only empty string is supported which corresponds to the factory resource.
:type access_resource_path: str
:param profile_name: The name of the profile. Currently only the default is supported. The
default value is DefaultProfile.
:type profile_name: str
:param start_time: Start time for the token. If not specified the current time will be used.
:type start_time: str
:param expire_time: Expiration time for the token. Maximum duration for the token is eight
hours and by default the token will expire in eight hours.
:type expire_time: str
"""
_attribute_map = {
'permissions': {'key': 'permissions', 'type': 'str'},
'access_resource_path': {'key': 'accessResourcePath', 'type': 'str'},
'profile_name': {'key': 'profileName', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'expire_time': {'key': 'expireTime', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAccessPolicy, self).__init__(**kwargs)
self.permissions = kwargs.get('permissions', None)
self.access_resource_path = kwargs.get('access_resource_path', None)
self.profile_name = kwargs.get('profile_name', None)
self.start_time = kwargs.get('start_time', None)
self.expire_time = kwargs.get('expire_time', None)
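# Illustrative sketch (not part of the generated models): a read-only data
# plane token request using the only values the docstring above lists as
# currently supported ('r' permissions, empty resource path, default profile).
def _example_user_access_policy():
    return UserAccessPolicy(
        permissions="r",
        access_resource_path="",
        profile_name="DefaultProfile",
    )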
| 39.827296 | 367 | 0.660159 |
7942edc7f4eb1edd116a8bd68f3f16c39c496e4e | 262 | py | Python | forecast/trial_hello.py | BlackPoint-CX/forecast | 1ea9b9abda8babadd67ad63c4aa8eaa024873d3e | [
"Apache-2.0"
] | 1 | 2018-11-08T03:38:22.000Z | 2018-11-08T03:38:22.000Z | forecast/trial_hello.py | BlackPoint-CX/forecast | 1ea9b9abda8babadd67ad63c4aa8eaa024873d3e | [
"Apache-2.0"
] | 4 | 2019-11-03T14:19:46.000Z | 2021-10-17T01:47:48.000Z | forecast/trial_hello.py | BlackPoint-CX/forecast | 1ea9b9abda8babadd67ad63c4aa8eaa024873d3e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
__author__ : BlackPoint-CX
__email__ : [email protected]
__file_name__ : trial_hello.py
__create_time__ : 2021/10/17
"""
def hello(someone):
result = "hello, {}".format(someone)
return result
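if __name__ == "__main__":
    # Minimal illustrative usage of hello(); the greeting target is arbitrary.
    print(hello("world"))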
| 18.714286 | 40 | 0.683206 |
7942eedabdefccdf324006ee12215e60f5cbfb33 | 1,786 | py | Python | sim_mat_analysis.py | Yao-Yao/Shazam | b1c4153880791812067d87b44ca3d21cf9bd58e4 | [
"MIT"
] | null | null | null | sim_mat_analysis.py | Yao-Yao/Shazam | b1c4153880791812067d87b44ca3d21cf9bd58e4 | [
"MIT"
] | null | null | null | sim_mat_analysis.py | Yao-Yao/Shazam | b1c4153880791812067d87b44ca3d21cf9bd58e4 | [
"MIT"
] | null | null | null | import numpy as np
from pprint import pprint as pp
class DisjointSet(object):
def __init__(self):
self.leader = {} # maps a member to the group's leader
self.group = {} # maps a group leader to the group (which is a set)
def add(self, a, b):
leadera = self.leader.get(a)
leaderb = self.leader.get(b)
if leadera is not None:
if leaderb is not None:
if leadera == leaderb: return # nothing to do
groupa = self.group[leadera]
groupb = self.group[leaderb]
if len(groupa) < len(groupb):
a, leadera, groupa, b, leaderb, groupb = b, leaderb, groupb, a, leadera, groupa
groupa |= groupb
del self.group[leaderb]
for k in groupb:
self.leader[k] = leadera
else:
self.group[leadera].add(b)
self.leader[b] = leadera
else:
if leaderb is not None:
self.group[leaderb].add(a)
self.leader[a] = leaderb
else:
self.leader[a] = self.leader[b] = a
self.group[a] = set([a, b])
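# Note: add(a, b) is a union-by-size merge -- when both items already have
# leaders, the smaller group is folded into the larger one and every member of
# the absorbed group is re-pointed at the surviving leader.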
simmat = np.genfromtxt('result.csv', delimiter=',')
print 'result:'
nrow, ncol = simmat.shape
print simmat.shape
print simmat
ds = DisjointSet()
selfsim=set()
s=set()
with np.errstate(invalid='ignore'):
np.set_printoptions(threshold=10000)
x, y = np.where(simmat > 0.8)
for i, px in enumerate(x):
py = y[i]
if py != px:
if (px, py) not in s:
print px, py, simmat[px][py]
s.add((py, px))
ds.add(px, py)
if py == px:
selfsim.add(px)
pp(ds.leader)
pp(ds.group)
print 'nan: ', set(range(0,nrow)) - selfsim
| 30.271186 | 99 | 0.530235 |
7942ef076a06e68a4f8fd6bf3534367a4c4d0b86 | 599 | py | Python | submissions/count-vowels-permutation/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | submissions/count-vowels-permutation/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/count-vowels-permutation/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/count-vowels-permutation
from collections import defaultdict
class Solution:
def countVowelPermutation(self, n: int) -> int:
M = 10 ** 9 + 7
dp = {"a": 1, "e": 1, "i": 1, "o": 1, "u": 1}
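# Note: dp[v] counts the strings of the current length ending in vowel v. The
# problem's follow rules are a->e, e->a|i, i->any vowel but i, o->i|u, u->a,
# so each new[x] below sums dp[y] over the vowels y that may precede x.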
for _ in range(n - 1):
new = defaultdict(int)
new["a"] = (dp["e"] + dp["i"] + dp["u"]) % M
new["e"] = (dp["a"] + dp["i"]) % M
new["i"] = (dp["e"] + dp["o"]) % M
new["o"] = dp["i"] % M
new["u"] = (dp["i"] + dp["o"]) % M
dp = new
return sum(dp.values()) % M
| 29.95 | 56 | 0.419032 |
7942ef0c72167341ead27ee499c92673fdc210c4 | 1,235 | py | Python | src/dumbgrepcli/__init__.py | arcturus140/dumbgrep | c4c1335a699d8fa9b5099be6553a248e25c18f39 | [
"MIT"
] | 4 | 2020-12-27T12:35:02.000Z | 2022-01-02T00:44:49.000Z | src/dumbgrepcli/__init__.py | arcturus140/dumbgrep | c4c1335a699d8fa9b5099be6553a248e25c18f39 | [
"MIT"
] | null | null | null | src/dumbgrepcli/__init__.py | arcturus140/dumbgrep | c4c1335a699d8fa9b5099be6553a248e25c18f39 | [
"MIT"
] | 1 | 2021-03-09T09:20:44.000Z | 2021-03-09T09:20:44.000Z | import argparse
import sys
import re
import math
def main():
parser = argparse.ArgumentParser(description="A replacement for grep.")
parser.add_argument("pattern", type=str, help="the pattern to search for")
parser.add_argument("-v", dest="invert", default=False, action="store_true", help="invert matches")
parser.add_argument("--max-count", "-m", type=int, default=math.inf, help="max number of matches to print")
args = parser.parse_args()
regex = re.compile(args.pattern)
matches = 0
for line in sys.stdin:
match = regex.search(line)
if args.invert != bool(match):
matches += 1
try:
sys.stdout.write(highlight(match, line))
except BrokenPipeError:
# Next process in pipe isn't accepting input
# anymore, let's stop.
break
if matches >= args.max_count:
break
def highlight(match, line):
if not match or not sys.stdout.isatty():
return line
return (line[:match.start()]
+ "\033[31m" # change to red
+ line[match.start():match.end()]
+ "\033[0m" # reset
+ line[match.end():])
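# Illustrative invocation (the installed command name depends on this package's
# entry points; "dumbgrep" is assumed here):
#   cat server.log | dumbgrep "ERROR" --max-count 5
#   cat server.log | dumbgrep -v "DEBUG"    # print only lines that do NOT match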
if __name__ == "__main__":
main()
| 32.5 | 111 | 0.595142 |
7942ef9bbc5a2a1ae08f2edca627333d52e3398a | 9,765 | py | Python | sb2.py | dfm/maelstrom | 0085218a22400ae54d8295060489dc3a23f1282a | [
"MIT"
] | null | null | null | sb2.py | dfm/maelstrom | 0085218a22400ae54d8295060489dc3a23f1282a | [
"MIT"
] | null | null | null | sb2.py | dfm/maelstrom | 0085218a22400ae54d8295060489dc3a23f1282a | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]
import numpy as np
import pandas as pd
import tensorflow as tf
import corner
import matplotlib.pyplot as plt
import hemcee
from hemcee.sampler import TFModel
from maelstrom.kepler import kepler
# In[2]
kicid=5709664 # PB1/SB1
rv = False
Hemcee = True
td=True
times, dmag = np.loadtxt("kic5709664_appended-msMAP_Q99_llc.txt",usecols=(0,1)).T
time_mid = (times[0] + times[-1]) / 2.
times -= time_mid
dmmags = dmag * 1000.
nu_arr = [19.44005582, 16.25960082, 22.55802495, 19.123847 , 27.87541656,
22.07540612]
if rv:
# Read in radial velocity data
rv_JD, rv_RV, rv_err = np.loadtxt('kic5709664b_JDrv.txt',delimiter=",", usecols=(0,1,2)).T
rv_JD -= time_mid
porb = 95.4
a1 = 114.
tp = -220
e = 0.483
varpi = 0.388
a1d = a1#/86400.0
# In[4]:
class BoundParam(object):
def __init__(self, name, value, min_value, max_value, dtype=tf.float64):
self.name = name
self.value = value
self.min_value = min_value
self.max_value = max_value
# Bound
self.param = tf.Variable(self.get_bounded_for_value(self.value, self.min_value, self.max_value), dtype=dtype, name=name + "_param")
self.var = self.min_value + (self.max_value - self.min_value) / (1.0 + tf.exp(-self.param))
self.log_jacobian = tf.log(self.var - self.min_value) + tf.log(self.max_value - self.var) - np.log(self.max_value - self.min_value)
# Add this to the log prior
self.log_prior = tf.reduce_sum(self.log_jacobian)
def get_bounded_for_value(self, value, min_val, max_val):
# Check if correct bounds
if np.any(value <= min_val) or np.any(value >= max_val):
raise ValueError("Value must be within the given bounds")
return np.log(value-min_val)-np.log(max_val-value)
def get_value_for_bounded(self,param):
return self.min_value + (self.max_value - self.min_value) / (1.0 + np.exp(-param))
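# Illustrative round trip (numbers arbitrary, not executed here): a value is
# stored in an unbounded "logit" space and recovered through the sigmoid above:
#   raw = np.log(95.4 - 1.) - np.log(500. - 95.4)   # what get_bounded_for_value returns
#   1. + (500. - 1.) / (1. + np.exp(-raw))          # -> 95.4 again (get_value_for_bounded)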
# In[5]: Setup tensorflow variables with bounds
sess = tf.InteractiveSession()
T = tf.float64
# Unbound tensors
nu_tensor = tf.Variable(nu_arr, dtype=T)
# Bound tensors
porb_tensor = BoundParam('Porb', porb, 1, 500) # Orbital period
varpi_tensor = BoundParam('Varpi', varpi, 0, 5) # Angle of the ascending node
tp_tensor = BoundParam('t_p', tp, -1000, 0) # Time of periastron
e_tensor = BoundParam('e', e, 1e-10, 0.99) # Eccentricity
log_sigma2_tensor = BoundParam('log_sigma2', -1.14, -5, 5) # Known value
a1d_tensor = BoundParam('a_1d', a1d, -300, 300.) # Projected semimajor axis
if rv:
# Tensors specific to SB1/2
# Some notes on gammav:
# If gammav is specified as gammav/c then scipy fit will work fine
gammav_tensor = BoundParam('gammav',np.mean(rv_RV),-100,100)
log_rv_sigma2_tensor = BoundParam('logrv', 0., -0.1,0.1)
times_tensor = tf.placeholder(T, times.shape)
dmmags_tensor = tf.placeholder(T, dmmags.shape)
if td:
# Solve Kepler's equation
mean_anom = 2.0 * np.pi * (times_tensor - tp_tensor.var) / porb_tensor.var
ecc_anom = kepler(mean_anom, e_tensor.var)
true_anom = 2.0 * tf.atan2(tf.sqrt(1.0+e_tensor.var)*tf.tan(0.5*ecc_anom),tf.sqrt(1.0-e_tensor.var) + tf.zeros_like(times_tensor))
# Here we define how the time delay will be calculated:
tau_tensor = -(a1d_tensor.var / 86400) * (1.0 - tf.square(e_tensor.var)) * tf.sin(true_anom + varpi_tensor.var) / (1.0 + e_tensor.var*tf.cos(true_anom))
# And the design matrix:
arg_tensor = 2.0 * np.pi * nu_tensor[None, :] * (times_tensor - tau_tensor)[:, None]
D_tensor = tf.concat([tf.cos(arg_tensor), tf.sin(arg_tensor)], axis=1)
# Define the linear solve for W_hat:
DTD_tensor = tf.matmul(D_tensor, D_tensor, transpose_a=True)
DTy_tensor = tf.matmul(D_tensor, dmmags_tensor[:, None], transpose_a=True)
W_hat_tensor = tf.linalg.solve(DTD_tensor, DTy_tensor)
# Finally, the model and the chi^2 objective:
model_tensor = tf.squeeze(tf.matmul(D_tensor, W_hat_tensor)) # Removes dimensions of size 1 from the shape of a tensor.
# Sometimes faster with switched signs on log_sigma2 here:
chi2_tensor = tf.reduce_sum(tf.square(dmmags_tensor - model_tensor)) * tf.exp(-log_sigma2_tensor.var)
chi2_tensor += len(times) * (log_sigma2_tensor.var)
if rv:
# Equations specific to RV
rv_time_tensor = tf.placeholder(T)
rv_tensor = tf.placeholder(T)
rv_err_tensor = tf.placeholder(T)
# Solve Kepler's equation for the RVs
rv_mean_anom = 2.0 * np.pi * (rv_time_tensor - tp_tensor.var) / porb_tensor.var
rv_ecc_anom = kepler(rv_mean_anom, e_tensor.var)
rv_true_anom = 2.0 * tf.atan2(tf.sqrt(1.0+e_tensor.var)*tf.tan(0.5*rv_ecc_anom), tf.sqrt(1.0-e_tensor.var) + tf.zeros_like(rv_time_tensor))
# Here we define how the RV will be calculated:
vrad_tensor = -2.0 * np.pi * ((a1d_tensor.var /86400) / porb_tensor.var) * (1/tf.sqrt(1.0 - tf.square(e_tensor.var))) * (tf.cos(rv_true_anom + varpi_tensor.var) + e_tensor.var*tf.cos(varpi_tensor.var))
vrad_tensor *= 299792.458 # c in km/s
vrad_tensor += gammav_tensor.var
rv_sig2 = tf.square(rv_err_tensor) + tf.exp(log_rv_sigma2_tensor.var)
chi = tf.square(rv_tensor - vrad_tensor) / rv_sig2 + tf.log(rv_sig2)
if not td:
print("RESETTING CHI2")
chi2_tensor = tf.Variable(0., dtype=tf.float64)
chi2_tensor += tf.reduce_sum(chi)
init = tf.global_variables_initializer()
sess.run(init)
feed_dict = {
times_tensor: times,
dmmags_tensor: dmmags
}
var = [
porb_tensor,
varpi_tensor,
tp_tensor,
a1d_tensor,
e_tensor,
log_sigma2_tensor,
]
if rv:
feed_dict.update({
rv_time_tensor: rv_JD,
rv_tensor: rv_RV,
rv_err_tensor: rv_err,
})
var+=[
log_rv_sigma2_tensor,
gammav_tensor #
]
var_list = [tensors.param for tensors in var]
# In[8]:
if Hemcee:
# Calculate prior
log_prior = tf.constant(0.0, dtype=tf.float64)
# Add the jacobian to the prior
for tensor in var:
if tensor.log_prior is not None:
log_prior += tensor.log_prior
log_prob = - 0.5 * chi2_tensor + log_prior
model = TFModel(log_prob, var_list=var_list, feed_dict=feed_dict)
model.setup()
coords = model.current_vector()
metric = hemcee.metric.DenseMetric(np.eye(len(coords)))
step = hemcee.step_size.VariableStepSize()
sampler = hemcee.NoUTurnSampler(model.value, model.gradient, step_size=step, metric=metric)
# Burn-in
results = sampler.run_warmup(coords, 1500, tune_metric=True)
# Run the sampler
coords_chain, logprob_chain = sampler.run_mcmc(
results[0],
5000,
initial_log_prob=results[1],
var_names='',
plot=False, update_interval=100,
tune=False
)
plt.plot([coord[0] for coord in coords_chain])
plt.title('$P_{orb}$ trace')
for i,tensor in enumerate(var):
tensor.real = tensor.get_value_for_bounded(coords_chain[:,i])
ndim = len(coords)
var_real = [tensor.real for tensor in var]
figure = corner.corner(list(zip(*var_real)),
labels=[tensor.name for tensor in var],
#quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
    # true_vars = [porb, varpi, tp, e, 0, a1d]  # unused: immediately overwritten below
true_vars = [tensor.value for tensor in var]
sample_vars = [np.median(tensor.real) for tensor in var]
axes = np.array(figure.axes).reshape((ndim, ndim))
for i in range(len(var_list)):
ax = axes[i, i]
ax.axvline(true_vars[i], color="b")
ax.axvline(sample_vars[i], color="r")
for yi in range(len(var_list)):
for xi in range(yi):
ax = axes[yi, xi]
ax.axvline(sample_vars[xi], color="r")
ax.axvline(true_vars[xi], color="b")
ax.axhline(sample_vars[yi], color="r")
ax.axhline(true_vars[yi], color="b")
if rv:
fig = plt.figure()
rv_phi_test = np.sort(np.linspace(0, np.mean(porb_tensor.real), 5000) % np.mean(porb_tensor.real))
vrad_test = sess.run(vrad_tensor, feed_dict={rv_time_tensor: rv_phi_test})
plt.errorbar((rv_JD % np.mean(porb_tensor.real))/np.mean(porb_tensor.real),rv_RV,rv_err,fmt=".",label='RV obs')
plt.plot(rv_phi_test/np.mean(porb_tensor.real), vrad_test,label='RV th')
plt.xlabel("Orbital phase")
plt.ylabel("RV (km/s)")
plt.legend()
plt.show()
else:
# Use Scipy to minimise
for i in var:
print(i.name, ":", i.value, ':', i.get_value_for_bounded(sess.run(i.param)))
opt = tf.contrib.opt.ScipyOptimizerInterface(chi2_tensor, var_list=var_list)
for i in range(10):
opt.minimize(sess, feed_dict=feed_dict)
for i,tensor in enumerate(var):
tensor.real = tensor.get_value_for_bounded(sess.run(tensor.param))
for i in var:
print(i.name, ":", np.round(i.value,5), ':', i.get_value_for_bounded(sess.run(i.param)))
if rv:
rv_phi_test = np.sort(np.linspace(0, porb_tensor.real, 5000) % porb_tensor.real)
vrad_test = sess.run(vrad_tensor, feed_dict={rv_time_tensor: rv_phi_test})
plt.errorbar((rv_JD % porb_tensor.real)/porb_tensor.real,rv_RV,rv_err,fmt=".",label='RV obs')
plt.plot(rv_phi_test/porb_tensor.real, vrad_test,label='RV th')
plt.xlabel("Orbital phase")
plt.ylabel("RV (km/s)")
plt.legend()
plt.show()
#sess.close()
tf.InteractiveSession.close(sess) | 35.509091 | 205 | 0.643011 |
7942f016b7713610e44456de967c334ea5640fa8 | 5,886 | py | Python | utils/dataset.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 61 | 2020-06-14T20:00:05.000Z | 2022-03-08T10:18:41.000Z | utils/dataset.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 5 | 2020-09-01T06:53:41.000Z | 2022-01-28T07:40:45.000Z | utils/dataset.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 18 | 2020-06-28T10:30:58.000Z | 2022-02-28T02:04:15.000Z | from io import BytesIO
import pickle
import PIL
import torch
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler, RandomSampler, BatchSampler, _int_classes
from numpy.random import choice
class RandomSamplerReplacment(torch.utils.data.sampler.Sampler):
"""Samples elements randomly, with replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source):
self.num_samples = len(data_source)
def __iter__(self):
return iter(torch.from_numpy(choice(self.num_samples, self.num_samples, replace=True)))
def __len__(self):
return self.num_samples
class LimitDataset(Dataset):
def __init__(self, dset, max_len):
self.dset = dset
self.max_len = max_len
def __len__(self):
return min(len(self.dset), self.max_len)
def __getitem__(self, index):
return self.dset[index]
class ByClassDataset(Dataset):
def __init__(self, ds):
self.dataset = ds
self.idx_by_class = {}
for idx, (_, c) in enumerate(ds):
self.idx_by_class.setdefault(c, [])
self.idx_by_class[c].append(idx)
def __len__(self):
return min([len(d) for d in self.idx_by_class.values()])
def __getitem__(self, idx):
idx_per_class = [self.idx_by_class[c][idx]
for c in range(len(self.idx_by_class))]
labels = torch.LongTensor([self.dataset[i][1]
for i in idx_per_class])
items = [self.dataset[i][0] for i in idx_per_class]
if torch.is_tensor(items[0]):
items = torch.stack(items)
return (items, labels)
class IdxDataset(Dataset):
"""docstring for IdxDataset."""
def __init__(self, dset):
super(IdxDataset, self).__init__()
self.dset = dset
self.idxs = range(len(self.dset))
def __getitem__(self, idx):
data, labels = self.dset[self.idxs[idx]]
return (idx, data, labels)
def __len__(self):
return len(self.idxs)
def image_loader(imagebytes):
img = PIL.Image.open(BytesIO(imagebytes))
return img.convert('RGB')
class IndexedFileDataset(Dataset):
""" A dataset that consists of an indexed file (with sample offsets in
another file). For example, a .tar that contains image files.
The dataset does not extract the samples, but works with the indexed
file directly.
NOTE: The index file is assumed to be a pickled list of 3-tuples:
(name, offset, size).
"""
def __init__(self, filename, index_filename=None, extract_target_fn=None,
transform=None, target_transform=None, loader=image_loader):
super(IndexedFileDataset, self).__init__()
# Defaults
if index_filename is None:
index_filename = filename + '.index'
if extract_target_fn is None:
extract_target_fn = lambda *args: args
# Read index
with open(index_filename, 'rb') as index_fp:
sample_list = pickle.load(index_fp)
# Collect unique targets (sorted by name)
targetset = set(extract_target_fn(target) for target, _, _ in sample_list)
targetmap = {target: i for i, target in enumerate(sorted(targetset))}
self.samples = [(targetmap[extract_target_fn(target)], offset, size)
for target, offset, size in sample_list]
self.filename = filename
self.loader = loader
self.transform = transform
self.target_transform = target_transform
def _get_sample(self, fp, idx):
target, offset, size = self.samples[idx]
fp.seek(offset)
sample = self.loader(fp.read(size))
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __getitem__(self, index):
with open(self.filename, 'rb') as fp:
# Handle slices
if isinstance(index, slice):
return [self._get_sample(fp, subidx) for subidx in
range(index.start or 0, index.stop or len(self),
index.step or 1)]
return self._get_sample(fp, index)
def __len__(self):
return len(self.samples)
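# Illustrative sketch (not used elsewhere in this module): one way to build the
# pickled (name, offset, size) index that IndexedFileDataset expects, assuming
# the indexed file is an uncompressed .tar archive. The function name and paths
# are hypothetical.
def build_tar_index(tar_filename, index_filename):
    import tarfile
    entries = []
    with tarfile.open(tar_filename, 'r:') as tar:
        for member in tar.getmembers():
            if member.isfile():
                # offset_data points at the start of the member's payload
                entries.append((member.name, member.offset_data, member.size))
    with open(index_filename, 'wb') as index_fp:
        pickle.dump(entries, index_fp)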
class DuplicateBatchSampler(Sampler):
def __init__(self, sampler, batch_size, duplicates, drop_last):
if not isinstance(sampler, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
self.duplicates = duplicates
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch * self.duplicates
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch * self.duplicates
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
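# Illustrative usage (hypothetical dataset and sizes): each yielded index list is
# the sampled batch repeated `duplicates` times, so every sample appears
# `duplicates` times within the same batch, e.g.
#   batch_sampler = DuplicateBatchSampler(RandomSampler(dataset), batch_size=64,
#                                         duplicates=2, drop_last=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)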
| 33.634286 | 95 | 0.61315 |
7942f0a2def8cbf8f88884300b3dad076af11c8a | 284 | py | Python | mmdet/core/bbox/iou_calculators/__init__.py | cameronchoi/r3det-docker | 30af0b845b2baa4dcb7dccdc4ccd3238fd72bf75 | [
"Apache-2.0"
] | 176 | 2020-06-18T12:35:30.000Z | 2022-03-28T02:20:57.000Z | mmdet/core/bbox/iou_calculators/__init__.py | cameronchoi/r3det-docker | 30af0b845b2baa4dcb7dccdc4ccd3238fd72bf75 | [
"Apache-2.0"
] | 35 | 2020-06-28T07:03:24.000Z | 2022-01-09T01:20:46.000Z | mmdet/core/bbox/iou_calculators/__init__.py | cameronchoi/r3det-docker | 30af0b845b2baa4dcb7dccdc4ccd3238fd72bf75 | [
"Apache-2.0"
] | 44 | 2020-06-20T07:51:01.000Z | 2022-02-10T01:17:54.000Z | from .builder import build_iou_calculator
from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps
from .riou2d_calculator import RBboxOverlaps2D, rbbox_overlaps
__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps',
'RBboxOverlaps2D', 'rbbox_overlaps']
| 40.571429 | 69 | 0.806338 |
7942f11ab1ed85d39a681ef25a53e1f6a59eb22b | 5,492 | py | Python | 1DBedEvolutionModel/SWE-Bed-Exner.py | nakamori1024/WRR | d6595cb5559c76db58dce8c0ae26d2a6c38fe480 | [
"MIT"
] | null | null | null | 1DBedEvolutionModel/SWE-Bed-Exner.py | nakamori1024/WRR | d6595cb5559c76db58dce8c0ae26d2a6c38fe480 | [
"MIT"
] | null | null | null | 1DBedEvolutionModel/SWE-Bed-Exner.py | nakamori1024/WRR | d6595cb5559c76db58dce8c0ae26d2a6c38fe480 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 24 16:13:50 2019
@author: river801
"""
from numba import njit, jit, f8
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import time
import yaml
@jit
def momentum2(h,eta,u,wu,q,g,rdx,dt,snm,nx):
for i in np.arange(1,nx):
h_bar = (h[i]+h[i+1])*0.5
pressure = -g*(-h[i]+h[i+1])*rdx-g*(-eta[i]+eta[i+1])*rdx
roughness = g*snm**2.0*u[i]/h_bar**(4.0/3.0)
advection = u[i]*(-u[i-1]+u[i])*rdx
wu[i] = (u[i]+(-advection+pressure)*dt)/(1.0+roughness*dt)
q[i] = wu[i]*h_bar
wu[nx] = wu[nx-1]
q[nx] = q[nx-1]
return wu, q
@jit
def continuity2(h,q,rdx,dt,nx):
for i in np.arange(1,nx):
h[i] = h[i]-(-q[i-1]+q[i])*rdx*dt
h[nx] = h[nx-1]
return h
@jit
def bedload(h,u,qb,qbin,snm,spec,diam,tsc,g,nx):
for i in np.arange(1,nx):
h_bar = (h[i]+h[i+1])*0.5
ts = (snm*u[i])**2./(spec*diam*h_bar**(1./3.))
if ts>tsc:
qb[i] = 8.0*(ts-tsc)**1.5*(spec*g*diam**3.)**0.5
else:
qb[i] = 0.
qb[ 0] = qbin
qb[nx] = qb[nx-1]
return qb
@jit
def exner(eta,qb,rdx,dt,poro,nx):
for i in np.arange(1,nx+1):
eta[i] = eta[i]-(-qb[i-1]+qb[i])*rdx*dt/(1.-poro)
return eta
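# Illustrative config sketch (values are placeholders, not taken from the
# original repository): main() below expects a YAML file with these keys.
#   g: 9.81               # gravitational acceleration (m/s^2)
#   q_min: 10.0           # minimum discharge (m3/s)
#   q_max: 100.0          # maximum discharge (m3/s)
#   t_q: 12.0             # hours from minimum to maximum discharge
#   chlen: 10000.0        # reach length (m)
#   wid: 100.0            # channel width (m)
#   snm: 0.03             # Manning's roughness coefficient
#   ib: 0.001             # bed slope
#   spec: 1.65            # submerged specific gravity of the sediment
#   diam: 0.001           # grain diameter (m)
#   tsc: 0.047            # critical Shields number
#   poro: 0.4             # bed porosity
#   qbin: 0.0             # upstream sediment supply (m2/s)
#   i_vis: 0              # 0: plot bed/water surface, 1: plot slope ratio
#   output_interval: 1.0  # hours between plots
#   hyd_cycle: 1          # number of hydrograph cycles to run
#   nx: 100               # number of grid cells
#   dt: 0.1               # computational time step (s)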
def main(args):
#set config
with open(args[1], 'r') as yml:
config = yaml.load(yml, Loader=yaml.FullLoader)
# Setting the river geometry and model parameters
g = config['g']
# nu = config['nu']
    q_min = config['q_min'] # minimum discharge m3/s
q_max = config['q_max'] # maximum discharge m3/s
t_q = config['t_q']*3600. # time from min discharge to maximum discharge (sec)
chlen = config['chlen'] # length of river reach (m)
wid = config['wid'] # channel width (m)
snm = config['snm'] # mannings roughness coefs
ib = config['ib'] # bed slope
spec = config['spec']
diam = config['diam'] # sediment diameter
tsc = config['tsc']
poro = config['poro']
qbin = config['qbin'] #sediment supply m2/s
i_vis = config['i_vis']
tuk = 3600.*config['output_interval']
etime = t_q*2.*config['hyd_cycle']
nx = config['nx'] # number of grid
dx = chlen/float(nx) # size of grid
dt = config['dt'] # computational time step (sec)
rdx = 1./dx
x = np.zeros(nx+1)
z = np.zeros(nx+1)
h = np.zeros(nx+1)
u = np.zeros(nx+1)
wu = np.zeros(nx+1)
q = np.zeros(nx+1)
hh = np.zeros(nx+1)
xc = np.zeros(nx+1)
eta = np.zeros(nx+1)
qb = np.zeros(nx+1)
dzdx = np.zeros(nx+1)
x[0] = 0.
for i in np.arange(1,nx+1):
x[i] = x[i-1]+dx
z00 = chlen*ib
z[0] = z00
for i in np.arange(1,nx+1):
z[i] = z[i-1]-ib*dx
for i in np.arange(1,nx+1):
eta[i] = (z[i-1]+z[i])*0.5
xc[i] = (x[i-1]+x[i])*0.5
h0 = (snm*q_min/(wid*ib**0.5))**0.6
hmax = (snm*q_max/(wid*ib**0.5))**0.6
h[:] = h0
u[:] = q_min/(h0*wid)
q[:] = q_min/wid
tt = 0.
t_hyd = 0.
optime = 0.
n = 0
t0 = time.time()
while tt<etime:
if t_hyd<t_q:
qt = q_min+(q_max-q_min)/t_q*t_hyd
else:
qt = q_max-(q_max-q_min)/t_q*(t_hyd-t_q)
h[0] = (snm*qt/(wid*ib**0.5))**0.6
u[0] = qt/(h[0]*wid)
q[0] = qt/wid
momentum2(h,eta,u,wu,q,g,rdx,dt,snm,nx)
continuity2(h,q,rdx,dt,nx)
u = wu
bedload(h,u,qb,qbin,snm,spec,diam,tsc,g,nx)
exner(eta,qb,rdx,dt,poro,nx)
if optime>tuk:
print("Time= ",tt/3600)
optime = optime-tuk
hh = h+eta
plt.xlim([0,chlen])
plt.xlabel( "Downstream distance (m)" )
if i_vis==0:
plt.ylim([0,z00+hmax*10.])
plt.plot(xc[1:nx],eta[1:nx],color='k',label='Bed surface')
plt.plot(xc[1:nx],hh[1:nx],color='b',label='Water surface')
plt.ylabel( "Elevation (m)" )
else:
plt.ylim([0.5,1.5])
dzdx[1:nx] = -(-eta[1:nx]+eta[2:nx+1])*rdx/ib
plt.plot(xc[1:nx],dzdx[1:nx],color='b',label='slope')
plt.ylabel( "slope/initial slope" )
plt.legend()
nnn = str(n)
plt.savefig('fig/Figure' + nnn.zfill(4) +".jpg", dpi=300)
plt.close()
n += 1
optime+=dt
tt+=dt
t_hyd+=dt
if t_hyd>2.*t_q:
t_hyd = t_hyd-2.*t_q
t1 = time.time()
print("Computational time= ",t1-t0)
# nx=100 t=100
# jit 19 sec
# normal 270 sec
# normal 319 sec
# nx=100 t=300
# jit 53 sec
# normal 806 sec
#--------------------------------------------------
# root
#--------------------------------------------------
if __name__ == '__main__':
import sys
# set workiing directly and file name
args = sys.argv
main(args) | 23.774892 | 87 | 0.446286 |
7942f1f89662ebaeca07d3789f4f97cc18f4c019 | 225 | py | Python | iiif_store/urls/public.py | digirati-co-uk/drf-iiif-store | e7053ea197f6324a7efcb23aff67246634c84841 | [
"MIT"
] | null | null | null | iiif_store/urls/public.py | digirati-co-uk/drf-iiif-store | e7053ea197f6324a7efcb23aff67246634c84841 | [
"MIT"
] | null | null | null | iiif_store/urls/public.py | digirati-co-uk/drf-iiif-store | e7053ea197f6324a7efcb23aff67246634c84841 | [
"MIT"
] | null | null | null | from rest_framework import routers
from ..views import (
IIIFResourcePublicViewSet,
)
app_name = "iiif_store"
router = routers.SimpleRouter()
router.register("iiif", IIIFResourcePublicViewSet)
urlpatterns = router.urls
| 20.454545 | 50 | 0.791111 |
7942f40668fb0f6f17d1943732208bc8525f2fa3 | 3,324 | py | Python | project_cl/tools/visualization.py | zhyever/SimIPU | 5b346e392c161a5e9fdde09b1692656bc7cd3faf | [
"Apache-2.0"
] | 29 | 2021-09-29T13:31:12.000Z | 2022-03-15T13:31:25.000Z | project_cl/tools/visualization.py | zhyever/SimIPU | 5b346e392c161a5e9fdde09b1692656bc7cd3faf | [
"Apache-2.0"
] | 3 | 2021-12-13T01:21:12.000Z | 2022-02-24T01:46:14.000Z | project_cl/tools/visualization.py | zhyever/SimIPU | 5b346e392c161a5e9fdde09b1692656bc7cd3faf | [
"Apache-2.0"
] | 1 | 2021-12-03T08:39:18.000Z | 2021-12-03T08:39:18.000Z |
import random
from .calib_utils import Calibration, get_lidar_in_image_fov, draw_lidar, show_lidar_on_image
import torch
import cv2
from PIL import Image
import time
class Frustum_Region(object):
def __init__(self, wr_max=0.7, wr_min=0.4, hr_max=1.0, hr_min=0.4, th=0.7, p=0.5):
super(Frustum_Region, self).__init__()
self.wr_max=wr_max
self.wr_min=wr_min
self.hr_max=hr_max
self.hr_min=hr_min
self.th=th
self.p=p
def __call__(self, input_dict):
# if random.random()>self.p: # p=0.5 more opportunities to global views
# return input_dict
        idx = input_dict['sample_idx']
ori_h,ori_w = (input_dict['ori_shape'][:2]
if 'ori_shape' in input_dict.keys() else 1)
img = input_dict['img']
points = input_dict['points'].tensor
calib = Calibration(input_dict['P2'],input_dict['Trv2c'],input_dict['rect'])
img_scale_factor = (
input_dict['scale_factor'][:2]
if 'scale_factor' in input_dict.keys() else 1)
print("Check", points.shape)
img_scale_factor = (1)
# random select 2d region
h, w = img.shape[:2]
region_w = int(random.uniform(self.wr_min, self.wr_max)* w)
region_h = int(random.uniform(self.hr_min, self.hr_max)* h)
x1 = random.randint(0, w-region_w)
y1 = random.randint(max(0, int(self.th*h-region_h)), h-region_h) # mainly focus on bottom regions
x2,y2 = x1+region_w, y1+region_h
# get Frustum 3D Region
# option1
points = points.numpy()
_, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(points[:,0:3],
calib, 0, 0, ori_w, ori_h, True)
# # option2
# # project points from velo coordinate to camera coordinate
# num_points = points.shape[0]
# pts_4d = torch.cat([points[:,0:3], points.new_ones(size=(num_points, 1))], dim=-1)
# pts_2d = pts_4d @ torch.tensor(input_dict['lidar2img']).t()
# # cam_points is Tensor of Nx4 whose last column is 1
# # transform camera coordinate to image coordinate
# pts_2d[:, 2] = torch.clamp(pts_2d[:, 2], min=1e-5)
# pts_2d[:, 0] /= pts_2d[:, 2] # normalization
# pts_2d[:, 1] /= pts_2d[:, 2] # normalization
# pc_image_coord = pts_2d
# Filter
pc_image_coord = pc_image_coord[:, 0:2] * img_scale_factor
box_fov_inds = (pc_image_coord[:,0]<x2) & \
(pc_image_coord[:,0]>=x1) & \
(pc_image_coord[:,1]<y2) & \
(pc_image_coord[:,1]>=y1)
pc_in_box_fov = points[box_fov_inds]
input_dict['img'] = img[y1:y2,x1:x2]
input_dict['points'].tensor = torch.tensor(pc_in_box_fov)
print("P", points[:3,:])
# visualize
# draw_lidar(points, color=None, fig=None, bgcolor=(0,0,0), pts_scale=1, pts_mode='point', pts_color=None)
# show_lidar_on_image(pc_in_box_fov[:,0:3], img.copy(), calib, ori_w, ori_h, img_scale_factor, 'lidar_in_region_{}'.format(idx), region=(x1,y1,x2,y2))
show_lidar_on_image(points[:,0:3], img.copy(), calib, ori_w, ori_h, img_scale_factor, 'lidar_in_image_{}'.format(idx))
# time.sleep(30)
# print(hhh)
return input_dict | 42.075949 | 159 | 0.606197 |
7942f4680262e8dda246fc6c0368dd0cf0acabb3 | 325 | py | Python | euler/py/euler-0004.py | kerkeslager/sandbox | 45ec9c36ab7241cee93e615b3c901b5b80aa7aff | [
"MIT"
] | null | null | null | euler/py/euler-0004.py | kerkeslager/sandbox | 45ec9c36ab7241cee93e615b3c901b5b80aa7aff | [
"MIT"
] | null | null | null | euler/py/euler-0004.py | kerkeslager/sandbox | 45ec9c36ab7241cee93e615b3c901b5b80aa7aff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from euler import *
import itertools
def products_of_three_digit_numbers():
three_digit_pairs = itertools.combinations(numbers_with_digits(3),2)
return itertools.imap(product, three_digit_pairs)
result = max(itertools.ifilter(is_palindromic, products_of_three_digit_numbers()))
print result
| 25 | 82 | 0.806154 |
7942f5250ad89e274539e5d59f06eeb2a9e3aa66 | 1,971 | py | Python | nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 7 | 2017-02-17T08:54:26.000Z | 2022-03-10T20:57:23.000Z | nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 1 | 2016-04-25T15:07:09.000Z | 2016-04-25T15:07:09.000Z | nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import SphericalAverage
def test_SphericalAverage_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
erode=dict(argstr='-erode %d', ),
fname=dict(
argstr='%s',
mandatory=True,
position=-5,
),
hemisphere=dict(
argstr='%s',
mandatory=True,
position=-4,
),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
in_average=dict(
argstr='%s',
genfile=True,
position=-2,
),
in_orig=dict(argstr='-orig %s', ),
in_surf=dict(
argstr='%s',
mandatory=True,
position=-3,
),
out_file=dict(
argstr='%s',
genfile=True,
position=-1,
),
subject_id=dict(
argstr='-o %s',
mandatory=True,
),
subjects_dir=dict(),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
threshold=dict(argstr='-t %.1f', ),
which=dict(
argstr='%s',
mandatory=True,
position=-6,
),
)
inputs = SphericalAverage.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_SphericalAverage_outputs():
output_map = dict(out_file=dict(), )
outputs = SphericalAverage.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 27 | 67 | 0.506342 |
7942f571b962976573d03d18224952f70b985725 | 358 | py | Python | tests/cuda.pkg/extension.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 25 | 2018-04-23T01:45:39.000Z | 2021-12-10T06:01:23.000Z | tests/cuda.pkg/extension.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 53 | 2018-05-31T04:55:00.000Z | 2021-10-07T21:41:32.000Z | tests/cuda.pkg/extension.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 12 | 2018-04-23T22:50:40.000Z | 2022-02-20T17:27:23.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
"""
Sanity check: verify that the extension module is accessible
"""
def test():
# access the extension module
from cuda import cuda
# all done
return
# main
if __name__ == "__main__":
test()
# end of file
| 12.344828 | 60 | 0.631285 |
7942f586e2efe06667ed6ea64dfbcdaff4d7152a | 30 | py | Python | apps/notifications/templatetags/__init__.py | sunmoon11/apilotus_django | 115e9d0800751018f41ae8b9d85331d861126fd2 | [
"Apache-2.0"
] | null | null | null | apps/notifications/templatetags/__init__.py | sunmoon11/apilotus_django | 115e9d0800751018f41ae8b9d85331d861126fd2 | [
"Apache-2.0"
] | 3 | 2019-12-04T22:21:46.000Z | 2020-05-09T18:57:24.000Z | apps/notifications/templatetags/__init__.py | zaza316614/apilotus_django | 115e9d0800751018f41ae8b9d85331d861126fd2 | [
"Apache-2.0"
] | null | null | null |
__author__ = 'Narayan Kandel' | 15 | 29 | 0.766667 |
7942f65f9ead75831cb0c4f44d3aec2f5c9f33f1 | 2,008 | py | Python | wes_service/util.py | jaeddy/workflow-service | e20fda03768990f92389363938ccb45daeffd6fa | [
"Apache-2.0"
] | null | null | null | wes_service/util.py | jaeddy/workflow-service | e20fda03768990f92389363938ccb45daeffd6fa | [
"Apache-2.0"
] | null | null | null | wes_service/util.py | jaeddy/workflow-service | e20fda03768990f92389363938ccb45daeffd6fa | [
"Apache-2.0"
] | null | null | null | import tempfile
import json
import os
from six import itervalues, iterlists
import connexion
from werkzeug.utils import secure_filename
def visit(d, op):
"""Recursively call op(d) for all list subelements and dictionary 'values' that d may have."""
op(d)
if isinstance(d, list):
for i in d:
visit(i, op)
elif isinstance(d, dict):
for i in itervalues(d):
visit(i, op)
class WESBackend(object):
"""Stores and retrieves options. Intended to be inherited."""
def __init__(self, opts):
"""Parse and store options as a list of tuples."""
self.pairs = []
for o in opts if opts else []:
k, v = o.split("=", 1)
self.pairs.append((k, v))
def getopt(self, p, default=None):
"""Returns the first option value stored that matches p or default."""
for k, v in self.pairs:
if k == p:
return v
return default
def getoptlist(self, p):
"""Returns all option values stored that match p as a list."""
optlist = []
for k, v in self.pairs:
if k == p:
optlist.append(v)
return optlist
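    # Illustrative example (hypothetical option strings): opts entries of the
    # form "key=value" are split once on "=", so repeated keys accumulate:
    #   backend = WESBackend(["runner=cwltool", "extra=--debug", "extra=--quiet"])
    #   backend.getopt("runner")       # -> "cwltool"
    #   backend.getoptlist("extra")    # -> ["--debug", "--quiet"]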
def collect_attachments(self):
tempdir = tempfile.mkdtemp()
body = {}
for k, ls in iterlists(connexion.request.files):
for v in ls:
if k == "workflow_attachment":
filename = secure_filename(v.filename)
v.save(os.path.join(tempdir, filename))
body[k] = "file://%s" % tempdir # Reference to tem working dir.
elif k in ("workflow_params", "tags", "workflow_engine_parameters"):
body[k] = json.loads(v.read())
else:
body[k] = v.read()
if ":" not in body["workflow_url"]:
body["workflow_url"] = "file://%s" % os.path.join(tempdir, secure_filename(body["workflow_url"]))
return tempdir, body
| 31.873016 | 109 | 0.550299 |
7942f661a64e4f5650185822ee17e0e154e580f9 | 4,129 | py | Python | DEMA.py | louisvathan/NSE-Stocks-Strategies-Tester | 53d6bfd05cb46ebbdb19f88ed42a8aad4cafdfd8 | [
"MIT"
] | null | null | null | DEMA.py | louisvathan/NSE-Stocks-Strategies-Tester | 53d6bfd05cb46ebbdb19f88ed42a8aad4cafdfd8 | [
"MIT"
] | 1 | 2021-08-07T14:47:25.000Z | 2021-08-07T14:47:25.000Z | DEMA.py | louisvathan/NSE-Stocks-Strategies-Tester | 53d6bfd05cb46ebbdb19f88ed42a8aad4cafdfd8 | [
"MIT"
] | 1 | 2021-06-27T21:36:42.000Z | 2021-06-27T21:36:42.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
from Status import *
from BackTest import BackTest
def plot_dema(stock, plt_df, plot_full_location, cur_descp):
plt_df = plt_df.set_index(pd.DatetimeIndex(plt_df['Date'].values))
#Plotting DEMA Chart.
plt.figure(figsize=(12.2, 4.5))
column_list = ['DEMA_short', 'DEMA_long', 'Close']
plt_df[column_list].plot(figsize=(12.2, 6.4))
plt.title('Close price for {} - {}'.format(stock, cur_descp))
    plt.ylabel('Price in INR.')
plt.xlabel('Date')
plt.legend(loc='upper left')
plot_name = '{}_DEMA.png'.format(stock)
plot_file = plot_full_location + '/' + stock + '/' + plot_name
plt.savefig(plot_file, dpi=150)
dema_hlink = '=HYPERLINK("{}","DEMA Plot")'.format(plot_file)
plt.close('all')
#Visually show the stock buy and sell signals.
plt.figure(figsize=(12.2, 4.5))
plt.scatter(plt_df.index, plt_df['Buy_Signal_Price'], color = 'green', label='Buy Signal', marker='^', alpha=1)
plt.scatter(plt_df.index, plt_df['Sell_Signal_Price'], color = 'red', label='Sell Signal', marker='v', alpha=1)
plt.plot(plt_df['Close'], label='Close Price', alpha=0.35)
plt.plot(plt_df['DEMA_short'], label='DEMA_short', alpha=0.35)
plt.plot(plt_df['DEMA_long'], label='DEMA_long', alpha=0.35)
plt.xticks(rotation=45)
plt.title('Close Price Buy and Sell Signals ({})'.format(stock))
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price in INR', fontsize = 18)
plt.legend(loc='upper left')
plot_name = '{}_DEMA_Signals.png'.format(stock)
plot_file = plot_full_location + '/' + stock + '/' + plot_name
plt.savefig(plot_file, dpi=150)
signal_hlink = '=HYPERLINK("{}","DEMA Signal Plot")'.format(plot_file)
plt.close('all')
return (dema_hlink, signal_hlink)
def DEMA_Calc(data, time_period, column):
#Calculate the Exponential Moving Average for some time period.
EMA = data[column].ewm(span=time_period, adjust=False).mean()
#Calculate the DEMA.
DEMA = 2 * EMA - EMA.ewm(span=time_period, adjust=False).mean()
return DEMA
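# Quick illustration (hypothetical prices, not part of the strategy run):
# DEMA = 2*EMA - EMA(EMA), which reduces the lag of a plain EMA, e.g.
#   demo = pd.DataFrame({'Close': [10.0, 11.0, 12.0, 11.5, 13.0]})
#   DEMA_Calc(demo, time_period=3, column='Close')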
def buy_sell_DEMA(data):
buy_list = []
sell_list = []
flag = False
#Loop through the data.
for i in range(0, len(data)):
if data['DEMA_short'][i] > data['DEMA_long'][i] and flag == False:
buy_list.append(data['Close'][i])
sell_list.append(np.nan)
flag = True
elif data['DEMA_short'][i] < data['DEMA_long'][i] and flag == True:
buy_list.append(np.nan)
sell_list.append(data['Close'][i])
flag = False
else:
buy_list.append(np.nan)
sell_list.append(np.nan)
#Store the buy and sell signals lists into the data set.
data['Buy_Signal_Price'] = buy_list
data['Sell_Signal_Price'] = sell_list
return data
def DEMA(stock, input_var, plot_full_location, wb, cur_descp, stock_summary, stkplt_hlink):
period_short_DEMA = input_var[0]
period_long_DEMA = input_var[1]
column = input_var[2]
## pd.set_option('display.max_columns', None)
## df_DEMA = input_var[3]
df = input_var[3]
df_DEMA = pd.DataFrame()
columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
for col in columns:
df_DEMA[col] = df[col]
#Store the short term DEMA (20 day period) and the long term DEMA (50 day period) into the data set.
df_DEMA['DEMA_short'] = DEMA_Calc(df_DEMA, period_short_DEMA, column)
df_DEMA['DEMA_long'] = DEMA_Calc(df_DEMA, period_long_DEMA, column)
df_DEMA = buy_sell_DEMA(df_DEMA)
strategy_hlinks = plot_dema(stock, df_DEMA, plot_full_location, cur_descp)
stock_status = stock_current_status(df_DEMA)
bt_data = BackTest('DEMA', stock, df_DEMA, plot_full_location, wb, cur_descp)
print_report(stock, cur_descp, df_DEMA, bt_data, stock_summary,
stock_status, stkplt_hlink, strategy_hlinks)
return stock_status
| 38.231481 | 116 | 0.644466 |
7942f6b4a86f3358f453a7c0dbabb49623295c47 | 31,144 | py | Python | venv/Lib/site-packages/jedi/api/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,318 | 2019-07-11T10:34:39.000Z | 2022-03-29T15:05:19.000Z | venv/Lib/site-packages/jedi/api/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/Lib/site-packages/jedi/api/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 66 | 2019-11-11T15:33:12.000Z | 2022-03-01T07:55:55.000Z | """
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
"""
import sys
from pathlib import Path
import parso
from parso.python import tree
from jedi._compatibility import cast_path
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.file_io import KnownContentFileIO
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
from jedi.api.helpers import validate_line_column
from jedi.api.completion import Completion, search_in_module
from jedi.api.keywords import KeywordName
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project, Project
from jedi.api.errors import parso_to_jedi_errors
from jedi.api import refactoring
from jedi.api.refactoring.extract import extract_function, extract_variable
from jedi.inference import InferenceState
from jedi.inference import imports
from jedi.inference.references import find_references
from jedi.inference.arguments import try_iter_content
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.sys_path import transform_path_to_dotted
from jedi.inference.syntax_tree import tree_name_to_values
from jedi.inference.value import ModuleValue
from jedi.inference.base_value import ValueSet
from jedi.inference.value.iterable import unpack_tuple_to_dict
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.gradual.utils import load_proper_stub_module
from jedi.inference.utils import to_list
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
class Script:
"""
A Script is the base for completions, goto or whatever you want to do with
    Jedi. The counterpart of this class is :class:`Interpreter`, which works
with actual dictionaries and can work with a REPL. This class
should be used when a user edits code in an editor.
You can either use the ``code`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The Script's ``sys.path`` is very customizable:
- If `project` is provided with a ``sys_path``, that is going to be used.
- If `environment` is provided, its ``sys.path`` will be used
(see :func:`Environment.get_sys_path <jedi.api.environment.Environment.get_sys_path>`);
- Otherwise ``sys.path`` will match that of the default environment of
Jedi, which typically matches the sys path that was used at the time
when Jedi was imported.
Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are
always 1-based and columns are always zero based. To avoid repetition they
are not always documented. You can omit both line and column. Jedi will
then just do whatever action you are calling at the end of the file. If you
    provide only the line, Jedi will complete at the end of that line.
.. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means
that parso reuses modules (i.e. they are not immutable). With this setting
Jedi is **not thread safe** and it is also not safe to use multiple
:class:`.Script` instances and its definitions at the same time.
If you are a normal plugin developer this should not be an issue. It is
an issue for people that do more complex stuff with Jedi.
This is purely a performance optimization and works pretty well for all
typical usages, however consider to turn the setting off if it causes
you problems. See also
`this discussion <https://github.com/davidhalter/jedi/issues/1240>`_.
:param code: The source code of the current file, separated by newlines.
:type code: str
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or pathlib.Path or None
:param Environment environment: Provide a predefined :ref:`Environment <environments>`
to work with a specific Python version or virtualenv.
:param Project project: Provide a :class:`.Project` to make sure finding
references works well, because the right folder is searched. There are
also ways to modify the sys path and other things.
"""
def __init__(self, code=None, *, path=None, environment=None, project=None):
self._orig_path = path
# An empty path (also empty string) should always result in no path.
if isinstance(path, str):
path = Path(path)
self.path = path.absolute() if path else None
if code is None:
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
code = f.read()
if project is None:
# Load the Python grammar of the current interpreter.
project = get_default_project(None if self.path is None else self.path.parent)
self._inference_state = InferenceState(
project, environment=environment, script_path=self.path
)
debug.speed('init')
self._module_node, code = self._inference_state.parse_and_get_code(
code=code,
path=self.path,
use_latest_grammar=path and path.suffix == '.pyi',
cache=False, # No disk cache, because the current script often changes.
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
debug.speed('parsed')
self._code_lines = parso.split_lines(code, keepends=True)
self._code = code
cache.clear_time_caches()
debug.reset_time()
# Cache the module, this is mostly useful for testing, since this shouldn't
# be called multiple times.
@cache.memoize_method
def _get_module(self):
names = None
is_package = False
if self.path is not None:
import_names, is_p = transform_path_to_dotted(
self._inference_state.get_sys_path(add_parent_paths=False),
self.path
)
if import_names is not None:
names = import_names
is_package = is_p
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(cast_path(self.path), self._code)
if self.path is not None and self.path.suffix == '.pyi':
# We are in a stub file. Try to load the stub properly.
stub_module = load_proper_stub_module(
self._inference_state,
self._inference_state.latest_grammar,
file_io,
names,
self._module_node
)
if stub_module is not None:
return stub_module
if names is None:
names = ('__main__',)
module = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=names,
code_lines=self._code_lines,
is_package=is_package,
)
if names[0] not in ('builtins', 'typing'):
# These modules are essential for Jedi, so don't overwrite them.
self._inference_state.module_cache.add(names, ValueSet([module]))
return module
def _get_module_context(self):
return self._get_module().as_context()
def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._inference_state.environment,
)
@validate_line_column
def complete(self, line=None, column=None, *, fuzzy=False):
"""
Completes objects under the cursor.
Those objects contain information about the completions, more than just
names.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:return: Completion objects, sorted by name. Normal names appear
before "private" names that start with ``_`` and those appear
before magic methods and name mangled names that start with ``__``.
:rtype: list of :class:`.Completion`
"""
with debug.increase_indent_cm('complete'):
completion = Completion(
self._inference_state, self._get_module_context(), self._code_lines,
(line, column), self.get_signatures, fuzzy=fuzzy,
)
return completion.complete()
@validate_line_column
def infer(self, line=None, column=None, *, only_stubs=False, prefer_stubs=False):
"""
        Return the definitions of the symbol under the cursor. It is basically a wrapper
around Jedi's type inference.
This method follows complicated paths and returns the end, not the
first definition. The big difference between :meth:`goto` and
:meth:`infer` is that :meth:`goto` doesn't
follow imports and statements. Multiple objects may be returned,
because depending on an option you can have two different versions of a
function.
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
pos = line, column
leaf = self._module_node.get_name_of_position(pos)
if leaf is None:
leaf = self._module_node.get_leaf_for_position(pos)
if leaf is None or leaf.type == 'string':
return []
if leaf.end_pos == (line, column) and leaf.type == 'operator':
next_ = leaf.get_next_leaf()
if next_.start_pos == leaf.end_pos \
and next_.type in ('number', 'string', 'keyword'):
leaf = next_
context = self._get_module_context().create_context(leaf)
values = helpers.infer(self._inference_state, context, leaf)
values = convert_values(
values,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, c.name) for c in values]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))
@validate_line_column
def goto(self, line=None, column=None, *, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
"""
Goes to the name that defined the object under the cursor. Optionally
you can follow imports.
        Multiple objects may be returned, depending on whether you can have two
different versions of a function.
:param follow_imports: The method will follow imports.
:param follow_builtin_imports: If ``follow_imports`` is True will try
to look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Without a name we really just want to jump to the result e.g.
# executed by `foo()`, if we the cursor is after `)`.
return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs)
name = self._get_module_context().create_name(tree_name)
# Make it possible to goto the super class function/attribute
# definitions, when they are overwritten.
names = []
if name.tree_name.is_definition() and name.parent_context.is_class():
class_node = name.parent_context.tree_node
class_value = self._get_module_context().create_value(class_node)
mro = class_value.py__mro__()
next(mro) # Ignore the first entry, because it's the class itself.
for cls in mro:
names = cls.goto(tree_name.value)
if names:
break
if not names:
names = list(name.goto())
if follow_imports:
names = helpers.filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, d) for d in set(names)]
# Avoid duplicates
return list(set(helpers.sorted_definitions(defs)))
def search(self, string, *, all_scopes=False):
"""
Searches a name in the current file. For a description of how the
search string should look like, please have a look at
:meth:`.Project.search`.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search_func(string, all_scopes=all_scopes)
@to_list
def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
names = self._names(all_scopes=all_scopes)
wanted_type, wanted_names = helpers.split_search_string(string)
return search_in_module(
self._inference_state,
self._get_module_context(),
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
fuzzy=fuzzy,
)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. If you want to
have all possible definitions in a file you can also provide an empty
string.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@validate_line_column
def help(self, line=None, column=None):
"""
Used to display a help window to users. Uses :meth:`.Script.goto` and
returns additional definitions for keywords and operators.
Typically you will want to display :meth:`.BaseName.docstring` to the
user for all the returned definitions.
The additional definitions are ``Name(...).type == 'keyword'``.
These definitions do not have a lot of value apart from their docstring
attribute, which contains the output of Python's :func:`help` function.
:rtype: list of :class:`.Name`
"""
definitions = self.goto(line, column, follow_imports=True)
if definitions:
return definitions
leaf = self._module_node.get_leaf_for_position((line, column))
if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
def need_pydoc():
if leaf.value in ('(', ')', '[', ']'):
if leaf.parent.type == 'trailer':
return False
if leaf.parent.type == 'atom':
return False
grammar = self._inference_state.grammar
# This parso stuff is not public, but since I control it, this
# is fine :-) ~dave
reserved = grammar._pgen_grammar.reserved_syntax_strings.keys()
return leaf.value in reserved
if need_pydoc():
name = KeywordName(self._inference_state, leaf.value)
return [classes.Name(self._inference_state, name)]
return []
@validate_line_column
def get_references(self, line=None, column=None, **kwargs):
"""
Lists all references of a variable in a project. Since this can be
quite hard to do for Jedi, if it is too complicated, Jedi will stop
searching.
:param include_builtins: Default ``True``. If ``False``, checks if a reference
is a builtin (e.g. ``sys``) and in that case does not return it.
:param scope: Default ``'project'``. If ``'file'``, include references in
the current module only.
:rtype: list of :class:`.Name`
"""
def _references(include_builtins=True, scope='project'):
if scope not in ('project', 'file'):
raise ValueError('Only the scopes "file" and "project" are allowed')
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Must be syntax
return []
names = find_references(self._get_module_context(), tree_name, scope == 'file')
definitions = [classes.Name(self._inference_state, n) for n in names]
if not include_builtins or scope == 'file':
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _references(**kwargs)
@validate_line_column
def get_signatures(self, line=None, column=None):
"""
Return the function object of the call under the cursor.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
        This would return an empty list.
:rtype: list of :class:`.Signature`
"""
pos = line, column
call_details = helpers.get_signature_details(self._module_node, pos)
if call_details is None:
return []
context = self._get_module_context().create_context(call_details.bracket_leaf)
definitions = helpers.cache_signatures(
self._inference_state,
context,
call_details.bracket_leaf,
self._code_lines,
pos
)
debug.speed('func_call followed')
# TODO here we use stubs instead of the actual values. We should use
# the signatures from stubs, but the actual values, probably?!
return [classes.Signature(self._inference_state, signature, call_details)
for signature in definitions.get_signatures()]
@validate_line_column
def get_context(self, line=None, column=None):
"""
Returns the scope context under the cursor. This basically means the
function, class or module where the cursor is at.
:rtype: :class:`.Name`
"""
pos = (line, column)
leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
if leaf.start_pos > pos or leaf.type == 'endmarker':
previous_leaf = leaf.get_previous_leaf()
if previous_leaf is not None:
leaf = previous_leaf
module_context = self._get_module_context()
n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
# This is a bit of a special case. The context of a function/class
# name/param/keyword is always it's parent context, not the
# function itself. Catch all the cases here where we are before the
# suite object, but still in the function.
context = module_context.create_value(n).as_context()
else:
context = module_context.create_context(leaf)
while context.name is None:
context = context.parent_context # comprehensions
definition = classes.Name(self._inference_state, context.name)
while definition.type != 'module':
name = definition._name # TODO private access
tree_name = name.tree_name
if tree_name is not None: # Happens with lambdas.
scope = tree_name.get_definition()
if scope.start_pos[1] < column:
break
definition = definition.parent()
return definition
def _analysis(self):
self._inference_state.is_analysis = True
self._inference_state.analysis_modules = [self._module_node]
module = self._get_module_context()
try:
for node in get_executable_nodes(self._module_node):
context = module.create_context(node)
if node.type in ('funcdef', 'classdef'):
# Resolve the decorators.
tree_name_to_values(self._inference_state, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():
import_names |= set(path[-1] for path in node.get_paths())
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.infer_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
else:
if node.type == 'name':
defs = self._inference_state.infer(context, node)
else:
defs = infer_call_of_leaf(context, node)
try_iter_content(defs)
self._inference_state.reset_recursion_limitations()
ana = [a for a in self._inference_state.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._inference_state.is_analysis = False
def get_names(self, **kwargs):
"""
Returns names defined in the current file.
:param all_scopes: If True lists the names of all scopes instead of
only the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
:rtype: list of :class:`.Name`
"""
names = self._names(**kwargs)
return [classes.Name(self._inference_state, n) for n in names]
def get_syntax_errors(self):
"""
Lists all syntax errors in the current file.
:rtype: list of :class:`.SyntaxError`
"""
return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)
def _names(self, all_scopes=False, definitions=True, references=False):
# Set line/column to a random position, because they don't matter.
module_context = self._get_module_context()
defs = [
module_context.create_name(name)
for name in helpers.get_module_names(
self._module_node,
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
]
return sorted(defs, key=lambda x: x.start_pos)
def rename(self, line=None, column=None, *, new_name):
"""
Renames all references of the variable under the cursor.
:param new_name: The variable under the cursor will be renamed to this
string.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
definitions = self.get_references(line, column, include_builtins=False)
return refactoring.rename(self._inference_state, definitions, new_name)
@validate_line_column
def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None):
"""
        Moves an expression to a new statement.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
foo = 3.1
x = int(foo + 1)
the code above will become::
foo = 3.1
bar = foo + 1
x = int(bar)
:param new_name: The expression under the cursor will be renamed to
this string.
        :param int until_line: The selection range ends at this line, when
omitted, Jedi will be clever and try to define the range itself.
        :param int until_column: The selection range ends at this column, when
omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_variable(
self._inference_state, self.path, self._module_node,
new_name, (line, column), until_pos
)
@validate_line_column
def extract_function(self, line, column, *, new_name, until_line=None, until_column=None):
"""
Moves an expression to a new function.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
global_var = 3
def x():
foo = 3.1
x = int(foo + 1 + global_var)
the code above will become::
global_var = 3
def bar(foo):
return int(foo + 1 + global_var)
def x():
foo = 3.1
x = bar(foo)
:param new_name: The expression under the cursor will be replaced with
a function with this name.
        :param int until_line: The selection range ends at this line, when
omitted, Jedi will be clever and try to define the range itself.
        :param int until_column: The selection range ends at this column, when
omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_function(
self._inference_state, self.path, self._get_module_context(),
new_name, (line, column), until_pos
)
def inline(self, line=None, column=None):
"""
Inlines a variable under the cursor. This is basically the opposite of
extracting a variable. For example with the cursor on bar::
foo = 3.1
bar = foo + 1
x = int(bar)
the code above will become::
foo = 3.1
x = int(foo + 1)
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
names = [d._name for d in self.get_references(line, column, include_builtins=True)]
return refactoring.inline(self._inference_state, names)
class Interpreter(Script):
"""
Jedi's API for Python REPLs.
Implements all of the methods that are present in :class:`.Script` as well.
In addition to completions that normal REPL completion does like
``str.upper``, Jedi also supports code completion based on static code
analysis. For example Jedi will complete ``str().upper``.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper
All keyword arguments are same as the arguments for :class:`.Script`.
:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the one
returned by :func:`globals` and :func:`locals`.
"""
_allow_descriptor_getattr_default = True
def __init__(self, code, namespaces, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
super().__init__(code, environment=environment,
project=Project(Path.cwd()), **kwds)
self.namespaces = namespaces
self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default
@cache.memoize_method
def _get_module_context(self):
tree_module_value = ModuleValue(
self._inference_state, self._module_node,
file_io=KnownContentFileIO(str(self.path), self._code),
string_names=('__main__',),
code_lines=self._code_lines,
)
return interpreter.MixedModuleContext(
tree_module_value,
self.namespaces,
)
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of lazy parsing
of modules. This can be useful for IDEs, to control which modules to load
on startup.
:param modules: different module names, list of string.
"""
for m in modules:
s = "import %s as x; x." % m
Script(s).complete(1, len(s))
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed
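# Editor's note: a minimal, hedged usage sketch of the API defined above (not part
# of the original module). The snippet and file name are invented for illustration.
#
#     import jedi
#     source = "def add(a, b):\n    return a + b\nresult = add(1, 2)"
#     script = jedi.Script(source, path="example.py")
#     print([n.name for n in script.get_names(all_scopes=True)])  # add, a, b, result
#     print(script.get_syntax_errors())                           # [] for valid code
#     refactoring = script.rename(line=1, column=4, new_name="plus")
#     # `refactoring` is a Refactoring object describing the rename across references.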
| 40.446753 | 98 | 0.627761 |
7942f7e797c023abb15e67e5afd0091935de890c | 2,382 | py | Python | tests/test_logger.py | fredstro/mrq | eec5dfb425c765afa1ab5b41ca1e6f76869a6726 | ["MIT"] | 745 | 2015-01-02T06:54:37.000Z | 2022-03-27T13:23:33.000Z | tests/test_logger.py | fredstro/mrq | eec5dfb425c765afa1ab5b41ca1e6f76869a6726 | ["MIT"] | 175 | 2015-01-01T20:46:08.000Z | 2022-01-24T09:40:55.000Z | tests/test_logger.py | fredstro/mrq | eec5dfb425c765afa1ab5b41ca1e6f76869a6726 | ["MIT"] | 143 | 2015-01-06T06:55:26.000Z | 2021-09-13T19:47:12.000Z |
import time
import pytest
import os
OPTS = []
for cls in ["string", "unicode", "latin-1", "bytes1"]:
for utf8_sys_stdout in [True, False]:
OPTS.append([cls, utf8_sys_stdout])
@pytest.mark.parametrize(["class_name", "utf8_sys_stdout"], OPTS)
def test_supports_string_and_unicode(worker, class_name, utf8_sys_stdout):
result = worker.send_task("tests.tasks.logger.Simple", {
"class_name": class_name,
"utf8_sys_stdout": utf8_sys_stdout
})
# Force-flush the logs
worker.stop(deps=False)
assert result
time.sleep(0.1)
# Job logs
db_logs = list(worker.mongodb_logs.mrq_logs.find({"job": {"$exists": True}}))
assert len(db_logs) == 1
if class_name == "unicode":
assert u"caf\xe9" in db_logs[0]["logs"]
elif class_name == "string":
assert u"cafe" in db_logs[0]["logs"]
elif class_name == "latin-1":
assert "caf" in db_logs[0]["logs"]
assert u"cafe" not in db_logs[0]["logs"]
assert u"caf\xe9" not in db_logs[0]["logs"]
# Worker logs
# db_logs = list(worker.mongodb_logs.mrq_logs.find({"worker": db_workers[0]["_id"]}))
# assert len(db_logs) >= 1
# if class_name == "unicode":
# assert u"caf\xe9" in db_logs
# else:
# assert u"cafe" in db_logs
worker.stop_deps()
def test_other_handlers(worker):
worker.start(flags="--config tests/fixtures/config-logger.py")
worker.send_task("tests.tasks.logger.Simple", {
"class_name": "string"
})
worker.stop(deps=False)
assert os.path.isfile("/tmp/mrq.log")
worker.stop_deps()
os.unlink("/tmp/mrq.log")
def test_log_level(worker):
worker.start(flags="--log_level INFO --config tests/fixtures/config-logger.py")
worker.send_task("tests.tasks.logger.Simple", {
"class_name": "string"
}, block=True)
worker.stop(deps=False)
assert os.path.isfile("/tmp/mrq.log")
with open("/tmp/mrq.log") as f:
lines = f.readlines()
assert all(["DEBUG" not in line for line in lines])
worker.stop_deps()
os.unlink("/tmp/mrq.log")
def test_collection_is_capped(worker):
result = worker.send_task("tests.tasks.logger.Simple", {
"class_name": "string"
})
# Force-flush the logs
worker.stop(deps=False)
assert worker.mongodb_logs.mrq_logs.options()["capped"] is True
worker.stop_deps()
| 29.407407 | 89 | 0.642317 |
7942f8f14883e4b10210f801cd42e6e94ef3185e | 13,357 | py | Python | test/test_pysindy.py | jzjosuerivera71/pysindy-1 | 6ed02140c3e255c8bb69b19a7d9452930bd3253d | ["MIT"] | 1 | 2021-03-02T20:31:40.000Z | 2021-03-02T20:31:40.000Z | test/test_pysindy.py | jzjosuerivera71/pysindy-1 | 6ed02140c3e255c8bb69b19a7d9452930bd3253d | ["MIT"] | null | null | null | test/test_pysindy.py | jzjosuerivera71/pysindy-1 | 6ed02140c3e255c8bb69b19a7d9452930bd3253d | ["MIT"] | null | null | null |
"""
Unit tests for SINDy class.
Note: all tests should be encapsulated in functions whose
names start with "test_"
To run all tests for this package, navigate to the top-level
directory and execute the following command:
pytest
To run tests for just one file, run
pytest file_to_test.py
"""
import numpy as np
import pytest
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.utils.validation import check_is_fitted
from pysindy import SINDy
from pysindy.differentiation import FiniteDifference
from pysindy.feature_library import FourierLibrary
from pysindy.feature_library import PolynomialLibrary
from pysindy.optimizers import SR3
from pysindy.optimizers import STLSQ
def test_get_feature_names_len(data_lorenz):
x, t = data_lorenz
model = SINDy()
with pytest.raises(NotFittedError):
model.get_feature_names()
model.fit(x, t)
# Assumes default library is polynomial features of degree 2
assert len(model.get_feature_names()) == 10
def test_not_fitted(data_1d):
x, t = data_1d
model = SINDy()
with pytest.raises(NotFittedError):
model.predict(x)
with pytest.raises(NotFittedError):
model.get_feature_names()
with pytest.raises(NotFittedError):
model.coefficients()
with pytest.raises(NotFittedError):
model.equations()
with pytest.raises(NotFittedError):
model.simulate(x[0], t)
def test_improper_shape_input(data_1d):
x, t = data_1d
# Ensure model successfully handles different data shapes
model = SINDy()
model.fit(x.flatten(), t)
check_is_fitted(model)
model = SINDy()
model.fit(x.flatten(), t, x_dot=x.flatten())
check_is_fitted(model)
model = SINDy()
model.fit(x, t, x_dot=x.flatten())
check_is_fitted(model)
def test_nan_derivatives(data_lorenz):
x, t = data_lorenz
model = SINDy(differentiation_method=FiniteDifference(drop_endpoints=True))
model.fit(x, t)
check_is_fitted(model)
@pytest.mark.parametrize(
"data",
[
pytest.lazy_fixture("data_1d"),
pytest.lazy_fixture("data_lorenz"),
pytest.lazy_fixture("data_1d_bad_shape"),
],
)
def test_mixed_inputs(data):
x, t = data
# Scalar t
model = SINDy()
model.fit(x, t=2)
check_is_fitted(model)
# x_dot is passed in
model = SINDy()
model.fit(x, x_dot=x)
check_is_fitted(model)
model = SINDy()
model.fit(x, t, x_dot=x)
check_is_fitted(model)
@pytest.mark.parametrize(
"data", [pytest.lazy_fixture("data_1d"), pytest.lazy_fixture("data_lorenz")]
)
def test_bad_t(data):
x, t = data
model = SINDy()
# Wrong type
with pytest.raises(ValueError):
model.fit(x, t="1")
# Invalid value of t
with pytest.raises(ValueError):
model.fit(x, t=-1)
# t is a list
with pytest.raises(ValueError):
model.fit(x, list(t))
# Wrong number of time points
with pytest.raises(ValueError):
model.fit(x, t[:-1])
# Two points in t out of order
t[2], t[4] = t[4], t[2]
with pytest.raises(ValueError):
model.fit(x, t)
t[2], t[4] = t[4], t[2]
# Two matching times in t
t[3] = t[5]
with pytest.raises(ValueError):
model.fit(x, t)
@pytest.mark.parametrize(
"data", [pytest.lazy_fixture("data_1d"), pytest.lazy_fixture("data_lorenz")]
)
def test_t_default(data):
x, t = data
dt = t[1] - t[0]
with pytest.raises(ValueError):
model = SINDy(t_default=0)
with pytest.raises(ValueError):
model = SINDy(t_default="1")
model = SINDy()
model.fit(x, dt)
model_t_default = SINDy(t_default=dt)
model_t_default.fit(x)
np.testing.assert_allclose(model.coefficients(), model_t_default.coefficients())
np.testing.assert_almost_equal(model.score(x, t=dt), model_t_default.score(x))
np.testing.assert_almost_equal(
model.differentiate(x, t=dt), model_t_default.differentiate(x)
)
@pytest.mark.parametrize(
"data, optimizer",
[
(pytest.lazy_fixture("data_1d"), STLSQ()),
(pytest.lazy_fixture("data_lorenz"), STLSQ()),
(pytest.lazy_fixture("data_1d"), SR3()),
(pytest.lazy_fixture("data_lorenz"), SR3()),
(pytest.lazy_fixture("data_1d"), Lasso(fit_intercept=False)),
(pytest.lazy_fixture("data_lorenz"), Lasso(fit_intercept=False)),
(pytest.lazy_fixture("data_1d"), ElasticNet(fit_intercept=False)),
(pytest.lazy_fixture("data_lorenz"), ElasticNet(fit_intercept=False)),
],
)
def test_predict(data, optimizer):
x, t = data
model = SINDy(optimizer=optimizer)
model.fit(x, t)
x_dot = model.predict(x)
assert x.shape == x_dot.shape
@pytest.mark.parametrize(
"data",
[
pytest.lazy_fixture("data_1d"),
pytest.lazy_fixture("data_lorenz"),
pytest.lazy_fixture("data_1d_bad_shape"),
],
)
def test_simulate(data):
x, t = data
model = SINDy()
model.fit(x, t)
x1 = model.simulate(x[0], t)
assert len(x1) == len(t)
@pytest.mark.parametrize(
"library",
[
PolynomialLibrary(degree=3),
FourierLibrary(n_frequencies=3),
pytest.lazy_fixture("data_custom_library"),
PolynomialLibrary() + FourierLibrary(),
],
)
def test_libraries(data_lorenz, library):
x, t = data_lorenz
model = SINDy(feature_library=library)
model.fit(x, t)
s = model.score(x, t)
assert s <= 1
@pytest.mark.parametrize(
"data",
[
pytest.lazy_fixture("data_1d"),
pytest.lazy_fixture("data_lorenz"),
pytest.lazy_fixture("data_1d_bad_shape"),
],
)
def test_score(data):
x, t = data
model = SINDy()
model.fit(x, t)
assert model.score(x) <= 1
assert model.score(x, t) <= 1
assert model.score(x, x_dot=x) <= 1
assert model.score(x, t, x_dot=x) <= 1
def test_parallel(data_lorenz):
x, t = data_lorenz
model = SINDy(n_jobs=4)
model.fit(x, t)
x_dot = model.predict(x)
s = model.score(x, x_dot=x_dot)
assert s >= 0.95
def test_fit_multiple_trajectores(data_multiple_trajctories):
x, t = data_multiple_trajctories
model = SINDy()
# Should fail if multiple_trajectories flag is not set
with pytest.raises(ValueError):
model.fit(x, t=t)
model.fit(x, multiple_trajectories=True)
check_is_fitted(model)
model.fit(x, t=t, multiple_trajectories=True)
assert model.score(x, t=t, multiple_trajectories=True) > 0.8
model = SINDy()
model.fit(x, x_dot=x, multiple_trajectories=True)
check_is_fitted(model)
model = SINDy()
model.fit(x, t=t, x_dot=x, multiple_trajectories=True)
check_is_fitted(model)
# Test validate_input
t[0] = None
with pytest.raises(ValueError):
model.fit(x, t=t, multiple_trajectories=True)
def test_predict_multiple_trajectories(data_multiple_trajctories):
x, t = data_multiple_trajctories
model = SINDy()
model.fit(x, t=t, multiple_trajectories=True)
# Should fail if multiple_trajectories flag is not set
with pytest.raises(ValueError):
model.predict(x)
p = model.predict(x, multiple_trajectories=True)
assert len(p) == len(x)
def test_score_multiple_trajectories(data_multiple_trajctories):
x, t = data_multiple_trajctories
model = SINDy()
model.fit(x, t=t, multiple_trajectories=True)
# Should fail if multiple_trajectories flag is not set
with pytest.raises(ValueError):
model.score(x)
s = model.score(x, multiple_trajectories=True)
assert s <= 1
s = model.score(x, t=t, multiple_trajectories=True)
assert s <= 1
s = model.score(x, x_dot=x, multiple_trajectories=True)
assert s <= 1
s = model.score(x, t=t, x_dot=x, multiple_trajectories=True)
assert s <= 1
def test_fit_discrete_time(data_discrete_time):
x = data_discrete_time
model = SINDy(discrete_time=True)
model.fit(x)
check_is_fitted(model)
model = SINDy(discrete_time=True)
model.fit(x[:-1], x_dot=x[1:])
check_is_fitted(model)
def test_simulate_discrete_time(data_discrete_time):
x = data_discrete_time
model = SINDy(discrete_time=True)
model.fit(x)
n_steps = x.shape[0]
x1 = model.simulate(x[0], n_steps)
assert len(x1) == n_steps
# TODO: implement test using the stop_condition option
def test_predict_discrete_time(data_discrete_time):
x = data_discrete_time
model = SINDy(discrete_time=True)
model.fit(x)
assert len(model.predict(x)) == len(x)
def test_score_discrete_time(data_discrete_time):
x = data_discrete_time
model = SINDy(discrete_time=True)
model.fit(x)
assert model.score(x) > 0.75
assert model.score(x, x_dot=x) < 1
def test_fit_discrete_time_multiple_trajectories(
data_discrete_time_multiple_trajectories,
):
x = data_discrete_time_multiple_trajectories
# Should fail if multiple_trajectories flag is not set
model = SINDy(discrete_time=True)
with pytest.raises(ValueError):
model.fit(x)
model.fit(x, multiple_trajectories=True)
check_is_fitted(model)
model = SINDy(discrete_time=True)
model.fit(x, x_dot=x, multiple_trajectories=True)
check_is_fitted(model)
def test_predict_discrete_time_multiple_trajectories(
data_discrete_time_multiple_trajectories,
):
x = data_discrete_time_multiple_trajectories
model = SINDy(discrete_time=True)
model.fit(x, multiple_trajectories=True)
# Should fail if multiple_trajectories flag is not set
with pytest.raises(ValueError):
model.predict(x)
y = model.predict(x, multiple_trajectories=True)
assert len(y) == len(x)
def test_score_discrete_time_multiple_trajectories(
data_discrete_time_multiple_trajectories,
):
x = data_discrete_time_multiple_trajectories
model = SINDy(discrete_time=True)
model.fit(x, multiple_trajectories=True)
# Should fail if multiple_trajectories flag is not set
with pytest.raises(ValueError):
model.score(x)
s = model.score(x, multiple_trajectories=True)
assert s > 0.75
# x is not its own derivative, so we expect bad performance here
s = model.score(x, x_dot=x, multiple_trajectories=True)
assert s < 1
@pytest.mark.parametrize(
"data",
[
pytest.lazy_fixture("data_1d"),
pytest.lazy_fixture("data_lorenz"),
pytest.lazy_fixture("data_1d_bad_shape"),
],
)
def test_equations(data, capsys):
x, t = data
model = SINDy()
model.fit(x, t)
out, _ = capsys.readouterr()
assert len(out) == 0
model.print(precision=2)
out, _ = capsys.readouterr()
assert len(out) > 0
def test_print_discrete_time(data_discrete_time, capsys):
x = data_discrete_time
model = SINDy(discrete_time=True)
model.fit(x)
model.print()
out, _ = capsys.readouterr()
assert len(out) > 0
def test_print_discrete_time_multiple_trajectories(
data_discrete_time_multiple_trajectories, capsys
):
x = data_discrete_time_multiple_trajectories
model = SINDy(discrete_time=True)
model.fit(x, multiple_trajectories=True)
model.print()
out, _ = capsys.readouterr()
assert len(out) > 1
def test_differentiate(data_lorenz, data_multiple_trajctories):
x, t = data_lorenz
model = SINDy()
model.differentiate(x, t)
x, t = data_multiple_trajctories
model.differentiate(x, t, multiple_trajectories=True)
model = SINDy(discrete_time=True)
with pytest.raises(RuntimeError):
model.differentiate(x)
def test_coefficients(data_lorenz):
x, t = data_lorenz
model = SINDy()
model.fit(x, t)
c = model.coefficients()
assert np.count_nonzero(c) < 10
def test_complexity(data_lorenz):
x, t = data_lorenz
model = SINDy()
model.fit(x, t)
assert model.complexity < 10
def test_multiple_trajectories_errors(data_multiple_trajctories, data_discrete_time):
x, t = data_multiple_trajctories
model = SINDy()
with pytest.raises(TypeError):
model._process_multiple_trajectories(np.array(x), t, x)
with pytest.raises(TypeError):
model._process_multiple_trajectories(x, t, np.array(x))
# Test an option that doesn't get tested elsewhere
model._process_multiple_trajectories(x, t, x, return_array=False)
x = data_discrete_time
model = SINDy(discrete_time=True)
with pytest.raises(TypeError):
model._process_multiple_trajectories(x, t, np.array(x))
def test_simulate_errors(data_lorenz):
x, t = data_lorenz
model = SINDy()
model.fit(x, t)
with pytest.raises(ValueError):
model.simulate(x[0], t=1)
model = SINDy(discrete_time=True)
with pytest.raises(ValueError):
model.simulate(x[0], t=[1, 2])
@pytest.mark.parametrize(
"params, warning",
[({"threshold": 100}, UserWarning), ({"max_iter": 1}, ConvergenceWarning)],
)
def test_fit_warn(data_lorenz, params, warning):
x, t = data_lorenz
model = SINDy(optimizer=STLSQ(**params))
with pytest.warns(warning):
model.fit(x, t)
with pytest.warns(None) as warn_record:
model.fit(x, t, quiet=True)
assert len(warn_record) == 0
| 25.013109 | 85 | 0.681291 |
7942f9a01cdfb35811fcb8d80ff9afd99f41085a | 4,388 | py | Python | facts/learn.py | cdcihub/literature-to-facts | 9fcf7bd74790e4e067f799270edd958cc30ee143 | ["MIT"] | null | null | null | facts/learn.py | cdcihub/literature-to-facts | 9fcf7bd74790e4e067f799270edd958cc30ee143 | ["MIT"] | null | null | null | facts/learn.py | cdcihub/literature-to-facts | 9fcf7bd74790e4e067f799270edd958cc30ee143 | ["MIT"] | null | null | null |
import logging
import typing
from concurrent import futures
import odakb.sparql # type: ignore
import re
import sys
import json
import importlib
from datetime import datetime
import requests
import click
import rdflib # type: ignore
import time
import multiprocessing
import threading
from facts.core import workflow
import facts.core
import facts.arxiv
import facts.gcn
import facts.atel
from colorama import Fore, Style # type: ignore
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s"
)
logger = logging.getLogger()
PaperEntry = typing.NewType("PaperEntry", dict)
@click.group()
@click.option("--debug", "-d", default=False, is_flag=True)
@click.option("-m", "--modules", multiple=True)
def cli(debug=False, modules=[]):
if debug:
logger.setLevel(logging.DEBUG)
for module_name in modules:
logger.info("loading additional module %s", module_name)
mod = importlib.import_module(module_name)
@cli.command()
@click.option("--workers", "-w", default=1)
@click.option("-a", "--arxiv", is_flag=True, default=False)
@click.option("-g", "--gcn", is_flag=True, default=False)
@click.option("-t", "--atel", is_flag=True, default=False)
def learn(workers, arxiv, gcn, atel):
it = []
if arxiv:
it.append(facts.arxiv.PaperEntry)
if gcn:
it.append(facts.gcn.GCNText)
if atel:
it.append(facts.atel.ATelEntry)
t = facts.core.workflows_by_input(workers, input_types=it)
logger.info(f"read in total {len(t)}")
open("knowledge.n3", "w").write(t)
@cli.command()
def publish():
D = open("knowledge.n3").read()
odakb.sparql.LocalGraph.default_prefixes.append("\n".join([d.strip().replace("@prefix","PREFIX").strip(".") for d in D.splitlines() if 'prefix' in d]))
D_g = D.split(".\n")
logger.info("found knowledge, lines: %d fact groups %d", len(D.splitlines()), len(D_g))
chunk_size = 1000
for i in range(0, len(D_g), chunk_size):
chunk_D = D_g[i:i + chunk_size]
logger.info("chunk of knowledge, lines from %d .. + %d / %d", i, len(chunk_D), len(D_g))
odakb.sparql.insert(
(".\n".join([d.strip() for d in chunk_D if 'prefix' not in d])).encode('utf-8').decode('latin-1')
)
@cli.command()
def contemplate():
G = rdflib.Graph()
G.parse("knowledge.n3", format="n3")
logger.info(f"parsed {len(list(G))}")
s = []
for rep_gcn_prop in "gcn:lvc_event_report", "gcn:reports_icecube_event":
for r in G.query("""
SELECT ?c ?ic_d ?ct_d ?t0 ?instr WHERE {{
?ic_g {rep_gcn_prop} ?c;
gcn:DATE ?ic_d .
?ct_g ?p ?c;
gcn:DATE ?ct_d;
gcn:original_event_utc ?t0;
gcn:instrument ?instr .
}}
""".format(rep_gcn_prop=rep_gcn_prop)):
if r[1] != r[2]:
logger.info(r)
s.append(dict(
event=str(r[0]),
event_gcn_time=str(r[1]),
counterpart_gcn_time=str(r[2]),
event_t0=str(r[3]),
instrument=str(r[4]),
))
byevent = dict()
for i in s:
ev = i['event']
if ev in byevent:
byevent[ev]['instrument'].append(i['instrument'])
else:
byevent[ev] = i
byevent[ev]['instrument'] = [i['instrument']]
s = list(byevent.values())
json.dump(s, open("counterpart_gcn_reaction_summary.json", "w"))
s = []
for r in G.query("""
SELECT ?grb ?t0 ?gcn_d WHERE {{
?gcn gcn:integral_grb_report ?grb .
?gcn gcn:DATE ?gcn_d .
?gcn gcn:event_t0 ?t0 .
}}
"""):
if r[1] != r[2]:
logger.info(r)
s.append(dict(
event=str(r[0]),
event_t0=str(r[1]),
event_gcn_time=str(r[2]),
))
json.dump(s, open("grb_gcn_reaction_summary.json", "w"))
if __name__ == "__main__":
cli()
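# Editor's note: a hedged sketch of how this CLI is typically invoked (not part of
# the original file); the module invocation path is an assumption.
#
#     python -m facts.learn learn --workers 8 --gcn --atel   # writes knowledge.n3
#     python -m facts.learn publish                          # pushes knowledge.n3 to the SPARQL store
#     python -m facts.learn contemplate                      # writes the *_summary.json files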
| 27.08642 | 155 | 0.537375 |
7942f9cf809108824b1bacd32182c0acb0155351 | 2,984 | py | Python | tests/unit/states/test_status.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | ["Apache-2.0"] | 1 | 2022-02-09T06:40:14.000Z | 2022-02-09T06:40:14.000Z | tests/unit/states/test_status.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | ["Apache-2.0"] | null | null | null | tests/unit/states/test_status.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | ["Apache-2.0"] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z |
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <[email protected]>
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt Libs
import salt.states.status as status
@skipIf(NO_MOCK, NO_MOCK_REASON)
class StatusTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.status
'''
def setup_loader_modules(self):
return {status: {}}
# 'loadavg' function tests: 1
def test_loadavg(self):
'''
Test to return the current load average for the specified minion.
'''
name = 'mymonitor'
ret = {'name': name,
'changes': {},
'result': True,
'data': {},
'comment': ''}
mock = MagicMock(return_value=[])
with patch.dict(status.__salt__, {'status.loadavg': mock}):
comt = ('Requested load average mymonitor not available ')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(status.loadavg(name), ret)
mock = MagicMock(return_value={name: 3})
with patch.dict(status.__salt__, {'status.loadavg': mock}):
comt = ('Min must be less than max')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(status.loadavg(name, 1, 5), ret)
comt = ('Load avg is below minimum of 4 at 3.0')
ret.update({'comment': comt, 'data': 3})
self.assertDictEqual(status.loadavg(name, 5, 4), ret)
comt = ('Load avg above maximum of 2 at 3.0')
ret.update({'comment': comt, 'data': 3})
self.assertDictEqual(status.loadavg(name, 2, 1), ret)
comt = ('Load avg in acceptable range')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(status.loadavg(name, 3, 1), ret)
# 'process' function tests: 1
def test_process(self):
'''
Test to return whether the specified signature
is found in the process tree.
'''
name = 'mymonitor'
ret = {'name': name,
'changes': {},
'result': True,
'data': {},
'comment': ''}
mock = MagicMock(side_effect=[{}, {name: 1}])
with patch.dict(status.__salt__, {'status.pid': mock}):
comt = ('Process signature "mymonitor" not found ')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(status.process(name), ret)
comt = ('Process signature "mymonitor" was found ')
ret.update({'comment': comt, 'result': True,
'data': {name: 1}})
self.assertDictEqual(status.process(name), ret)
| 32.434783 | 73 | 0.569035 |
7942f9d926764fc8bddf504aa50cd9d40488fcbb | 7,838 | py | Python | contrib/bitrpc/bitrpc.py | abc112116/nengcoin | f0306a468b5ddf8b5bac0268bbd9bf7a134624c8 | ["MIT"] | 6 | 2021-01-04T01:01:29.000Z | 2021-04-12T00:39:35.000Z | contrib/bitrpc/bitrpc.py | abc112116/nengcoin | f0306a468b5ddf8b5bac0268bbd9bf7a134624c8 | ["MIT"] | 4 | 2021-01-06T04:09:54.000Z | 2022-01-24T06:12:51.000Z | contrib/bitrpc/bitrpc.py | abc112116/nengcoin | f0306a468b5ddf8b5bac0268bbd9bf7a134624c8 | ["MIT"] | 5 | 2021-02-07T00:14:59.000Z | 2021-04-18T15:08:43.000Z |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:6376")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6376")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Nengcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Nengcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.116923 | 79 | 0.668283 |
7942fc360b21a8477d5ce3e92aaff48fc8fc8dde | 5,515 | py | Python | scripts/plot_dam_break.py | TormodLandet/Ocellaris | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | ["Apache-2.0"] | 1 | 2017-11-07T12:19:44.000Z | 2017-11-07T12:19:44.000Z | scripts/plot_dam_break.py | TormodLandet/Ocellaris | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | ["Apache-2.0"] | null | null | null | scripts/plot_dam_break.py | TormodLandet/Ocellaris | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | ["Apache-2.0"] | 2 | 2018-05-02T17:17:01.000Z | 2019-03-11T13:09:40.000Z |
# Copyright (C) 2017-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
# encoding: utf-8
from __future__ import division, print_function
import os
import numpy
from matplotlib import pyplot
from matplotlib.ticker import MultipleLocator
def read_iso_surface_file(file_name):
with open(file_name, 'rt') as f:
# Read header
line1 = f.readline()
line2 = f.readline()
line3 = f.readline()
field_name = line1[31:-7]
value = float(line2[9:])
dim = int(line3[8:])
assert dim == 2
times = []
lines = []
tline = f.readline()
while tline:
wt = tline.split()
time = float(wt[1])
nlines = int(wt[3])
tlines = []
for _ in range(nlines):
xvals = [float(v) for v in f.readline().split()]
yvals = [float(v) for v in f.readline().split()]
zvals = [float(v) for v in f.readline().split()]
tlines.append((xvals, yvals, zvals))
times.append(time)
lines.append(tlines)
tline = f.readline()
return field_name, value, dim, times, lines
def get_simulation_name(file_name, names_allready_taken):
# Find a name for this simulation
basename = os.path.basename(file_name)
if '_free_surface' in basename:
basename = basename.split('_free_surface')[0]
i = 1
name = basename
while name in names_allready_taken:
i += 1
name = basename + str(i)
return name
def plot_iso_surface_file(file_names, lables, n=2 ** 0.5, a=0.05715, g=9.81):
"""
Plot free surface elevations in the format of Martin and Moyce (1952)
The definitions of the parameters n and a are from the same article
and relate to the width and height of the fluid column.
:param list[str] file_names: names of Ocellaris iso surface output files
:param float n: height_column == n**2 * a
:param float a: width_column
"""
# Data from Martin and Moyce (1952), Table 2 and 6
mmTvec = [
0.41,
0.84,
1.19,
1.43,
1.63,
1.83,
1.98,
2.20,
2.32,
2.51,
2.65,
2.83,
2.98,
3.11,
3.33,
]
mmZvec = [
1.11,
1.22,
1.44,
1.67,
1.89,
2.11,
2.33,
2.56,
2.78,
3.00,
3.22,
3.44,
3.67,
3.89,
4.11,
]
mmYvec = [
0.56,
0.77,
0.93,
1.08,
1.28,
1.46,
1.66,
1.84,
2.00,
2.21,
2.45,
2.70,
3.06,
3.44,
4.20,
5.25,
7.40,
]
mmHvec = [
0.94,
0.89,
0.83,
0.78,
0.72,
0.67,
0.61,
0.56,
0.50,
0.44,
0.39,
0.33,
0.28,
0.22,
0.17,
0.11,
0.06,
]
plots = [
('Horizontal maximum', [('MM', mmTvec, mmZvec)]),
('Vertical maximum', [('MM', mmYvec, mmHvec)]),
]
# Read files
for ifile, file_name in enumerate(file_names):
field_name, value, dim, times, lines = read_iso_surface_file(file_name)
label = lables[ifile]
print(label)
print(field_name, value, dim)
# Y = τ
Tvec, Yvec, Zvec, Hvec = [], [], [], []
for i, tlines in enumerate(lines):
txmax = tymax = -numpy.inf
for xvals, yvals, _zvals in tlines:
if len(xvals):
txmax = max(txmax, numpy.max(xvals))
if len(yvals):
tymax = max(tymax, numpy.max(yvals))
Tvec.append(times[i] * (g / a) ** 0.5 * n)
Yvec.append(times[i] * (g / a) ** 0.5)
Zvec.append(txmax / a)
Hvec.append(tymax / (a * n ** 2))
print('tmax, Tmax, Ymax', times[-1], Tvec[-1], Yvec[-1])
plots[0][1].append((label, Tvec, Zvec))
plots[1][1].append((label, Yvec, Hvec))
# Plot surface elevations with time
fig = pyplot.figure(figsize=(8, 4))
for i, (name, lines) in enumerate(plots):
ax = fig.add_subplot(1, 2, i + 1)
ax.set_title(name)
for label, tvec, vals in lines:
style = (
dict(marker='o', ls='', label='Martin & Moyce')
if label == 'MM'
else dict(label=label)
)
ax.plot(tvec, vals, **style)
if len(lines) > 1:
ax.legend()
if name == 'Horizontal maximum':
ax.set_xlim(0, 3.8)
# ax.set_ylim(1, 4)
ax.yaxis.set_minor_locator(MultipleLocator(0.5))
ax.yaxis.set_major_locator(MultipleLocator(1))
ax.set_xlabel('$T$')
ax.set_ylabel('$Z$')
else:
ax.set_xlim(0, 2.8)
# ax.set_ylim(0, 1.25)
ax.yaxis.set_minor_locator(MultipleLocator(0.25 / 2))
ax.yaxis.set_major_locator(MultipleLocator(0.25))
ax.set_xlabel('$\\tau$')
ax.set_ylabel('$H$')
fig.tight_layout()
if __name__ == '__main__':
import sys
iso_surface_file_names = sys.argv[1:]
names = []
for fn in iso_surface_file_names:
names.append(get_simulation_name(fn, names))
plot_iso_surface_file(iso_surface_file_names, names)
pyplot.show()
| 25.298165 | 79 | 0.497552 |
7942fdfffedec9a2db8c9edcf9fc2415ca6571e1 | 5,445 | py | Python | st2common/st2common/transport/publishers.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | ["Apache-2.0"] | null | null | null | st2common/st2common/transport/publishers.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | ["Apache-2.0"] | 15 | 2021-02-11T22:58:54.000Z | 2021-08-06T18:03:47.000Z | st2common/st2common/transport/publishers.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | ["Apache-2.0"] | 1 | 2021-07-10T15:02:29.000Z | 2021-07-10T15:02:29.000Z |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
from kombu.messaging import Producer
from st2common import log as logging
from st2common.metrics.base import Timer
from st2common.transport import utils as transport_utils
from st2common.transport.connection_retry_wrapper import ConnectionRetryWrapper
__all__ = [
'PoolPublisher',
'SharedPoolPublishers',
'CUDPublisher',
'StatePublisherMixin'
]
ANY_RK = '*'
CREATE_RK = 'create'
UPDATE_RK = 'update'
DELETE_RK = 'delete'
LOG = logging.getLogger(__name__)
class PoolPublisher(object):
def __init__(self, urls=None):
"""
:param urls: Connection URLs to use. If not provided, it uses a default value from the
config.
:type urls: ``list``
"""
urls = urls or transport_utils.get_messaging_urls()
connection = transport_utils.get_connection(urls=urls,
connection_kwargs={'failover_strategy':
'round-robin'})
self.pool = connection.Pool(limit=10)
self.cluster_size = len(urls)
def errback(self, exc, interval):
LOG.error('Rabbitmq connection error: %s', exc.message, exc_info=False)
def publish(self, payload, exchange, routing_key=''):
with Timer(key='amqp.pool_publisher.publish_with_retries.' + exchange.name):
with self.pool.acquire(block=True) as connection:
retry_wrapper = ConnectionRetryWrapper(cluster_size=self.cluster_size, logger=LOG)
def do_publish(connection, channel):
# ProducerPool ends up creating it own ConnectionPool which ends up
# completely invalidating this ConnectionPool. Also, a ConnectionPool for
# producer does not really solve any problems for us so better to create a
# Producer for each publish.
producer = Producer(channel)
kwargs = {
'body': payload,
'exchange': exchange,
'routing_key': routing_key,
'serializer': 'pickle',
'content_encoding': 'utf-8'
}
retry_wrapper.ensured(
connection=connection,
obj=producer,
to_ensure_func=producer.publish,
**kwargs
)
retry_wrapper.run(connection=connection, wrapped_callback=do_publish)
class SharedPoolPublishers(object):
"""
This maintains some shared PoolPublishers. Within a single process the configured AMQP
server is usually the same. This sharing allows from the same PoolPublisher to be reused
for publishing purposes. Sharing publishers leads to shared connections.
"""
shared_publishers = {}
def get_publisher(self, urls):
# The publisher_key format here only works because we are aware that urls will be a
# list of strings. Sorting to end up with the same PoolPublisher regardless of
# ordering in supplied list.
urls_copy = copy.copy(urls)
urls_copy.sort()
publisher_key = ''.join(urls_copy)
publisher = self.shared_publishers.get(publisher_key, None)
if not publisher:
# Use original urls here to preserve order.
publisher = PoolPublisher(urls=urls)
self.shared_publishers[publisher_key] = publisher
return publisher
class CUDPublisher(object):
def __init__(self, exchange):
urls = transport_utils.get_messaging_urls()
self._publisher = SharedPoolPublishers().get_publisher(urls=urls)
self._exchange = exchange
def publish_create(self, payload):
with Timer(key='amqp.publish.create'):
self._publisher.publish(payload, self._exchange, CREATE_RK)
def publish_update(self, payload):
with Timer(key='amqp.publish.update'):
self._publisher.publish(payload, self._exchange, UPDATE_RK)
def publish_delete(self, payload):
with Timer(key='amqp.publish.delete'):
self._publisher.publish(payload, self._exchange, DELETE_RK)
class StatePublisherMixin(object):
def __init__(self, exchange):
urls = transport_utils.get_messaging_urls()
self._state_publisher = SharedPoolPublishers().get_publisher(urls=urls)
self._state_exchange = exchange
def publish_state(self, payload, state):
if not state:
raise Exception('Unable to publish unassigned state.')
with Timer(key='amqp.publish.state'):
self._state_publisher.publish(payload, self._state_exchange, state)
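# Editor's note: a minimal, hedged usage sketch (not part of the original module).
# The exchange name and model class are invented; a kombu Exchange is assumed,
# mirroring how CUDPublisher is meant to be subclassed.
#
#     from kombu import Exchange
#
#     class WidgetCUDPublisher(CUDPublisher):
#         def __init__(self):
#             super(WidgetCUDPublisher, self).__init__(exchange=Exchange('st2.widget', type='topic'))
#
#     publisher = WidgetCUDPublisher()
#     publisher.publish_create(widget_payload)   # routed with the 'create' routing key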
| 38.617021 | 98 | 0.644077 |
7942ff3de74136ac6fc4eb89ebd342a8b049eb19 | 542 | py | Python | var/spack/repos/builtin.mock/packages/maintainers-1/package.py | alkino/spack | b87ff60c7e23d7b50fac620ad60c8e2537312ebd | ["ECL-2.0", "Apache-2.0", "MIT"] | 1 | 2020-06-25T15:25:29.000Z | 2020-06-25T15:25:29.000Z | var/spack/repos/builtin.mock/packages/maintainers-1/package.py | alkino/spack | b87ff60c7e23d7b50fac620ad60c8e2537312ebd | ["ECL-2.0", "Apache-2.0", "MIT"] | null | null | null | var/spack/repos/builtin.mock/packages/maintainers-1/package.py | alkino/spack | b87ff60c7e23d7b50fac620ad60c8e2537312ebd | ["ECL-2.0", "Apache-2.0", "MIT"] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Maintainers1(Package):
"""Package with a maintainers field."""
homepage = "http://www.example.com"
url = "http://www.example.com/maintainers-1.0.tar.gz"
maintainers = ['user1', 'user2']
version('1.0', '0123456789abcdef0123456789abcdef')
def install(self, spec, prefix):
pass
| 25.809524 | 73 | 0.690037 |
794300272ee88de3fa66375d377254a2b05554e3 | 10,596 | py | Python | ppgan/apps/first_order_predictor.py | guojiahuiEmily/PaddleGAN | 9fa708ffad23a96b0d0faeac0742909136618438 | ["Apache-2.0"] | null | null | null | ppgan/apps/first_order_predictor.py | guojiahuiEmily/PaddleGAN | 9fa708ffad23a96b0d0faeac0742909136618438 | ["Apache-2.0"] | null | null | null | ppgan/apps/first_order_predictor.py | guojiahuiEmily/PaddleGAN | 9fa708ffad23a96b0d0faeac0742909136618438 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os
import sys
import cv2
import math
import yaml
import pickle
import imageio
import numpy as np
from tqdm import tqdm
from skimage import img_as_ubyte
from skimage.transform import resize
from scipy.spatial import ConvexHull
import paddle
from ppgan.utils.download import get_path_from_url
from ppgan.utils.animate import normalize_kp
from ppgan.modules.keypoint_detector import KPDetector
from ppgan.models.generators.occlusion_aware import OcclusionAwareGenerator
from ppgan.faceutils import face_detection
from .base_predictor import BasePredictor
class FirstOrderPredictor(BasePredictor):
def __init__(self,
output='output',
weight_path=None,
config=None,
relative=False,
adapt_scale=False,
find_best_frame=False,
best_frame=None,
ratio=1.0,
filename='result.mp4'):
if config is not None and isinstance(config, str):
self.cfg = yaml.load(config, Loader=yaml.SafeLoader)
elif isinstance(config, dict):
self.cfg = config
elif config is None:
self.cfg = {
'model_params': {
'common_params': {
'num_kp': 10,
'num_channels': 3,
'estimate_jacobian': True
},
'kp_detector_params': {
'temperature': 0.1,
'block_expansion': 32,
'max_features': 1024,
'scale_factor': 0.25,
'num_blocks': 5
},
'generator_params': {
'block_expansion': 64,
'max_features': 512,
'num_down_blocks': 2,
'num_bottleneck_blocks': 6,
'estimate_occlusion_map': True,
'dense_motion_params': {
'block_expansion': 64,
'max_features': 1024,
'num_blocks': 5,
'scale_factor': 0.25
}
}
}
}
if weight_path is None:
vox_cpk_weight_url = 'https://paddlegan.bj.bcebos.com/applications/first_order_model/vox-cpk.pdparams'
weight_path = get_path_from_url(vox_cpk_weight_url)
self.weight_path = weight_path
if not os.path.exists(output):
os.makedirs(output)
self.output = output
self.filename = filename
self.relative = relative
self.adapt_scale = adapt_scale
self.find_best_frame = find_best_frame
self.best_frame = best_frame
self.ratio = ratio
self.generator, self.kp_detector = self.load_checkpoints(
self.cfg, self.weight_path)
def run(self, source_image, driving_video):
source_image = imageio.imread(source_image)
bboxes = self.extract_bbox(source_image.copy())
reader = imageio.get_reader(driving_video)
fps = reader.get_meta_data()['fps']
driving_video = []
try:
for im in reader:
driving_video.append(im)
except RuntimeError:
pass
reader.close()
driving_video = [
resize(frame, (256, 256))[..., :3] for frame in driving_video
]
results = []
for rec in bboxes:
face_image = source_image.copy()[rec[1]:rec[3], rec[0]:rec[2]]
face_image = resize(face_image, (256, 256))
if self.find_best_frame or self.best_frame is not None:
i = self.best_frame if self.best_frame is not None else self.find_best_frame_func(
source_image, driving_video)
print("Best frame: " + str(i))
driving_forward = driving_video[i:]
driving_backward = driving_video[:(i + 1)][::-1]
predictions_forward = self.make_animation(
face_image,
driving_forward,
self.generator,
self.kp_detector,
relative=self.relative,
adapt_movement_scale=self.adapt_scale)
predictions_backward = self.make_animation(
face_image,
driving_backward,
self.generator,
self.kp_detector,
relative=self.relative,
adapt_movement_scale=self.adapt_scale)
predictions = predictions_backward[::-1] + predictions_forward[
1:]
else:
predictions = self.make_animation(
face_image,
driving_video,
self.generator,
self.kp_detector,
relative=self.relative,
adapt_movement_scale=self.adapt_scale)
results.append({'rec': rec, 'predict': predictions})
out_frame = []
for i in range(len(driving_video)):
frame = source_image.copy()
for result in results:
x1, y1, x2, y2 = result['rec']
h = y2 - y1
w = x2 - x1
out = result['predict'][i] * 255.0
out = cv2.resize(out.astype(np.uint8), (x2 - x1, y2 - y1))
patch = np.zeros(frame.shape).astype('uint8')
patch[y1:y2, x1:x2] = out
mask = np.zeros(frame.shape[:2]).astype('uint8')
cx = int((x1 + x2) / 2)
cy = int((y1 + y2) / 2)
cv2.circle(mask, (cx, cy), math.ceil(h * self.ratio),
(255, 255, 255), -1, 8, 0)
frame = cv2.copyTo(patch, mask, frame)
out_frame.append(frame)
imageio.mimsave(os.path.join(self.output, self.filename),
[frame for frame in out_frame],
fps=fps)
def load_checkpoints(self, config, checkpoint_path):
generator = OcclusionAwareGenerator(
**config['model_params']['generator_params'],
**config['model_params']['common_params'])
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
checkpoint = paddle.load(self.weight_path)
generator.set_state_dict(checkpoint['generator'])
kp_detector.set_state_dict(checkpoint['kp_detector'])
generator.eval()
kp_detector.eval()
return generator, kp_detector
def make_animation(self,
source_image,
driving_video,
generator,
kp_detector,
relative=True,
adapt_movement_scale=True):
with paddle.no_grad():
predictions = []
source = paddle.to_tensor(source_image[np.newaxis].astype(
np.float32)).transpose([0, 3, 1, 2])
driving = paddle.to_tensor(
np.array(driving_video)[np.newaxis].astype(
np.float32)).transpose([0, 4, 1, 2, 3])
kp_source = kp_detector(source)
kp_driving_initial = kp_detector(driving[:, :, 0])
for frame_idx in tqdm(range(driving.shape[2])):
driving_frame = driving[:, :, frame_idx]
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(
kp_source=kp_source,
kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial,
use_relative_movement=relative,
use_relative_jacobian=relative,
adapt_movement_scale=adapt_movement_scale)
out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
predictions.append(
np.transpose(out['prediction'].numpy(), [0, 2, 3, 1])[0])
return predictions
def find_best_frame_func(self, source, driving):
import face_alignment
def normalize_kp(kp):
kp = kp - kp.mean(axis=0, keepdims=True)
area = ConvexHull(kp[:, :2]).volume
area = np.sqrt(area)
kp[:, :2] = kp[:, :2] / area
return kp
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
flip_input=True)
kp_source = fa.get_landmarks(255 * source)[0]
kp_source = normalize_kp(kp_source)
norm = float('inf')
frame_num = 0
for i, image in tqdm(enumerate(driving)):
kp_driving = fa.get_landmarks(255 * image)[0]
kp_driving = normalize_kp(kp_driving)
new_norm = (np.abs(kp_source - kp_driving)**2).sum()
if new_norm < norm:
norm = new_norm
frame_num = i
return frame_num
def extract_bbox(self, image):
detector = face_detection.FaceAlignment(
face_detection.LandmarksType._2D, flip_input=False)
frame = [image]
predictions = detector.get_detections_for_image(np.array(frame))
results = []
h, w, _ = image.shape
for rect in predictions:
bh = rect[3] - rect[1]
bw = rect[2] - rect[0]
cy = rect[1] + int(bh / 2)
cx = rect[0] + int(bw / 2)
margin = max(bh, bw)
y1 = max(0, cy - margin)
x1 = max(0, cx - margin)
y2 = min(h, cy + margin)
x2 = min(w, cx + margin)
results.append([x1, y1, x2, y2])
boxes = np.array(results)
return boxes
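# Editor's note: a hedged usage sketch (not part of the original file). The input
# paths are placeholders; with weight_path=None the vox-cpk checkpoint is fetched
# automatically as implemented above.
#
#     predictor = FirstOrderPredictor(output='output', filename='result.mp4',
#                                     relative=True, adapt_scale=True, ratio=0.4)
#     predictor.run('source_face.png', 'driving_video.mp4')
#     # writes the composited animation to output/result.mp4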
| 37.978495 | 118 | 0.534919 |
79430039b031380e8681e2a6865f8bf005298ea6 | 5,104 | py | Python | TrainingInterfaces/Text_to_Spectrogram/FastSpeech2/PitchCalculator_Dio.py | Adamantcat/IMS-Toucan | 1ae02026a2a3233aaacc9d3a63d391918a2581e8 | [
"Apache-2.0"
] | null | null | null | TrainingInterfaces/Text_to_Spectrogram/FastSpeech2/PitchCalculator_Dio.py | Adamantcat/IMS-Toucan | 1ae02026a2a3233aaacc9d3a63d391918a2581e8 | [
"Apache-2.0"
] | null | null | null | TrainingInterfaces/Text_to_Spectrogram/FastSpeech2/PitchCalculator_Dio.py | Adamantcat/IMS-Toucan | 1ae02026a2a3233aaacc9d3a63d391918a2581e8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Adapted by Florian Lux 2021
import numpy as np
import pyworld
import torch
import torch.nn.functional as F
from scipy.interpolate import interp1d
from Utility.utils import pad_list
class Dio(torch.nn.Module):
"""
F0 estimation with dio + stonemask algorithm.
This is an f0 extractor based on the dio + stonemask algorithm
introduced in https://doi.org/10.1587/transinf.2015EDP7457
"""
def __init__(self, fs=16000, n_fft=1024, hop_length=256, f0min=40, f0max=400, use_token_averaged_f0=True,
use_continuous_f0=False, use_log_f0=False, reduction_factor=1):
super().__init__()
self.fs = fs
self.n_fft = n_fft
self.hop_length = hop_length
self.frame_period = 1000 * hop_length / fs
self.f0min = f0min
self.f0max = f0max
self.use_token_averaged_f0 = use_token_averaged_f0
self.use_continuous_f0 = use_continuous_f0
self.use_log_f0 = use_log_f0
if use_token_averaged_f0:
assert reduction_factor >= 1
self.reduction_factor = reduction_factor
def output_size(self):
return 1
def get_parameters(self):
return dict(fs=self.fs, n_fft=self.n_fft, hop_length=self.hop_length, f0min=self.f0min, f0max=self.f0max,
use_token_averaged_f0=self.use_token_averaged_f0, use_continuous_f0=self.use_continuous_f0, use_log_f0=self.use_log_f0,
reduction_factor=self.reduction_factor)
def forward(self, input_waves, input_waves_lengths=None, feats_lengths=None, durations=None,
durations_lengths=None, norm_by_average=True, text=None):
# If not provided, we assume that the inputs have the same length
if input_waves_lengths is None:
input_waves_lengths = (input_waves.new_ones(input_waves.shape[0], dtype=torch.long) * input_waves.shape[1])
# F0 extraction
pitch = [self._calculate_f0(x[:xl]) for x, xl in zip(input_waves, input_waves_lengths)]
# (Optional): Adjust length to match with the mel-spectrogram
if feats_lengths is not None:
pitch = [self._adjust_num_frames(p, fl).view(-1) for p, fl in zip(pitch, feats_lengths)]
# (Optional): Average by duration to calculate token-wise f0
if self.use_token_averaged_f0:
pitch = [self._average_by_duration(p, d, text).view(-1) for p, d in zip(pitch, durations)]
pitch_lengths = durations_lengths
else:
pitch_lengths = input_waves.new_tensor([len(p) for p in pitch], dtype=torch.long)
# Padding
pitch = pad_list(pitch, 0.0)
# Return with the shape (B, T, 1)
if norm_by_average:
average = pitch[0][pitch[0] != 0.0].mean()
pitch = pitch / average
return pitch.unsqueeze(-1), pitch_lengths
def _calculate_f0(self, input):
x = input.cpu().numpy().astype(np.double)
f0, timeaxis = pyworld.dio(x, self.fs, f0_floor=self.f0min, f0_ceil=self.f0max, frame_period=self.frame_period)
f0 = pyworld.stonemask(x, f0, timeaxis, self.fs)
if self.use_continuous_f0:
f0 = self._convert_to_continuous_f0(f0)
if self.use_log_f0:
nonzero_idxs = np.where(f0 != 0)[0]
f0[nonzero_idxs] = np.log(f0[nonzero_idxs])
return input.new_tensor(f0.reshape(-1), dtype=torch.float)
@staticmethod
def _adjust_num_frames(x, num_frames):
if num_frames > len(x):
x = F.pad(x, (0, num_frames - len(x)))
elif num_frames < len(x):
x = x[:num_frames]
return x
@staticmethod
def _convert_to_continuous_f0(f0: np.array):
if (f0 == 0).all():
return f0
# padding start and end of f0 sequence
start_f0 = f0[f0 != 0][0]
end_f0 = f0[f0 != 0][-1]
start_idx = np.where(f0 == start_f0)[0][0]
end_idx = np.where(f0 == end_f0)[0][-1]
f0[:start_idx] = start_f0
f0[end_idx:] = end_f0
# get non-zero frame index
nonzero_idxs = np.where(f0 != 0)[0]
# perform linear interpolation
interp_fn = interp1d(nonzero_idxs, f0[nonzero_idxs])
f0 = interp_fn(np.arange(0, f0.shape[0]))
return f0
def _average_by_duration(self, x, d, text=None):
assert 0 <= len(x) - d.sum() < self.reduction_factor
d_cumsum = F.pad(d.cumsum(dim=0), (1, 0))
x_avg = [
x[start:end].masked_select(x[start:end].gt(0.0)).mean(dim=0) if len(x[start:end].masked_select(x[start:end].gt(0.0))) != 0 else x.new_tensor(0.0)
for start, end in zip(d_cumsum[:-1], d_cumsum[1:])]
# find tokens that are not phones and set pitch to 0
if text is not None:
for i, vector in enumerate(text):
if vector[13] == 0:
# idx 13 corresponds to 'phoneme' feature
x_avg[i] = torch.tensor(0.0)
return torch.stack(x_avg)
| 39.261538 | 157 | 0.628918 |
7943003f99a102cdaff2f79aee8adfe9f278399e | 1,599 | py | Python | google-cloud-clients/google-cloud-kms/synth.py | teeraporn39/google-cloud-java | 0e2d6f25f6b5dcf3be71cced913fc174e6c46cb0 | [
"Apache-2.0"
] | 2 | 2019-08-25T15:16:57.000Z | 2019-08-25T15:17:04.000Z | google-cloud-clients/google-cloud-kms/synth.py | teeraporn39/google-cloud-java | 0e2d6f25f6b5dcf3be71cced913fc174e6c46cb0 | [
"Apache-2.0"
] | 3 | 2019-05-22T14:12:27.000Z | 2019-07-09T14:16:23.000Z | google-cloud-clients/google-cloud-kms/synth.py | darienesf/google-cloud-java | d66c22b2ee15453755307249c0e88c79cb20686f | [
"Apache-2.0"
] | 1 | 2019-02-01T22:12:05.000Z | 2019-02-01T22:12:05.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
gapic = gcp.GAPICGenerator()
service = 'kms'
versions = ['v1']
config_pattern = '/google/cloud/kms/artman_cloudkms.yaml'
for version in versions:
library = gapic.java_library(
service=service,
version=version,
config_path=config_pattern.format(version=version),
artman_output_name='')
s.copy(library / f'gapic-google-cloud-{service}-{version}/src', 'src')
s.copy(library / f'grpc-google-cloud-{service}-{version}/src', f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
s.copy(library / f'proto-google-cloud-{service}-{version}/src', f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
java.format_code('./src')
java.format_code(f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
java.format_code(f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
| 39 | 134 | 0.729206 |
7943014e2dcdec75f68768fc0dfbbff1e9abd8df | 3,975 | py | Python | yt_dlp/postprocessor/sponsorblock.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 1 | 2022-03-26T15:43:50.000Z | 2022-03-26T15:43:50.000Z | yt_dlp/postprocessor/sponsorblock.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | null | null | null | yt_dlp/postprocessor/sponsorblock.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 2 | 2022-01-10T08:38:40.000Z | 2022-01-25T11:32:14.000Z | from hashlib import sha256
import json
import re
from .ffmpeg import FFmpegPostProcessor
from ..compat import compat_urllib_parse_urlencode
class SponsorBlockPP(FFmpegPostProcessor):
# https://wiki.sponsor.ajay.app/w/Types
EXTRACTORS = {
'Youtube': 'YouTube',
}
POI_CATEGORIES = {
'poi_highlight': 'Highlight',
}
CATEGORIES = {
'sponsor': 'Sponsor',
'intro': 'Intermission/Intro Animation',
'outro': 'Endcards/Credits',
'selfpromo': 'Unpaid/Self Promotion',
'preview': 'Preview/Recap',
'filler': 'Filler Tangent',
'interaction': 'Interaction Reminder',
'music_offtopic': 'Non-Music Section',
**POI_CATEGORIES,
}
def __init__(self, downloader, categories=None, api='https://sponsor.ajay.app'):
FFmpegPostProcessor.__init__(self, downloader)
self._categories = tuple(categories or self.CATEGORIES.keys())
self._API_URL = api if re.match('^https?://', api) else 'https://' + api
def run(self, info):
extractor = info['extractor_key']
if extractor not in self.EXTRACTORS:
self.to_screen(f'SponsorBlock is not supported for {extractor}')
return [], info
self.to_screen('Fetching SponsorBlock segments')
info['sponsorblock_chapters'] = self._get_sponsor_chapters(info, info['duration'])
return [], info
def _get_sponsor_chapters(self, info, duration):
segments = self._get_sponsor_segments(info['id'], self.EXTRACTORS[info['extractor_key']])
def duration_filter(s):
start_end = s['segment']
# Ignore entire video segments (https://wiki.sponsor.ajay.app/w/Types).
if start_end == (0, 0):
return False
# Ignore milliseconds difference at the start.
if start_end[0] <= 1:
start_end[0] = 0
# Make POI chapters 1 sec so that we can properly mark them
if s['category'] in self.POI_CATEGORIES.keys():
start_end[1] += 1
# Ignore milliseconds difference at the end.
# Never allow the segment to exceed the video.
if duration and duration - start_end[1] <= 1:
start_end[1] = duration
# SponsorBlock duration may be absent or it may deviate from the real one.
return s['videoDuration'] == 0 or not duration or abs(duration - s['videoDuration']) <= 1
duration_match = [s for s in segments if duration_filter(s)]
if len(duration_match) != len(segments):
self.report_warning('Some SponsorBlock segments are from a video of different duration, maybe from an old version of this video')
def to_chapter(s):
(start, end), cat = s['segment'], s['category']
return {
'start_time': start,
'end_time': end,
'category': cat,
'title': self.CATEGORIES[cat],
'_categories': [(cat, start, end)]
}
sponsor_chapters = [to_chapter(s) for s in duration_match]
if not sponsor_chapters:
self.to_screen('No segments were found in the SponsorBlock database')
else:
self.to_screen(f'Found {len(sponsor_chapters)} segments in the SponsorBlock database')
return sponsor_chapters
def _get_sponsor_segments(self, video_id, service):
hash = sha256(video_id.encode('ascii')).hexdigest()
# SponsorBlock API recommends using first 4 hash characters.
url = f'{self._API_URL}/api/skipSegments/{hash[:4]}?' + compat_urllib_parse_urlencode({
'service': service,
'categories': json.dumps(self._categories),
'actionTypes': json.dumps(['skip', 'poi'])
})
for d in self._download_json(url) or []:
if d['videoID'] == video_id:
return d['segments']
return []
| 40.561224 | 141 | 0.604025 |
794301ac1ca5014d4568b7fd43d98acccf7caafe | 4,129 | py | Python | tests/scripts/test_buddy_cli.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
] | 5 | 2020-03-13T21:34:31.000Z | 2020-10-27T15:18:17.000Z | tests/scripts/test_buddy_cli.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
] | 2 | 2020-06-17T11:06:56.000Z | 2020-10-25T03:06:18.000Z | tests/scripts/test_buddy_cli.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
] | 4 | 2020-03-15T01:55:18.000Z | 2022-01-21T22:06:48.000Z | import os
import subprocess
from typing import List, Tuple, Union
import torch
import fannypack
def _run_command(command: Union[str, List[str]]) -> Tuple[str, str, int]:
"""Helper for running a command & returning results."""
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=os.path.join(os.path.dirname(__file__), "../assets"),
)
out, err = proc.communicate()
def convert(x: Union[str, bytes]) -> str:
if isinstance(x, bytes):
return x.decode("utf8")
else:
return x
return convert(out), convert(err), proc.returncode
def test_buddy_no_args():
"""Make sure that `buddy` fails without arguments."""
out, err, exitcode = _run_command(["buddy"])
assert exitcode == 2
# Out of scope: testing for interactive features
#
# def test_buddy_delete_no_args():
# """Make sure that `buddy delete` with no arguments shows a menu."""
# out, err, exitcode = _run_command(["buddy", "delete"])
# assert "Navigate: j/k" in out
# assert "Select: <CR>" in out
# assert exitcode == 1
def test_buddy_info_no_args():
"""Make sure that `buddy info` fails without arguments."""
out, err, exitcode = _run_command(["buddy", "info"])
assert exitcode == 2
def test_buddy_list():
"""Check that we can list experiments."""
out, err, exitcode = _run_command(["buddy", "list"])
assert exitcode == 0
assert out.startswith("Found 2 experiments")
assert out
def test_buddy_rename_no_args():
"""Make sure that `buddy rename` fails without arguments."""
out, err, exitcode = _run_command(["buddy", "rename"])
assert exitcode == 2
def test_buddy_info():
"""Make sure that `buddy info` gives us sane results."""
out, err, exitcode = _run_command(["buddy", "info", "simple_net"])
assert exitcode == 0
assert "(steps: 200)" in out
def test_buddy_rename():
"""Make sure that we can rename experiments."""
# Pre-condition
out, err, exitcode = _run_command(["buddy", "list"])
assert exitcode == 0
assert out.startswith("Found 2 experiments")
assert "simple_net" in out
# Rename experiment
out, err, exitcode = _run_command(["buddy", "rename", "simple_net", "blah"])
assert exitcode == 0
# Post-condition
out, err, exitcode = _run_command(["buddy", "list"])
assert exitcode == 0
assert out.startswith("Found 2 experiments")
assert "simple_net" not in out
# Revert changes
out, err, exitcode = _run_command(["buddy", "rename", "blah", "simple_net"])
assert exitcode == 0
def test_buddy_delete():
"""Make sure that we can delete experiments."""
# Create experiment
buddy = fannypack.utils.Buddy(
"temporary_net",
model=torch.nn.Linear(10, 20),
# Use directories relative to this fixture
checkpoint_dir=os.path.join(
os.path.dirname(__file__), "../assets/checkpoints/"
),
metadata_dir=os.path.join(os.path.dirname(__file__), "../assets/metadata/"),
log_dir=os.path.join(os.path.dirname(__file__), "../assets/logs/"),
verbose=True,
# Disable auto-checkpointing
optimizer_checkpoint_interval=0,
cpu_only=True,
)
# Pre-condition
out, err, exitcode = _run_command(["buddy", "list"])
assert exitcode == 0
assert out.startswith("Found 2 experiments")
assert "temporary_net" not in out
# Save some files
buddy.add_metadata({"blah": "blah"})
buddy.save_checkpoint()
# Pre-condition
out, err, exitcode = _run_command(["buddy", "list"])
assert exitcode == 0
assert out.startswith("Found 3 experiments")
assert "temporary_net" in out
# Delete buddy
del buddy
# Delete experiment
out, err, exitcode = _run_command(["buddy", "delete", "temporary_net", "--forever"])
assert exitcode == 0
# Post-condition
out, err, exitcode = _run_command(["buddy", "list"])
assert exitcode == 0
assert out.startswith("Found 2 experiments")
assert "temporary_net" not in out
| 28.874126 | 88 | 0.640833 |
7943020f42d8b4148af3265f66df696448585bc0 | 2,242 | py | Python | testproj/testproj/settings/env.py | pivotal-energy-solutions/django-input-collection | cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f | [
"Apache-2.0"
] | null | null | null | testproj/testproj/settings/env.py | pivotal-energy-solutions/django-input-collection | cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f | [
"Apache-2.0"
] | 4 | 2019-08-25T15:47:24.000Z | 2022-03-24T19:35:09.000Z | testproj/testproj/settings/env.py | pivotal-energy-solutions/django-input-collection | cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import warnings
from django.core.exceptions import ImproperlyConfigured
# BASE_DIR is the project root (i.e., where manage.py lives)
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
# Default .env location (override with environment variable PROJECT_ENV_FILE)
ENV_FILE = os.path.abspath(os.path.join(BASE_DIR, ".env"))
ENV_DATA = {} # Loaded env data
class UNSET(object):
pass
def get_variable(var_name, default=UNSET):
"""Read the given variable name from the environment or the designated .env file."""
if var_name in os.environ:
return os.environ[var_name]
env_path = _load_file()
if var_name in ENV_DATA:
return ENV_DATA[var_name]
elif default is not UNSET:
return default
raise ImproperlyConfigured(
"Provide a default, set the env variable {var!r}, or place it in the an env file "
"(currently: {env_path!r}) as: {var}='VALUE'".format(
var=var_name,
env_path=env_path,
)
)
def _load_file():
"""
    Loads the environment's env or the default env into a cache. Returns early if the target file
    doesn't exist or if an env has already been loaded.
    Returns the path to the targeted env file, even if that file did not exist.
"""
env_path = os.environ.get("PROJECT_ENV_FILE", ENV_FILE)
env_path = os.path.abspath(env_path)
if not os.path.exists(env_path):
# warnings.warn("Invalid env path specified: {!r}".format(env_path))
return env_path
elif ENV_DATA: # Already loaded
return env_path
# Drop existing data and reload
ENV_DATA.clear()
with open(env_path) as f:
try:
for i, line in enumerate(f):
exec(line, {}, ENV_DATA)
except Exception as e:
raise ImproperlyConfigured(
'Error evaluating "{env_path}", line {line}'
"\n{exception_type}: {exception}".format(
env_path=env_path,
line=i + 1,
exception_type=e.__class__.__name__,
exception=e,
)
)
return env_path
| 29.5 | 97 | 0.620874 |
79430320a31acc5f3d718cc1c5043ca2cd21c60d | 748 | py | Python | src/functions.py | pratyuksh/xtDgWave | 4b3bf60fe5f974a9d166f3553c64c8ee4f1721d9 | [
"MIT"
] | null | null | null | src/functions.py | pratyuksh/xtDgWave | 4b3bf60fe5f974a9d166f3553c64c8ee4f1721d9 | [
"MIT"
] | null | null | null | src/functions.py | pratyuksh/xtDgWave | 4b3bf60fe5f974a9d166f3553c64c8ee4f1721d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import src.legendre as legendre
# uniform time series: h_t = T/ne_t
def get_uniform_time_series(T, ne_t):
tMesh = np.zeros(ne_t + 1)
for k in range(0, ne_t + 1):
tMesh[k] = k * T / ne_t
return tMesh
# Affine mapping, reference interval (0,1) to _specified_ interval
def affineMap(a, b, xi):
z = a * (1 - xi) + b * xi
return z
# Build solution
def getSol(p, u, xi):
val = 0
for i in range(0, p + 1):
val += u[i] * legendre.basis1d(i, xi)
return val
# Build solution gradient
def getSolGrad(p, u, xi):
val = 0
for i in range(0, p + 1):
val += u[i] * legendre.basis1d_deriv(i, xi)
return val
# End of file
| 19.179487 | 66 | 0.593583 |
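# --- Illustrative values, not part of the original module --------------------
#   get_uniform_time_series(T=1.0, ne_t=4) -> [0.0, 0.25, 0.5, 0.75, 1.0]
#   affineMap(a=2.0, b=5.0, xi=0.5)        -> 3.5   (midpoint of [2, 5])
# getSol / getSolGrad evaluate sum_i u[i] * P_i(xi) and its derivative on the
# reference interval, where P_i is the i-th basis function from src.legendre.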
79430417b2d0950ca4be67d14ce9dcf1e3ac7e2c | 42,834 | py | Python | mesonbuild/mtest.py | gerion0/meson | 48a5d17830820e4729d252fc33c26fb9dac66404 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mtest.py | gerion0/meson | 48a5d17830820e4729d252fc33c26fb9dac66404 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mtest.py | gerion0/meson | 48a5d17830820e4729d252fc33c26fb9dac66404 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A tool to run tests in many different ways.
from pathlib import Path
from collections import namedtuple
from copy import deepcopy
import argparse
import concurrent.futures as conc
import datetime
import enum
import io
import json
import multiprocessing
import os
import pickle
import platform
import random
import re
import signal
import subprocess
import sys
import tempfile
import time
import typing
from . import build
from . import environment
from . import mlog
from .dependencies import ExternalProgram
from .mesonlib import MesonException, get_wine_shortpath, split_args
if typing.TYPE_CHECKING:
from .backend.backends import TestSerialisation
# GNU autotools interprets a return code of 77 from tests it executes to
# mean that the test should be skipped.
GNU_SKIP_RETURNCODE = 77
# GNU autotools interprets a return code of 99 from tests it executes to
# mean that the test failed even before testing what it is supposed to test.
GNU_ERROR_RETURNCODE = 99
def is_windows() -> bool:
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_cygwin() -> bool:
platname = platform.system().lower()
return 'cygwin' in platname
def determine_worker_count() -> int:
varname = 'MESON_TESTTHREADS'
if varname in os.environ:
try:
num_workers = int(os.environ[varname])
except ValueError:
print('Invalid value in %s, using 1 thread.' % varname)
num_workers = 1
else:
try:
# Fails in some weird environments such as Debian
# reproducible build.
num_workers = multiprocessing.cpu_count()
except Exception:
num_workers = 1
return num_workers
def add_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--repeat', default=1, dest='repeat', type=int,
help='Number of times to run the tests.')
parser.add_argument('--no-rebuild', default=False, action='store_true',
help='Do not rebuild before running tests.')
parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
help='Run test under gdb.')
parser.add_argument('--list', default=False, dest='list', action='store_true',
help='List available tests.')
parser.add_argument('--wrapper', default=None, dest='wrapper', type=split_args,
help='wrapper to run tests with (e.g. Valgrind)')
parser.add_argument('-C', default='.', dest='wd',
help='directory to cd into before running')
parser.add_argument('--suite', default=[], dest='include_suites', action='append', metavar='SUITE',
help='Only run tests belonging to the given suite.')
parser.add_argument('--no-suite', default=[], dest='exclude_suites', action='append', metavar='SUITE',
help='Do not run tests belonging to the given suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
help='Do not split stderr and stdout in test logs.')
parser.add_argument('--print-errorlogs', default=False, action='store_true',
help="Whether to print failing tests' logs.")
parser.add_argument('--benchmark', default=False, action='store_true',
help="Run benchmarks instead of tests.")
parser.add_argument('--logbase', default='testlog',
help="Base name for log file.")
parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
help='How many parallel processes to use.')
parser.add_argument('-v', '--verbose', default=False, action='store_true',
help='Do not redirect stdout and stderr')
parser.add_argument('-q', '--quiet', default=False, action='store_true',
help='Produce less output to the terminal.')
parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,
help='Define a multiplier for test timeout, for example '
' when running tests in particular conditions they might take'
' more time to execute.')
parser.add_argument('--setup', default=None, dest='setup',
help='Which test setup to use.')
parser.add_argument('--test-args', default=[], type=split_args,
help='Arguments to pass to the specified test(s) or all tests')
parser.add_argument('args', nargs='*',
help='Optional list of tests to run')
def returncode_to_status(retcode: int) -> str:
# Note: We can't use `os.WIFSIGNALED(result.returncode)` and the related
# functions here because the status returned by subprocess is munged. It
# returns a negative value if the process was killed by a signal rather than
    # the raw status returned by `wait()`. Also, if a shell sits between Meson
    # and the actual unit test, that shell is likely to convert a termination due
# to a signal into an exit status of 128 plus the signal number.
if retcode < 0:
signum = -retcode
try:
signame = signal.Signals(signum).name
except ValueError:
signame = 'SIGinvalid'
return '(killed by signal %d %s)' % (signum, signame)
if retcode <= 128:
return '(exit status %d)' % (retcode,)
signum = retcode - 128
try:
signame = signal.Signals(signum).name
except ValueError:
signame = 'SIGinvalid'
return '(exit status %d or signal %d %s)' % (retcode, signum, signame)
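# Illustrative examples, not part of the original: how the cases above are
# rendered, assuming a POSIX platform where signal 11 is SIGSEGV:
#   returncode_to_status(-11) -> '(killed by signal 11 SIGSEGV)'            (killed directly)
#   returncode_to_status(2)   -> '(exit status 2)'                          (ordinary exit code)
#   returncode_to_status(139) -> '(exit status 139 or signal 11 SIGSEGV)'   (128 + signum via a shell)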
def env_tuple_to_str(env: typing.Iterable[typing.Tuple[str, str]]) -> str:
return ''.join(["%s='%s' " % (k, v) for k, v in env])
class TestException(MesonException):
pass
@enum.unique
class TestResult(enum.Enum):
OK = 'OK'
TIMEOUT = 'TIMEOUT'
SKIP = 'SKIP'
FAIL = 'FAIL'
EXPECTEDFAIL = 'EXPECTEDFAIL'
UNEXPECTEDPASS = 'UNEXPECTEDPASS'
ERROR = 'ERROR'
class TAPParser:
Plan = namedtuple('Plan', ['count', 'late', 'skipped', 'explanation'])
Bailout = namedtuple('Bailout', ['message'])
Test = namedtuple('Test', ['number', 'name', 'result', 'explanation'])
Error = namedtuple('Error', ['message'])
Version = namedtuple('Version', ['version'])
_MAIN = 1
_AFTER_TEST = 2
_YAML = 3
_RE_BAILOUT = re.compile(r'Bail out!\s*(.*)')
_RE_DIRECTIVE = re.compile(r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?')
_RE_PLAN = re.compile(r'1\.\.([0-9]+)' + _RE_DIRECTIVE.pattern)
_RE_TEST = re.compile(r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE.pattern)
_RE_VERSION = re.compile(r'TAP version ([0-9]+)')
_RE_YAML_START = re.compile(r'(\s+)---.*')
_RE_YAML_END = re.compile(r'\s+\.\.\.\s*')
def __init__(self, io: typing.Iterator[str]):
self.io = io
def parse_test(self, ok: bool, num: int, name: str, directive: typing.Optional[str], explanation: typing.Optional[str]) -> \
typing.Generator[typing.Union['TAPParser.Test', 'TAPParser.Error'], None, None]:
name = name.strip()
explanation = explanation.strip() if explanation else None
if directive is not None:
directive = directive.upper()
if directive == 'SKIP':
if ok:
yield self.Test(num, name, TestResult.SKIP, explanation)
return
elif directive == 'TODO':
yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation)
return
else:
yield self.Error('invalid directive "%s"' % (directive,))
yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation)
def parse(self) -> typing.Generator[typing.Union['TAPParser.Test', 'TAPParser.Error', 'TAPParser.Version', 'TAPParser.Plan', 'TAPParser.Bailout'], None, None]:
found_late_test = False
bailed_out = False
plan = None
lineno = 0
num_tests = 0
yaml_lineno = None
yaml_indent = ''
state = self._MAIN
version = 12
while True:
lineno += 1
try:
line = next(self.io).rstrip()
except StopIteration:
break
# YAML blocks are only accepted after a test
if state == self._AFTER_TEST:
if version >= 13:
m = self._RE_YAML_START.match(line)
if m:
state = self._YAML
yaml_lineno = lineno
yaml_indent = m.group(1)
continue
state = self._MAIN
elif state == self._YAML:
if self._RE_YAML_END.match(line):
state = self._MAIN
continue
if line.startswith(yaml_indent):
continue
yield self.Error('YAML block not terminated (started on line {})'.format(yaml_lineno))
state = self._MAIN
assert state == self._MAIN
if line.startswith('#'):
continue
m = self._RE_TEST.match(line)
if m:
if plan and plan.late and not found_late_test:
yield self.Error('unexpected test after late plan')
found_late_test = True
num_tests += 1
num = num_tests if m.group(2) is None else int(m.group(2))
if num != num_tests:
yield self.Error('out of order test numbers')
yield from self.parse_test(m.group(1) == 'ok', num,
m.group(3), m.group(4), m.group(5))
state = self._AFTER_TEST
continue
m = self._RE_PLAN.match(line)
if m:
if plan:
yield self.Error('more than one plan found')
else:
count = int(m.group(1))
skipped = (count == 0)
if m.group(2):
if m.group(2).upper().startswith('SKIP'):
if count > 0:
yield self.Error('invalid SKIP directive for plan')
skipped = True
else:
yield self.Error('invalid directive for plan')
plan = self.Plan(count=count, late=(num_tests > 0),
skipped=skipped, explanation=m.group(3))
yield plan
continue
m = self._RE_BAILOUT.match(line)
if m:
yield self.Bailout(m.group(1))
bailed_out = True
continue
m = self._RE_VERSION.match(line)
if m:
# The TAP version is only accepted as the first line
if lineno != 1:
yield self.Error('version number must be on the first line')
continue
version = int(m.group(1))
if version < 13:
yield self.Error('version number should be at least 13')
else:
yield self.Version(version=version)
continue
if len(line) == 0:
continue
yield self.Error('unexpected input at line %d' % (lineno,))
if state == self._YAML:
yield self.Error('YAML block not terminated (started on line {})'.format(yaml_lineno))
if not bailed_out and plan and num_tests != plan.count:
if num_tests < plan.count:
yield self.Error('Too few tests run (expected %d, got %d)' % (plan.count, num_tests))
else:
yield self.Error('Too many tests run (expected %d, got %d)' % (plan.count, num_tests))
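# Illustrative sketch, not part of the original mtest.py: feeding a minimal
# TAP stream through TAPParser yields the plan followed by one event per test
# line, with TODO-marked failures reported as EXPECTEDFAIL.
#
#     import io
#     stream = io.StringIO('1..2\nok 1 - first\nnot ok 2 - second # TODO flaky\n')
#     for event in TAPParser(stream).parse():
#         print(event)
#     # yields, in order:
#     #   Plan(count=2, late=False, skipped=False, explanation=None)
#     #   Test 1 '- first'  -> TestResult.OK
#     #   Test 2 '- second' -> TestResult.EXPECTEDFAIL (explanation 'flaky')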
class TestRun:
@classmethod
def make_exitcode(cls, test: 'TestSerialisation', test_env: typing.Dict[str, str],
returncode: int, duration: float, stdo: typing.Optional[str],
stde: typing.Optional[str],
cmd: typing.Optional[typing.List[str]]) -> 'TestRun':
if returncode == GNU_SKIP_RETURNCODE:
res = TestResult.SKIP
elif returncode == GNU_ERROR_RETURNCODE:
res = TestResult.ERROR
elif test.should_fail:
res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
else:
res = TestResult.FAIL if bool(returncode) else TestResult.OK
return cls(test, test_env, res, returncode, duration, stdo, stde, cmd)
@classmethod
def make_tap(cls, test: 'TestSerialisation', test_env: typing.Dict[str, str],
returncode: int, duration: float, stdo: str, stde: str,
cmd: typing.Optional[typing.List[str]]) -> 'TestRun':
res = None
num_tests = 0
failed = False
num_skipped = 0
for i in TAPParser(io.StringIO(stdo)).parse():
if isinstance(i, TAPParser.Bailout):
res = TestResult.ERROR
elif isinstance(i, TAPParser.Test):
if i.result == TestResult.SKIP:
num_skipped += 1
elif i.result in (TestResult.FAIL, TestResult.UNEXPECTEDPASS):
failed = True
num_tests += 1
elif isinstance(i, TAPParser.Error):
res = TestResult.ERROR
stde += '\nTAP parsing error: ' + i.message
if returncode != 0:
res = TestResult.ERROR
stde += '\n(test program exited with status code %d)' % (returncode,)
if res is None:
# Now determine the overall result of the test based on the outcome of the subcases
if num_skipped == num_tests:
# This includes the case where num_tests is zero
res = TestResult.SKIP
elif test.should_fail:
res = TestResult.EXPECTEDFAIL if failed else TestResult.UNEXPECTEDPASS
else:
res = TestResult.FAIL if failed else TestResult.OK
return cls(test, test_env, res, returncode, duration, stdo, stde, cmd)
def __init__(self, test: 'TestSerialisation', test_env: typing.Dict[str, str],
res: TestResult, returncode: int, duration: float,
stdo: typing.Optional[str], stde: typing.Optional[str],
cmd: typing.Optional[typing.List[str]]):
assert isinstance(res, TestResult)
self.res = res
self.returncode = returncode
self.duration = duration
self.stdo = stdo
self.stde = stde
self.cmd = cmd
self.env = test_env
self.should_fail = test.should_fail
def get_log(self) -> str:
res = '--- command ---\n'
if self.cmd is None:
res += 'NONE\n'
else:
test_only_env = set(self.env.items()) - set(os.environ.items())
res += '{}{}\n'.format(env_tuple_to_str(test_only_env), ' '.join(self.cmd))
if self.stdo:
res += '--- stdout ---\n'
res += self.stdo
if self.stde:
if res[-1:] != '\n':
res += '\n'
res += '--- stderr ---\n'
res += self.stde
if res[-1:] != '\n':
res += '\n'
res += '-------\n\n'
return res
def decode(stream: typing.Union[None, bytes]) -> str:
if stream is None:
return ''
try:
return stream.decode('utf-8')
except UnicodeDecodeError:
return stream.decode('iso-8859-1', errors='ignore')
def write_json_log(jsonlogfile: typing.TextIO, test_name: str, result: TestRun) -> None:
jresult = {'name': test_name,
'stdout': result.stdo,
'result': result.res.value,
'duration': result.duration,
'returncode': result.returncode,
'env': result.env,
'command': result.cmd} # type: typing.Dict[str, typing.Any]
if result.stde:
jresult['stderr'] = result.stde
jsonlogfile.write(json.dumps(jresult) + '\n')
def run_with_mono(fname: str) -> bool:
if fname.endswith('.exe') and not (is_windows() or is_cygwin()):
return True
return False
def load_benchmarks(build_dir: str) -> typing.List['TestSerialisation']:
datafile = Path(build_dir) / 'meson-private' / 'meson_benchmark_setup.dat'
if not datafile.is_file():
raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
with datafile.open('rb') as f:
obj = typing.cast(typing.List['TestSerialisation'], pickle.load(f))
return obj
def load_tests(build_dir: str) -> typing.List['TestSerialisation']:
datafile = Path(build_dir) / 'meson-private' / 'meson_test_setup.dat'
if not datafile.is_file():
raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
with datafile.open('rb') as f:
obj = typing.cast(typing.List['TestSerialisation'], pickle.load(f))
return obj
class SingleTestRunner:
def __init__(self, test: 'TestSerialisation', test_env: typing.Dict[str, str],
env: typing.Dict[str, str], options: argparse.Namespace):
self.test = test
self.test_env = test_env
self.env = env
self.options = options
def _get_cmd(self) -> typing.Optional[typing.List[str]]:
if self.test.fname[0].endswith('.jar'):
return ['java', '-jar'] + self.test.fname
elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
return ['mono'] + self.test.fname
else:
if self.test.is_cross_built and self.test.needs_exe_wrapper:
if self.test.exe_runner is None:
# Can not run test on cross compiled executable
# because there is no execute wrapper.
return None
else:
if not self.test.exe_runner.found():
msg = 'The exe_wrapper defined in the cross file {!r} was not ' \
'found. Please check the command and/or add it to PATH.'
raise TestException(msg.format(self.test.exe_runner.name))
return self.test.exe_runner.get_command() + self.test.fname
else:
return self.test.fname
def run(self) -> TestRun:
cmd = self._get_cmd()
if cmd is None:
skip_stdout = 'Not run because can not execute cross compiled binaries.'
return TestRun(self.test, self.test_env, TestResult.SKIP, GNU_SKIP_RETURNCODE, 0.0, skip_stdout, None, None)
else:
wrap = TestHarness.get_wrapper(self.options)
if self.options.gdb:
self.test.timeout = None
return self._run_cmd(wrap + cmd + self.test.cmd_args + self.options.test_args)
def _run_cmd(self, cmd: typing.List[str]) -> TestRun:
starttime = time.time()
if len(self.test.extra_paths) > 0:
self.env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + self.env['PATH']
winecmd = []
for c in cmd:
winecmd.append(c)
if os.path.basename(c).startswith('wine'):
self.env['WINEPATH'] = get_wine_shortpath(
winecmd,
['Z:' + p for p in self.test.extra_paths] + self.env.get('WINEPATH', '').split(';')
)
break
# If MALLOC_PERTURB_ is not set, or if it is set to an empty value,
# (i.e., the test or the environment don't explicitly set it), set
# it ourselves. We do this unconditionally for regular tests
# because it is extremely useful to have.
# Setting MALLOC_PERTURB_="0" will completely disable this feature.
if ('MALLOC_PERTURB_' not in self.env or not self.env['MALLOC_PERTURB_']) and not self.options.benchmark:
self.env['MALLOC_PERTURB_'] = str(random.randint(1, 255))
stdout = None
stderr = None
if not self.options.verbose:
stdout = tempfile.TemporaryFile("wb+")
stderr = tempfile.TemporaryFile("wb+") if self.options.split else stdout
if self.test.protocol == 'tap' and stderr is stdout:
stdout = tempfile.TemporaryFile("wb+")
# Let gdb handle ^C instead of us
if self.options.gdb:
previous_sigint_handler = signal.getsignal(signal.SIGINT)
# Make the meson executable ignore SIGINT while gdb is running.
signal.signal(signal.SIGINT, signal.SIG_IGN)
def preexec_fn() -> None:
if self.options.gdb:
# Restore the SIGINT handler for the child process to
# ensure it can handle it.
signal.signal(signal.SIGINT, signal.SIG_DFL)
else:
# We don't want setsid() in gdb because gdb needs the
# terminal in order to handle ^C and not show tcsetpgrp()
                # errors, and to avoid losing the ability to use the terminal.
os.setsid()
p = subprocess.Popen(cmd,
stdout=stdout,
stderr=stderr,
env=self.env,
cwd=self.test.workdir,
preexec_fn=preexec_fn if not is_windows() else None)
timed_out = False
kill_test = False
if self.test.timeout is None:
timeout = None
elif self.options.timeout_multiplier is not None:
timeout = self.test.timeout * self.options.timeout_multiplier
else:
timeout = self.test.timeout
try:
p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
if self.options.verbose:
print('{} time out (After {} seconds)'.format(self.test.name, timeout))
timed_out = True
except KeyboardInterrupt:
mlog.warning('CTRL-C detected while running %s' % (self.test.name))
kill_test = True
finally:
if self.options.gdb:
# Let us accept ^C again
signal.signal(signal.SIGINT, previous_sigint_handler)
additional_error = None
if kill_test or timed_out:
# Python does not provide multiplatform support for
# killing a process and all its children so we need
# to roll our own.
if is_windows():
subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
else:
try:
# Kill the process group that setsid() created.
os.killpg(p.pid, signal.SIGKILL)
except ProcessLookupError:
# Sometimes (e.g. with Wine) this happens.
# There's nothing we can do (maybe the process
# already died) so carry on.
pass
try:
p.communicate(timeout=1)
except subprocess.TimeoutExpired:
# An earlier kill attempt has not worked for whatever reason.
# Try to kill it one last time with a direct call.
# If the process has spawned children, they will remain around.
p.kill()
try:
p.communicate(timeout=1)
except subprocess.TimeoutExpired:
additional_error = 'Test process could not be killed.'
except ValueError:
additional_error = 'Could not read output. Maybe the process has redirected its stdout/stderr?'
endtime = time.time()
duration = endtime - starttime
if additional_error is None:
if stdout is None:
stdo = ''
else:
stdout.seek(0)
stdo = decode(stdout.read())
if stderr is None or stderr is stdout:
stde = ''
else:
stderr.seek(0)
stde = decode(stderr.read())
else:
stdo = ""
stde = additional_error
if timed_out:
return TestRun(self.test, self.test_env, TestResult.TIMEOUT, p.returncode, duration, stdo, stde, cmd)
else:
if self.test.protocol == 'exitcode':
return TestRun.make_exitcode(self.test, self.test_env, p.returncode, duration, stdo, stde, cmd)
else:
if self.options.verbose:
print(stdo, end='')
return TestRun.make_tap(self.test, self.test_env, p.returncode, duration, stdo, stde, cmd)
class TestHarness:
def __init__(self, options: argparse.Namespace):
self.options = options
self.collected_logs = [] # type: typing.List[str]
self.fail_count = 0
self.expectedfail_count = 0
self.unexpectedpass_count = 0
self.success_count = 0
self.skip_count = 0
self.timeout_count = 0
self.is_run = False
self.tests = None
self.logfilename = None # type: typing.Optional[str]
self.logfile = None # type: typing.Optional[typing.TextIO]
self.jsonlogfile = None # type: typing.Optional[typing.TextIO]
if self.options.benchmark:
self.tests = load_benchmarks(options.wd)
else:
self.tests = load_tests(options.wd)
ss = set()
for t in self.tests:
for s in t.suite:
ss.add(s)
self.suites = list(ss)
def __del__(self) -> None:
self.close_logfiles()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close_logfiles()
def close_logfiles(self) -> None:
if self.logfile:
self.logfile.close()
self.logfile = None
if self.jsonlogfile:
self.jsonlogfile.close()
self.jsonlogfile = None
def merge_suite_options(self, options: argparse.Namespace, test: 'TestSerialisation') -> typing.Dict[str, str]:
if ':' in options.setup:
if options.setup not in self.build_data.test_setups:
sys.exit("Unknown test setup '%s'." % options.setup)
current = self.build_data.test_setups[options.setup]
else:
full_name = test.project_name + ":" + options.setup
if full_name not in self.build_data.test_setups:
sys.exit("Test setup '%s' not found from project '%s'." % (options.setup, test.project_name))
current = self.build_data.test_setups[full_name]
if not options.gdb:
options.gdb = current.gdb
if options.gdb:
options.verbose = True
if options.timeout_multiplier is None:
options.timeout_multiplier = current.timeout_multiplier
# if options.env is None:
# options.env = current.env # FIXME, should probably merge options here.
if options.wrapper is not None and current.exe_wrapper is not None:
sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
if options.wrapper is None:
options.wrapper = current.exe_wrapper
return current.env.get_env(os.environ.copy())
def get_test_runner(self, test: 'TestSerialisation') -> SingleTestRunner:
options = deepcopy(self.options)
if not options.setup:
options.setup = self.build_data.test_setup_default_name
if options.setup:
env = self.merge_suite_options(options, test)
else:
env = os.environ.copy()
test_env = test.env.get_env(env)
env.update(test_env)
return SingleTestRunner(test, test_env, env, options)
def process_test_result(self, result: TestRun) -> None:
if result.res is TestResult.TIMEOUT:
self.timeout_count += 1
elif result.res is TestResult.SKIP:
self.skip_count += 1
elif result.res is TestResult.OK:
self.success_count += 1
elif result.res is TestResult.FAIL or result.res is TestResult.ERROR:
self.fail_count += 1
elif result.res is TestResult.EXPECTEDFAIL:
self.expectedfail_count += 1
elif result.res is TestResult.UNEXPECTEDPASS:
self.unexpectedpass_count += 1
else:
sys.exit('Unknown test result encountered: {}'.format(result.res))
def print_stats(self, numlen: int, tests: typing.List['TestSerialisation'],
name: str, result: TestRun, i: int) -> None:
startpad = ' ' * (numlen - len('%d' % (i + 1)))
num = '%s%d/%d' % (startpad, i + 1, len(tests))
padding1 = ' ' * (38 - len(name))
padding2 = ' ' * (8 - len(result.res.value))
status = ''
if result.res is TestResult.FAIL:
status = returncode_to_status(result.returncode)
result_str = '%s %s %s%s%s%5.2f s %s' % \
(num, name, padding1, result.res.value, padding2, result.duration,
status)
ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL)
bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS,
TestResult.ERROR)
if not self.options.quiet or result.res not in ok_statuses:
if result.res not in ok_statuses and mlog.colorize_console:
if result.res in bad_statuses:
decorator = mlog.red
elif result.res is TestResult.SKIP:
decorator = mlog.yellow
else:
sys.exit('Unreachable code was ... well ... reached.')
print(decorator(result_str).get_text(True))
else:
print(result_str)
result_str += "\n\n" + result.get_log()
if result.res in bad_statuses:
if self.options.print_errorlogs:
self.collected_logs.append(result_str)
if self.logfile:
self.logfile.write(result_str)
if self.jsonlogfile:
write_json_log(self.jsonlogfile, name, result)
def print_summary(self) -> None:
msg = '''
Ok: %4d
Expected Fail: %4d
Fail: %4d
Unexpected Pass: %4d
Skipped: %4d
Timeout: %4d
''' % (self.success_count, self.expectedfail_count, self.fail_count,
self.unexpectedpass_count, self.skip_count, self.timeout_count)
print(msg)
if self.logfile:
self.logfile.write(msg)
def print_collected_logs(self) -> None:
if len(self.collected_logs) > 0:
if len(self.collected_logs) > 10:
print('\nThe output from 10 first failed tests:\n')
else:
print('\nThe output from the failed tests:\n')
for log in self.collected_logs[:10]:
lines = log.splitlines()
if len(lines) > 104:
print('\n'.join(lines[0:4]))
print('--- Listing only the last 100 lines from a long log. ---')
lines = lines[-100:]
for line in lines:
try:
print(line)
except UnicodeEncodeError:
line = line.encode('ascii', errors='replace').decode()
print(line)
def total_failure_count(self) -> int:
return self.fail_count + self.unexpectedpass_count + self.timeout_count
def doit(self) -> int:
if self.is_run:
raise RuntimeError('Test harness object can only be used once.')
self.is_run = True
tests = self.get_tests()
if not tests:
return 0
self.run_tests(tests)
return self.total_failure_count()
@staticmethod
def split_suite_string(suite: str) -> typing.Tuple[str, str]:
if ':' in suite:
# mypy can't figure out that str.split(n, 1) will return a list of
# length 2, so we have to help it.
return typing.cast(typing.Tuple[str, str], tuple(suite.split(':', 1)))
else:
return suite, ""
@staticmethod
def test_in_suites(test: 'TestSerialisation', suites: typing.List[str]) -> bool:
for suite in suites:
(prj_match, st_match) = TestHarness.split_suite_string(suite)
for prjst in test.suite:
(prj, st) = TestHarness.split_suite_string(prjst)
# the SUITE can be passed as
# suite_name
# or
# project_name:suite_name
# so we need to select only the test belonging to project_name
                # this if handles the first case (i.e., SUITE == suite_name)
# in this way we can run tests belonging to different
# (sub)projects which share the same suite_name
if not st_match and st == prj_match:
return True
# these two conditions are needed to handle the second option
# i.e., SUITE == project_name:suite_name
                # in this way we select only the tests of
# project_name with suite_name
if prj_match and prj != prj_match:
continue
if st_match and st != st_match:
continue
return True
return False
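    # Illustrative examples, not part of the original: for a test whose suite
    # list is ['proj:foo'],
    #   --suite foo        matches (bare suite name, any subproject)
    #   --suite proj       matches (bare project name selects all its suites)
    #   --suite proj:foo   matches (fully qualified)
    #   --suite other:foo  does not match (different project)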
def test_suitable(self, test: 'TestSerialisation') -> bool:
return ((not self.options.include_suites or
TestHarness.test_in_suites(test, self.options.include_suites)) and not
TestHarness.test_in_suites(test, self.options.exclude_suites))
def get_tests(self) -> typing.List['TestSerialisation']:
if not self.tests:
print('No tests defined.')
return []
if len(self.options.include_suites) or len(self.options.exclude_suites):
tests = []
for tst in self.tests:
if self.test_suitable(tst):
tests.append(tst)
else:
tests = self.tests
# allow specifying test names like "meson test foo1 foo2", where test('foo1', ...)
if self.options.args:
tests = [t for t in tests if t.name in self.options.args]
if not tests:
print('No suitable tests defined.')
return []
return tests
def open_log_files(self) -> None:
if not self.options.logbase or self.options.verbose:
return
namebase = None
logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)
if self.options.wrapper:
namebase = os.path.basename(self.get_wrapper(self.options)[0])
elif self.options.setup:
namebase = self.options.setup.replace(":", "_")
if namebase:
logfile_base += '-' + namebase.replace(' ', '_')
self.logfilename = logfile_base + '.txt'
self.jsonlogfilename = logfile_base + '.json'
self.jsonlogfile = open(self.jsonlogfilename, 'w', encoding='utf-8', errors='replace')
self.logfile = open(self.logfilename, 'w', encoding='utf-8', errors='surrogateescape')
self.logfile.write('Log of Meson test suite run on %s\n\n'
% datetime.datetime.now().isoformat())
inherit_env = env_tuple_to_str(os.environ.items())
self.logfile.write('Inherited environment: {}\n\n'.format(inherit_env))
@staticmethod
def get_wrapper(options: argparse.Namespace) -> typing.List[str]:
wrap = [] # type: typing.List[str]
if options.gdb:
wrap = ['gdb', '--quiet', '--nh']
if options.repeat > 1:
wrap += ['-ex', 'run', '-ex', 'quit']
# Signal the end of arguments to gdb
wrap += ['--args']
if options.wrapper:
wrap += options.wrapper
return wrap
def get_pretty_suite(self, test: 'TestSerialisation') -> str:
if len(self.suites) > 1 and test.suite:
rv = TestHarness.split_suite_string(test.suite[0])[0]
s = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
if len(s):
rv += ":"
return rv + s + " / " + test.name
else:
return test.name
def run_tests(self, tests: typing.List['TestSerialisation']) -> None:
executor = None
futures = [] # type: typing.List[typing.Tuple[conc.Future[TestRun], int, typing.List[TestSerialisation], str, int]]
numlen = len('%d' % len(tests))
self.open_log_files()
startdir = os.getcwd()
if self.options.wd:
os.chdir(self.options.wd)
self.build_data = build.load(os.getcwd())
try:
for _ in range(self.options.repeat):
for i, test in enumerate(tests):
visible_name = self.get_pretty_suite(test)
single_test = self.get_test_runner(test)
if not test.is_parallel or self.options.num_processes == 1 or single_test.options.gdb:
self.drain_futures(futures)
futures = []
res = single_test.run()
self.process_test_result(res)
self.print_stats(numlen, tests, visible_name, res, i)
else:
if not executor:
executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)
f = executor.submit(single_test.run)
futures.append((f, numlen, tests, visible_name, i))
if self.options.repeat > 1 and self.fail_count:
break
if self.options.repeat > 1 and self.fail_count:
break
self.drain_futures(futures)
self.print_summary()
self.print_collected_logs()
if self.logfilename:
print('Full log written to %s' % self.logfilename)
finally:
os.chdir(startdir)
def drain_futures(self, futures: typing.List[typing.Tuple['conc.Future[TestRun]', int, typing.List['TestSerialisation'], str, int]]) -> None:
for x in futures:
(result, numlen, tests, name, i) = x
if self.options.repeat > 1 and self.fail_count:
result.cancel()
if self.options.verbose:
result.result()
self.process_test_result(result.result())
self.print_stats(numlen, tests, name, result.result(), i)
def run_special(self) -> int:
'''Tests run by the user, usually something like "under gdb 1000 times".'''
if self.is_run:
raise RuntimeError('Can not use run_special after a full run.')
tests = self.get_tests()
if not tests:
return 0
self.run_tests(tests)
return self.total_failure_count()
def list_tests(th: TestHarness) -> bool:
tests = th.get_tests()
for t in tests:
print(th.get_pretty_suite(t))
return not tests
def rebuild_all(wd: str) -> bool:
if not (Path(wd) / 'build.ninja').is_file():
print('Only ninja backend is supported to rebuild tests before running them.')
return True
ninja = environment.detect_ninja()
if not ninja:
print("Can't find ninja, can't rebuild test.")
return False
ret = subprocess.run([ninja, '-C', wd]).returncode
if ret != 0:
print('Could not rebuild {}'.format(wd))
return False
return True
def run(options: argparse.Namespace) -> int:
if options.benchmark:
options.num_processes = 1
if options.verbose and options.quiet:
print('Can not be both quiet and verbose at the same time.')
return 1
check_bin = None
if options.gdb:
options.verbose = True
if options.wrapper:
print('Must not specify both a wrapper and gdb at the same time.')
return 1
check_bin = 'gdb'
if options.wrapper:
check_bin = options.wrapper[0]
if check_bin is not None:
exe = ExternalProgram(check_bin, silent=True)
if not exe.found():
print('Could not find requested program: {!r}'.format(check_bin))
return 1
options.wd = os.path.abspath(options.wd)
if not options.list and not options.no_rebuild:
if not rebuild_all(options.wd):
# We return 125 here in case the build failed.
# The reason is that exit code 125 tells `git bisect run` that the current commit should be skipped.
# Thus users can directly use `meson test` to bisect without needing to handle the does-not-build case separately in a wrapper script.
return 125
with TestHarness(options) as th:
try:
if options.list:
return list_tests(th)
if not options.args:
return th.doit()
return th.run_special()
except TestException as e:
print('Meson test encountered an error:\n')
if os.environ.get('MESON_FORCE_BACKTRACE'):
raise e
else:
print(e)
return 1
def run_with_args(args: typing.List[str]) -> int:
parser = argparse.ArgumentParser(prog='meson test')
add_arguments(parser)
options = parser.parse_args(args)
return run(options)
| 40.71673 | 163 | 0.570645 |
7943056f8ac5256440472828c671b04e319d0c09 | 1,359 | py | Python | homeassistant/components/abode/cover.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/abode/cover.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/abode/cover.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for Abode Security System covers."""
from typing import Any
from abodepy.devices.cover import AbodeCover as AbodeCV
import abodepy.helpers.constants as CONST
from homeassistant.components.cover import CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import AbodeDevice, AbodeSystem
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Abode cover devices."""
data: AbodeSystem = hass.data[DOMAIN]
entities = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_COVER):
entities.append(AbodeCover(data, device))
async_add_entities(entities)
class AbodeCover(AbodeDevice, CoverEntity):
"""Representation of an Abode cover."""
_device: AbodeCV
@property
def is_closed(self) -> bool:
"""Return true if cover is closed, else False."""
return not self._device.is_open
def close_cover(self, **kwargs: Any) -> None:
"""Issue close command to cover."""
self._device.close_cover()
def open_cover(self, **kwargs: Any) -> None:
"""Issue open command to cover."""
self._device.open_cover()
| 28.914894 | 84 | 0.726269 |
7943059c00adb0f4bb38d557ac8f828c9e255543 | 1,819 | py | Python | test/testers/winforms/datetimepicker_showupdown/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | [
"MIT"
] | 1 | 2019-08-13T15:22:12.000Z | 2019-08-13T15:22:12.000Z | test/testers/winforms/datetimepicker_showupdown/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | [
"MIT"
] | null | null | null | test/testers/winforms/datetimepicker_showupdown/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | [
"MIT"
] | 1 | 2019-08-13T15:22:17.000Z | 2019-08-13T15:22:17.000Z | # vim: set tabstop=4 shiftwidth=4 expandtab
##############################################################################
# Written by: Ray Wang <[email protected]>
# Date: 02/16/2008
# Description: Application wrapper for datetimepicker_showupdown.py
# be called by ../datetimepicker_showupdown_ops.py
##############################################################################
"""Application wrapper for datetimepicker_showupdown.py"""
from strongwind import *
from os.path import exists
from sys import path
def launchDateTimePicker(exe=None):
"""
Launch datetimepicker_showupdown with accessibility enabled and return a datetimepicker_showupdown object.
Log an error and return None if something goes wrong
"""
if exe is None:
# make sure we can find the sample application
harness_dir = path[0]
i = harness_dir.rfind("/")
j = harness_dir[:i].rfind("/")
uiaqa_path = harness_dir[:j]
exe = '%s/samples/winforms/datetimepicker_showupdown.py' % uiaqa_path
if not exists(exe):
raise IOError, "Could not find file %s" % exe
args = [exe]
(app, subproc) = cache.launchApplication(args=args, name='ipy', wait=config.LONG_DELAY)
datetimepicker = DateTimePicker(app, subproc)
cache.addApplication(datetimepicker)
datetimepicker.dateTimePickerShowUpDownFrame.app = datetimepicker
return datetimepicker
class DateTimePicker(accessibles.Application):
"""class to represent the application"""
def __init__(self, accessible, subproc=None):
"""Get a reference to the datetimepicker_showupdown window"""
super(DateTimePicker, self).__init__(accessible, subproc)
self.findFrame(re.compile('^DateTimePicker'), logName='Date Time Picker Show Up Down')
| 34.980769 | 110 | 0.64431 |
794305d49d744c7860b694eb09b8846e05363fa7 | 1,646 | py | Python | dcase_task2/lasagne_wrapper/evaluation.py | f0k/dcase_task2 | e7246e2e2ffdd59efde3b571556eef4c3bcffe22 | [
"MIT"
] | 32 | 2018-10-08T13:00:50.000Z | 2020-11-24T14:55:51.000Z | dcase_task2/lasagne_wrapper/evaluation.py | f0k/dcase_task2 | e7246e2e2ffdd59efde3b571556eef4c3bcffe22 | [
"MIT"
] | 4 | 2019-02-25T09:51:46.000Z | 2019-05-17T17:03:01.000Z | dcase_task2/lasagne_wrapper/evaluation.py | f0k/dcase_task2 | e7246e2e2ffdd59efde3b571556eef4c3bcffe22 | [
"MIT"
] | 10 | 2018-11-16T16:41:16.000Z | 2021-09-01T07:23:50.000Z |
import numpy as np
def apk(actual, predicted, k=10):
"""
Computes the average precision at k.
    This function computes the average precision at k between two lists of
items.
Parameters
----------
actual : list
A list of elements that are to be predicted (order doesn't matter)
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return 0.0
return score / min(len(actual), k)
def mapk(actual, predicted, k=10):
"""
Computes the mean average precision at k.
    This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
"""
return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])
| 27.898305 | 79 | 0.594168 |
794306e21f9e5ab6a80ac2d7daecada419798930 | 7,194 | py | Python | Tencent/Human Pose Estimation/models/loss_model_parallel.py | orange-eng/internship | c8c566df453d3a4bdf692338f74916ae15792fa1 | [
"Apache-2.0"
] | 2 | 2021-11-14T14:09:47.000Z | 2022-02-08T22:04:50.000Z | Tencent/Human Pose Estimation/models/loss_model_parallel.py | orange-eng/internship | c8c566df453d3a4bdf692338f74916ae15792fa1 | [
"Apache-2.0"
] | null | null | null | Tencent/Human Pose Estimation/models/loss_model_parallel.py | orange-eng/internship | c8c566df453d3a4bdf692338f74916ae15792fa1 | [
"Apache-2.0"
] | null | null | null | import time
import torch
from torch import nn
import torch.nn.functional as F
class MultiTaskLossParallel(nn.Module):
def __init__(self, opt, config, heatmap_weight=1, offset_weight=1, **kwargs):
super(MultiTaskLossParallel, self).__init__()
self.nstack = opt.nstack
self.batch_size = opt.batch_size
self.offset_start = config.offset_start
self.multi_task_weight = opt.multi_task_weight
self.scale_weight = opt.scale_weight
self.nstack_weight = opt.nstack_weight
self.heatmap_weight = heatmap_weight
self.offset_weight = offset_weight
def forward(self, pred_tuple, target_tuple):
"""
Compute the multi-task total loss
        :param pred_tuple: [nstack * [(batch,C,128,128), (batch,C,64,64), (batch,C,32,32),
            (batch,C,16,16)], (batch,C,8,8)]
:param target_tuple: target tensors, i.e.,
mask_misses, heatmaps, offsets, mask_offsets,
[batch,1,128,128], [batch,44,128,128], [batch,36,128,128], [batch,36,128,128]
:return: scalar tensor
"""
# we use 4 stacks, 5 scales
        # TODO: is it better to supervise 5 different scales or only 4?
pred_scale_tensors = [torch.cat([pred_tuple[j][i][None, ...] for j in range(self.nstack)], dim=0) for i in
range(5)] # concatenate the same scale output of different stacks
# different scale losses have different order of magnitudes owning to different pixel numbers (feature map size)
loss_scales = [self._loss_per_scale(pred_scale_tensors[i], target_tuple) * self.scale_weight[i] for i in
range(5)] # different scale losses of all nstack
loss_per_batch = sum(loss_scales) / sum(self.scale_weight)
return loss_per_batch # should divide the batch size in the main train
def _loss_per_scale(self, pred, target):
"""
Compute the loss on a particular scale.
        :param pred: tensor (nstack, batch, C, H, W)
:param target: mask_misses, heatmaps, offsets, mask_offsets of shape (N, C, H, W)
:return:
"""
        # TODO: the keypoint and body-part losses are not balanced here; the heatmap could be split further to do so.
pred_heatmap = pred[:, :, :self.offset_start]
# pred_offset = pred[:, :, self.offset_start:]
gt_mask_misses = F.interpolate(target[0], size=pred.shape[-2:], mode='bilinear')
gt_heatmaps = F.adaptive_avg_pool2d(target[1], output_size=pred.shape[-2:])
# gt_offsets = F.adaptive_avg_pool2d(target[2], output_size=pred.shape[-2:])
# gt_mask_offsets = F.interpolate(target[3], size=pred.shape[-2:], mode='bilinear')
#
# F.adaptive_max_pool2d(target[3], output_size=pred.shape[-2:])
# ############# For debug ##############################
# heatmap = gt_heatmaps[0,...].cpu().numpy().squeeze()
# offset = gt_mask_offsets[0,...].cpu().numpy().squeeze()
#
# import matplotlib.pylab as plt
# # plt.imshow(heatmap[11,:,:]) # mask_all
# plt.imshow(heatmap[43, :,:]) # mask_all
# plt.show()
# #####################################################
heatmap_loss = self.l2_loss(pred_heatmap, gt_heatmaps[None, ...], gt_mask_misses[None, ...]
, nstack_weight=self.nstack_weight)
# offset_loss = self.l1_loss(pred_offset, gt_offsets[None, ...], gt_mask_offsets[None, ...],
# nstack_weight=self.nstack_weight)
#
# multi_task_loss = heatmap_loss * self.multi_task_weight[0] + offset_loss * self.multi_task_weight[1]
# return multi_task_loss / sum(self.multi_task_weight)
return heatmap_loss
@staticmethod
def focal_l2_loss(s, sxing, mask_miss, gamma=2, nstack_weight=[1, 1, 1, 1]):
"""
Compute the focal L2 loss between predicted and groundtruth score maps.
:param s: predicted tensor (nstack, batch, channel, height, width), predicted score maps
:param sxing: target tensor (nstack, batch, channel, height, width)
:param mask_miss: tensor (1, batch, 1, height, width)
:param gamma: focusing parameter
:return: a scalar tensor
"""
# eps = 1e-8 # 1e-12
# s = torch.clamp(s, eps, 1. - eps) # improve the stability of the focal loss
st = torch.where(torch.ge(sxing, 0.01), s, 1 - s)
factor = (1. - st) ** gamma
# multiplied by mask_miss via broadcast operation
out = (s - sxing) ** 2 * factor * mask_miss # type: torch.Tensor
# sum over the feature map, should divide by batch_size afterwards
loss_nstack = out.sum(dim=(1, 2, 3, 4)) # losses from nstack 1, 2, 3, 4...
assert len(loss_nstack) == len(nstack_weight), nstack_weight
print(' heatmap focal L2 loss per stack.......... ', loss_nstack.detach().cpu().numpy())
weight_loss = [loss_nstack[i] * nstack_weight[i] for i in range(len(nstack_weight))]
loss = sum(weight_loss) / sum(nstack_weight)
return loss
@staticmethod
def l1_loss(pred, target, mask_offset, nstack_weight=[1, 1, 1, 1]):
"""
Compute the L1 loss of offset feature maps
:param pred: predicted tensor (nstack, batch, channel, height, width), predicted feature maps
:param target: target tensor (nstack, batch, channel, height, width)
:param mask_offset: tensor (nstack, batch, channel, height, width)
:param nstack_weight:
:return:
"""
out = torch.abs(pred - target) * mask_offset # type: torch.Tensor
# sum over the feature map, should divide by batch afterwards
loss_nstack = out.sum(dim=(1, 2, 3, 4))
assert len(loss_nstack) == len(nstack_weight), nstack_weight
print(' offset L1 loss per stack >>>>>>>> ', loss_nstack.detach().cpu().numpy())
weight_loss = [loss_nstack[i] * nstack_weight[i] for i in range(len(nstack_weight))]
loss = sum(weight_loss) / sum(nstack_weight) # normalized loss by weights
return loss
@staticmethod
def l2_loss(s, sxing, mask_miss, nstack_weight=[1, 1, 1, 1]):
"""
Compute the L2 loss between predicted and groundtruth score maps.
:param s: predicted tensor (nstack, batch, channel, height, width), predicted score maps
:param sxing: target tensor (nstack, batch, channel, height, width)
:param mask_miss: tensor (nstack, batch, 1, height, width)
:return: a scalar tensor
"""
# multiplied by mask_miss via broadcast operation
# eps = 1e-8 # 1e-12 #
# s = torch.clamp(s, eps, 1 - eps)
out = (s - sxing) ** 2 * mask_miss # type: torch.Tensor
# sum over the feature map, should divide by batch afterwards
loss_nstack = out.sum(dim=(1, 2, 3, 4))
assert len(loss_nstack) == len(nstack_weight), nstack_weight
print(' heatmap L2 loss per stack......... ', loss_nstack.detach().cpu().numpy())
weight_loss = [loss_nstack[i] * nstack_weight[i] for i in range(len(nstack_weight))]
loss = sum(weight_loss) / sum(nstack_weight)
return loss
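# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of how this criterion might be driven. The `opt` and
# `config` namespaces below are placeholders invented for illustration; the real
# project supplies its own option/config objects.
#
# from types import SimpleNamespace
# opt = SimpleNamespace(nstack=4, batch_size=2, multi_task_weight=[1, 1],
#                       scale_weight=[1, 1, 1, 1, 1], nstack_weight=[1, 1, 1, 1])
# config = SimpleNamespace(offset_start=44)
# criterion = MultiTaskLossParallel(opt, config)
# loss = criterion(pred_tuple, target_tuple)  # scalar; divide by batch size in the train loop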
| 50.661972 | 120 | 0.613428 |
794307658fc7470fe6c5e0890b92cedef9cbdf9e | 571 | py | Python | utils/data.py | shivamswarnkar/Image-Generator | 55b6d066c84c615403e48c27e77ee017cf260955 | [
"MIT"
] | 8 | 2019-11-07T19:55:37.000Z | 2021-11-11T06:53:50.000Z | utils/data.py | shivamswarnkar/Image-Generator | 55b6d066c84c615403e48c27e77ee017cf260955 | [
"MIT"
] | 1 | 2021-07-02T23:44:22.000Z | 2021-07-10T08:00:12.000Z | utils/data.py | shivamswarnkar/Image-Generator | 55b6d066c84c615403e48c27e77ee017cf260955 | [
"MIT"
] | 2 | 2019-11-07T19:31:21.000Z | 2019-11-21T12:02:12.000Z | import torch
import torchvision.transforms as transforms
import torchvision.datasets as dset
def create_data_loader(args):
dataset = dset.ImageFolder(root=args.dataroot,
transform=transforms.Compose(
[
transforms.Resize(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Normalize(
(0.5,0.5,0.5),
(0.5, 0.5,0.5)
)
]))
# setting up data loader
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers
)
return dataloader
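# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# `args` only needs to expose dataroot, image_size, batch_size and workers; the
# values below are placeholders chosen for illustration.
#
# from argparse import Namespace
# args = Namespace(dataroot='./data/images', image_size=64, batch_size=128, workers=2)
# dataloader = create_data_loader(args)
# batch, _ = next(iter(dataloader))  # (batch_size, 3, image_size, image_size)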
| 20.392857 | 51 | 0.728546 |
794307fb6baf23f358b3663013f93070f81b1449 | 2,568 | py | Python | change_world_names.py | fingerco/sims-4-caw | 1392b9d9c935ad3ce10feed97a2a37b7fd93749b | [
"Apache-2.0"
] | null | null | null | change_world_names.py | fingerco/sims-4-caw | 1392b9d9c935ad3ce10feed97a2a37b7fd93749b | [
"Apache-2.0"
] | null | null | null | change_world_names.py | fingerco/sims-4-caw | 1392b9d9c935ad3ce10feed97a2a37b7fd93749b | [
"Apache-2.0"
] | null | null | null | import ctypes
import ctypes.util
import re
import struct
from caw_memory_editors import MacOSX, CannotReadException
BASE_WORLD_NAME = "Von Haunt Estate".encode('utf-8')
REPLACE_WORLD_NAME = "Apple Fritters are tasty".encode('utf-8')
class ChangeWorldNames:
def __init__(self, process):
self.process = process
def run(self):
# self.find_lots()
a = self.process.allocate_bytes(len(BASE_WORLD_NAME)+1)
b = self.process.allocate_bytes(len(REPLACE_WORLD_NAME)+1)
self.process.write_bytes(a.value, ctypes.create_string_buffer(BASE_WORLD_NAME + b'\x00'), len(BASE_WORLD_NAME)+1)
self.process.write_bytes(b.value, ctypes.create_string_buffer(REPLACE_WORLD_NAME + b'\x00'), len(REPLACE_WORLD_NAME)+1)
print(a, a.value, self.process.read_bytes(a.value, bytes=8))
print(b, b.value, self.process.read_bytes(b.value, bytes=8))
b.value = a.value
print(a, a.value, self.process.read_bytes(a.value, bytes=8))
print(b, b.value, self.process.read_bytes(b.value, bytes=8))
def find_lots(self):
potential_lot_addrs = self.process.find_in_memory(BASE_WORLD_NAME)
return self.filter_to_relevant_lots(potential_lot_addrs)
def filter_to_relevant_lots(self, lot_addrs):
mem_regions = self.process.all_regions()
replace_addr = self.process.allocate_bytes(len(REPLACE_WORLD_NAME)+1)
self.process.write_bytes(replace_addr.value, ctypes.create_string_buffer(REPLACE_WORLD_NAME + b'\x00'), len(REPLACE_WORLD_NAME)+1)
replace_addr_bytes = struct.pack('L', replace_addr.value)
refs_to_name = []
for addr in lot_addrs:
print(addr)
addr_bytes = struct.pack('L', addr)
refs_to_name += self.process.find_in_memory(addr_bytes, mem_regions=mem_regions)
print("HMMM: " + str(struct.pack('L', addr)) + " - " + str(len(struct.pack('L', addr))) + " - " + str(refs_to_name))
print(refs_to_name)
print(replace_addr_bytes)
print(len(replace_addr_bytes))
for ref_addr in refs_to_name:
print("\n--1-----\n{}\n---1-----\n".format(self.process.read_bytes(ref_addr, bytes=len(replace_addr_bytes))))
self.process.write_bytes(ref_addr, ctypes.create_string_buffer(replace_addr_bytes), len(replace_addr_bytes))
print("\n---2----\n{}\n---2-----\n".format(self.process.read_bytes(ref_addr, bytes=len(replace_addr_bytes))))
sims_process = MacOSX("The Sims 4")
change_names = ChangeWorldNames(sims_process)
change_names.run()
| 42.098361 | 138 | 0.688474 |
794309a952fa3a98b704ad721865d32b845b39b2 | 7,886 | py | Python | tests/python/test_disassembler.py | navarrothiago/bcc | 91a1f2dbee713aac2161ba39de4d98f95e233a68 | [
"Apache-2.0"
] | 6 | 2020-01-09T23:01:43.000Z | 2020-04-25T01:06:03.000Z | tests/python/test_disassembler.py | navarrothiago/bcc | 91a1f2dbee713aac2161ba39de4d98f95e233a68 | [
"Apache-2.0"
] | null | null | null | tests/python/test_disassembler.py | navarrothiago/bcc | 91a1f2dbee713aac2161ba39de4d98f95e233a68 | [
"Apache-2.0"
] | 1 | 2019-02-26T10:06:31.000Z | 2019-02-26T10:06:31.000Z | #!/usr/bin/env python3
# Copyright (c) Clevernet
# Licensed under the Apache License, Version 2.0 (the "License")
# test program for the 'disassemble_func' and 'decode_table' methods
from bcc import BPF
from bcc import disassembler
import ctypes as ct
import random
from unittest import main, TestCase
class BPFInstr(ct.Structure):
_pack_ = 1
_fields_ = [('opcode', ct.c_uint8),
('dst', ct.c_uint8, 4),
('src', ct.c_uint8, 4),
('offset', ct.c_int16),
('imm', ct.c_int32)]
class TestDisassembler(TestCase):
opcodes = [(0x04, "%dst += %imm"),
(0x05, "goto %off <%jmp>"),
(0x07, "%dst += %imm"),
(0x0c, "%dst += %src"),
(0x0f, "%dst += %src"),
(0x14, "%dst -= %imm"),
(0x15, "if %dst == %imm goto pc%off <%jmp>"),
(0x17, "%dst -= %imm"),
#(0x18, "lddw"),
(0x1c, "%dst -= %src"),
(0x1d, "if %dst == %src goto pc%off <%jmp>"),
(0x1f, "%dst -= %src"),
(0x20, "r0 = *(u32*)skb[%imm]"),
(0x24, "%dst *= %imm"),
(0x25, "if %dst > %imm goto pc%off <%jmp>"),
(0x27, "%dst *= %imm"),
(0x28, "r0 = *(u16*)skb[%imm]"),
(0x2c, "%dst *= %src"),
(0x2d, "if %dst > %src goto pc%off <%jmp>"),
(0x2f, "%dst *= %src"),
(0x30, "r0 = *(u8*)skb[%imm]"),
(0x34, "%dst /= %imm"),
(0x35, "if %dst >= %imm goto pc%off <%jmp>"),
(0x37, "%dst /= %imm"),
(0x38, "r0 = *(u64*)skb[%imm]"),
(0x3c, "%dst /= %src"),
(0x3d, "if %dst >= %src goto pc%off <%jmp>"),
(0x3f, "%dst /= %src"),
(0x40, "r0 = *(u32*)skb[%src %sim]"),
(0x44, "%dst |= %ibw"),
(0x45, "if %dst & %imm goto pc%off <%jmp>"),
(0x47, "%dst |= %ibw"),
(0x48, "r0 = *(u16*)skb[%src %sim]"),
(0x4c, "%dst |= %src"),
(0x4d, "if %dst & %src goto pc%off <%jmp>"),
(0x4f, "%dst |= %src"),
(0x50, "r0 = *(u8*)skb[%src %sim]"),
(0x54, "%dst &= %ibw"),
(0x55, "if %dst != %imm goto pc%off <%jmp>"),
(0x57, "%dst &= %ibw"),
(0x58, "r0 = *(u64*)skb[%src %sim]"),
(0x5c, "%dst &= %src"),
(0x5d, "if %dst != %src goto pc%off <%jmp>"),
(0x5f, "%dst &= %src"),
(0x61, "%dst = *(u32*)(%src %off)"),
(0x62, "*(u32*)(%dst %off) = %imm"),
(0x63, "*(u32*)(%dst %off) = %src"),
(0x64, "%dst <<= %imm"),
(0x65, "if %dst s> %imm goto pc%off <%jmp>"),
(0x67, "%dst <<= %imm"),
(0x69, "%dst = *(u16*)(%src %off)"),
(0x6a, "*(u16*)(%dst %off) = %imm"),
(0x6b, "*(u16*)(%dst %off) = %src"),
(0x6c, "%dst <<= %src"),
(0x6d, "if %dst s> %src goto pc%off <%jmp>"),
(0x6f, "%dst <<= %src"),
(0x71, "%dst = *(u8*)(%src %off)"),
(0x72, "*(u8*)(%dst %off) = %imm"),
(0x73, "*(u8*)(%dst %off) = %src"),
(0x74, "%dst >>= %imm"),
(0x75, "if %dst s>= %imm goto pc%off <%jmp>"),
(0x77, "%dst >>= %imm"),
(0x79, "%dst = *(u64*)(%src %off)"),
(0x7a, "*(u64*)(%dst %off) = %imm"),
(0x7b, "*(u64*)(%dst %off) = %src"),
(0x7c, "%dst >>= %src"),
(0x7d, "if %dst s>= %src goto pc%off <%jmp>"),
(0x7f, "%dst >>= %src"),
(0x84, "%dst = ~ (u32)%dst"),
#(0x85, "call"),
(0x87, "%dst = ~ (u64)%dst"),
(0x94, "%dst %= %imm"),
(0x95, "exit"),
(0x97, "%dst %= %imm"),
(0x9c, "%dst %= %src"),
(0x9f, "%dst %= %src"),
(0xa4, "%dst ^= %ibw"),
(0xa5, "if %dst < %imm goto pc%off <%jmp>"),
(0xa7, "%dst ^= %ibw"),
(0xac, "%dst ^= %src"),
(0xad, "if %dst < %src goto pc%off <%jmp>"),
(0xaf, "%dst ^= %src"),
(0xb4, "%dst = %imm"),
(0xb5, "if %dst <= %imm goto pc%off <%jmp>"),
(0xb7, "%dst = %imm"),
(0xbc, "%dst = %src"),
(0xbd, "if %dst <= %src goto pc%off <%jmp>"),
(0xbf, "%dst = %src"),
(0xc4, "%dst s>>= %imm"),
(0xc5, "if %dst s< %imm goto pc%off <%jmp>"),
(0xc7, "%dst s>>= %imm"),
(0xcc, "%dst s>>= %src"),
(0xcd, "if %dst s< %src goto pc%off <%jmp>"),
(0xcf, "%dst s>>= %src"),
(0xd5, "if %dst s<= %imm goto pc%off <%jmp>"),
(0xdc, "%dst endian %src"),
(0xdd, "if %dst s<= %imm goto pc%off <%jmp>"),]
@classmethod
def build_instr(cls, op):
dst = random.randint(0, 0xf)
src = random.randint(0, 0xf)
offset = random.randint(0, 0xffff)
imm = random.randint(0, 0xffffffff)
return BPFInstr(op, dst, src, offset, imm)
@classmethod
def format_instr(cls, instr, fmt):
uimm = ct.c_uint32(instr.imm).value
return (fmt.replace("%dst", "r%d" % (instr.dst))
.replace("%src", "r%d" % (instr.src))
.replace("%imm", "%d" % (instr.imm))
.replace("%ibw", "0x%x" % (uimm))
.replace("%sim", "%+d" % (instr.imm))
.replace("%off", "%+d" % (instr.offset))
.replace("%jmp", "%d" % (instr.offset + 1)))
def test_func(self):
b = BPF(text="""
struct key_t {int a; short b; struct {int c:4; int d:8;} e;} __attribute__((__packed__));
BPF_HASH(test_map, struct key_t);
int test_func(void)
{
return 1;
}""")
self.assertEqual(
"""Disassemble of BPF program test_func:
0: (b7) r0 = 1
1: (95) exit""",
b.disassemble_func("test_func"))
def _assert_equal_ignore_fd_id(s1, s2):
# In first line of string like
# Layout of BPF map test_map (type HASH, FD 3, ID 0):
# Ignore everything from FD to end-of-line
# Compare rest of string normally
s1_lines = s1.split('\n')
s2_lines = s2.split('\n')
s1_first_cut = s1_lines[0]
s1_first_cut = s1_first_cut[0:s1_first_cut.index("FD")]
s2_first_cut = s2_lines[0]
s2_first_cut = s2_first_cut[0:s2_first_cut.index("FD")]
self.assertEqual(s1_first_cut, s2_first_cut)
s1_rest = '\n'.join(s1_lines[1:])
s2_rest = '\n'.join(s2_lines[1:])
self.assertEqual(s1_rest, s2_rest)
_assert_equal_ignore_fd_id(
"""Layout of BPF map test_map (type HASH, FD 3, ID 0):
struct {
int a;
short b;
struct {
int c:4;
int d:8;
} e;
} key;
unsigned long long value;""",
b.decode_table("test_map"))
def test_bpf_isa(self):
for op, instr_fmt in self.opcodes:
if instr_fmt is None:
continue
instr = self.build_instr(op)
instr_str = ct.string_at(ct.addressof(instr), ct.sizeof(instr))
target_text = self.format_instr(instr, instr_fmt)
self.assertEqual(disassembler.disassemble_str(instr_str)[0],
"%4d: (%02x) %s" % (0, op, target_text))
if __name__ == "__main__":
main()
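# --- Illustrative note (added for clarity; not part of the original test) ---
# Example of how one opcode table entry is rendered: for opcode 0x07 ("%dst += %imm")
# with a randomly built instruction having dst=1 and imm=42, format_instr() yields
# "r1 += 42", which is then compared against disassembler.disassemble_str() output
# for the packed instruction bytes.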
| 39.828283 | 101 | 0.410474 |
794309e12f179df3a940468097773d1e1ce2e1b6 | 1,170 | py | Python | micromagnetictests/calculatortests/stdprob4.py | ubermag/micromagnetictests | 60ad592b146f1ca5c35c897b64f281d62fa77699 | [
"BSD-3-Clause"
] | 2 | 2021-05-27T05:00:39.000Z | 2022-01-16T15:01:39.000Z | micromagnetictests/calculatortests/stdprob4.py | ubermag/micromagnetictests | 60ad592b146f1ca5c35c897b64f281d62fa77699 | [
"BSD-3-Clause"
] | 6 | 2021-06-25T09:42:35.000Z | 2022-03-15T00:09:10.000Z | micromagnetictests/calculatortests/stdprob4.py | ubermag/micromagnetictests | 60ad592b146f1ca5c35c897b64f281d62fa77699 | [
"BSD-3-Clause"
] | 1 | 2021-07-04T15:39:10.000Z | 2021-07-04T15:39:10.000Z | import discretisedfield as df
import micromagneticmodel as mm
def test_stdprob4(calculator):
name = 'stdprob4'
L, d, th = 500e-9, 125e-9, 3e-9 # (m)
cell = (5e-9, 5e-9, 3e-9) # (m)
p1 = (0, 0, 0)
p2 = (L, d, th)
region = df.Region(p1=p1, p2=p2)
mesh = df.Mesh(region=region, cell=cell)
Ms = 8e5 # (A/m)
A = 1.3e-11 # (J/m)
system = mm.System(name=name)
system.energy = mm.Exchange(A=A) + mm.Demag()
gamma0 = 2.211e5 # (m/As)
alpha = 0.02
system.dynamics = mm.Precession(gamma0=gamma0) + mm.Damping(alpha=alpha)
system.m = df.Field(mesh, dim=3, value=(1, 0.25, 0.1), norm=Ms)
md = calculator.MinDriver()
md.drive(system) # updates system.m in-place
H = (-24.6e-3/mm.consts.mu0, 4.3e-3/mm.consts.mu0, 0)
system.energy += mm.Zeeman(H=H)
td = calculator.TimeDriver()
td.drive(system, t=1e-9, n=200)
t = system.table.data['t'].values
my = system.table.data['my'].values
assert abs(min(t) - 5e-12) < 1e-20
assert abs(max(t) - 1e-9) < 1e-20
# Eye-norm test.
assert 0.7 < max(my) < 0.8
assert -0.5 < min(my) < -0.4
calculator.delete(system)
| 24.893617 | 76 | 0.580342 |
79430b8d3558441f6bb681a67f6226e315ffb8b0 | 1,806 | py | Python | tests/gunicorn/test_gunicorn_3.py | hypertrace/pythonagent | 283e18c61807f4ae653d147be9ff1424b0b0a6eb | [
"Apache-2.0"
] | 4 | 2021-05-19T16:16:26.000Z | 2022-01-16T04:48:43.000Z | tests/gunicorn/test_gunicorn_2.py | hypertrace/pythonagent | 283e18c61807f4ae653d147be9ff1424b0b0a6eb | [
"Apache-2.0"
] | 48 | 2021-04-27T07:25:48.000Z | 2021-08-30T21:27:27.000Z | tests/gunicorn/test_gunicorn_3.py | hypertrace/pythonagent | 283e18c61807f4ae653d147be9ff1424b0b0a6eb | [
"Apache-2.0"
] | null | null | null | import sys
import os
import logging
import traceback
import json
import pytest
import requests
from werkzeug.serving import make_server
import time
import atexit
import threading
import datetime
def setup_custom_logger(name):
try:
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.FileHandler('agent.log', mode='a')
handler.setFormatter(formatter)
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.addHandler(screen_handler)
return logger
except:
logger.error('Failed to customize logger: exception=%s, stacktrace=%s',
sys.exc_info()[0],
traceback.format_exc())
def test_run():
try:
logger = setup_custom_logger(__file__)
logger.info('Running test calls.')
logger.info('Making test call to /dbtest/full-test')
startTime = datetime.datetime.now()
for x in range(1000): # Run 1000 requests
r1 = requests.get('http://localhost:8000/dbtest/full-test')
logger.debug('Reading /dbtest/full-test response.')
a1 = r1.json()['a']
assert a1 == 'a'
logger.debug('r1 result: ' + str(a1))
logger.info('Exiting from flask + mysql instrumentation test.')
endTime = datetime.datetime.now()
elapsedTime= endTime - startTime
logger.info('elapsedTime: ' + str(elapsedTime))
logger.info('time/request: ' + str(elapsedTime/1000))
return 0
except:
logger.error('Failed to run flask + mysql instrumentation wrapper test: exception=%s, stacktrace=%s',
sys.exc_info()[0],
traceback.format_exc())
raise sys.exc_info()[0]
| 32.836364 | 105 | 0.684939 |
79430b9ec6b24047004ffe840a04ef0184cdbf02 | 13,700 | py | Python | mmdet/models/dense_heads/deformable_detr_head.py | hyperlist/mmdetection | ba4918de7fb21a96edc373584fa21a17d098a843 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/deformable_detr_head.py | hyperlist/mmdetection | ba4918de7fb21a96edc373584fa21a17d098a843 | [
"Apache-2.0"
] | null | null | null | mmdet/models/dense_heads/deformable_detr_head.py | hyperlist/mmdetection | ba4918de7fb21a96edc373584fa21a17d098a843 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import paddle
import paddle.nn as nn
from mmcv.cnn import Linear, bias_init_with_prob, constant_init
from mmcv.runner import force_fp32
from mmdet.core import multi_apply
from mmdet.models.utils.transformer import inverse_sigmoid
from ..builder import HEADS
from .detr_head import DETRHead
@HEADS.register_module()
class DeformableDETRHead(DETRHead):
"""Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to-
End Object Detection.
Code is modified from the `official github repo
<https://github.com/fundamentalvision/Deformable-DETR>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2010.04159>`_ .
Args:
with_box_refine (bool): Whether to refine the reference points
in the decoder. Defaults to False.
as_two_stage (bool) : Whether to generate the proposal from
the outputs of encoder.
transformer (obj:`ConfigDict`): ConfigDict is used for building
the Encoder and Decoder.
"""
def __init__(self,
*args,
with_box_refine=False,
as_two_stage=False,
transformer=None,
**kwargs):
self.with_box_refine = with_box_refine
self.as_two_stage = as_two_stage
if self.as_two_stage:
transformer['as_two_stage'] = self.as_two_stage
super(DeformableDETRHead, self).__init__(
*args, transformer=transformer, **kwargs)
def _init_layers(self):
"""Initialize classification branch and regression branch of head."""
fc_cls = Linear(self.embed_dims, self.cls_out_channels)
reg_branch = []
for _ in range(self.num_reg_fcs):
reg_branch.append(Linear(self.embed_dims, self.embed_dims))
reg_branch.append(nn.ReLU())
reg_branch.append(Linear(self.embed_dims, 4))
reg_branch = nn.Sequential(*reg_branch)
def _get_clones(module, N):
return nn.LayerList([copy.deepcopy(module) for i in range(N)])
# last reg_branch is used to generate proposal from
# encode feature map when as_two_stage is True.
num_pred = (self.transformer.decoder.num_layers + 1) if \
self.as_two_stage else self.transformer.decoder.num_layers
if self.with_box_refine:
self.cls_branches = _get_clones(fc_cls, num_pred)
self.reg_branches = _get_clones(reg_branch, num_pred)
else:
self.cls_branches = nn.LayerList(
[fc_cls for _ in range(num_pred)])
self.reg_branches = nn.LayerList(
[reg_branch for _ in range(num_pred)])
if not self.as_two_stage:
self.query_embedding = nn.Embedding(self.num_query,
self.embed_dims * 2)
def init_weights(self):
"""Initialize weights of the DeformDETR head."""
self.transformer.init_weights()
if self.loss_cls.use_sigmoid:
bias_init = bias_init_with_prob(0.01)
for m in self.cls_branches:
nn.init.constant_(m.bias, bias_init)
for m in self.reg_branches:
constant_init(m[-1], 0, bias=0)
nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)
if self.as_two_stage:
for m in self.reg_branches:
nn.init.constant_(m[-1].bias.data[2:], 0.0)
def forward(self, mlvl_feats, img_metas):
"""Forward function.
Args:
mlvl_feats (tuple[Tensor]): Features from the upstream
network, each is a 4D-tensor with shape
(N, C, H, W).
img_metas (list[dict]): List of image information.
Returns:
all_cls_scores (Tensor): Outputs from the classification head, \
shape [nb_dec, bs, num_query, cls_out_channels]. Note \
cls_out_channels should includes background.
all_bbox_preds (Tensor): Sigmoid outputs from the regression \
head with normalized coordinate format (cx, cy, w, h). \
Shape [nb_dec, bs, num_query, 4].
enc_outputs_class (Tensor): The score of each point on encode \
feature map, has shape (N, h*w, num_class). Only when \
as_two_stage is True it would be returned, otherwise \
`None` would be returned.
enc_outputs_coord (Tensor): The proposal generate from the \
encode feature map, has shape (N, h*w, 4). Only when \
as_two_stage is True it would be returned, otherwise \
`None` would be returned.
"""
batch_size = mlvl_feats[0].size(0)
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
img_masks = mlvl_feats[0].new_ones(
(batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w, _ = img_metas[img_id]['img_shape']
img_masks[img_id, :img_h, :img_w] = 0
mlvl_masks = []
mlvl_positional_encodings = []
for feat in mlvl_feats:
mlvl_masks.append(
F.interpolate(img_masks[None],
size=feat.shape[-2:]).to(paddle.bool).squeeze(0))
mlvl_positional_encodings.append(
self.positional_encoding(mlvl_masks[-1]))
query_embeds = None
if not self.as_two_stage:
query_embeds = self.query_embedding.weight
hs, init_reference, inter_references, \
enc_outputs_class, enc_outputs_coord = self.transformer(
mlvl_feats,
mlvl_masks,
query_embeds,
mlvl_positional_encodings,
reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501
cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501
)
hs = hs.permute(0, 2, 1, 3)
outputs_classes = []
outputs_coords = []
for lvl in range(hs.shape[0]):
if lvl == 0:
reference = init_reference
else:
reference = inter_references[lvl - 1]
reference = inverse_sigmoid(reference)
outputs_class = self.cls_branches[lvl](hs[lvl])
tmp = self.reg_branches[lvl](hs[lvl])
if reference.shape[-1] == 4:
tmp += reference
else:
assert reference.shape[-1] == 2
tmp[..., :2] += reference
outputs_coord = tmp.sigmoid()
outputs_classes.append(outputs_class)
outputs_coords.append(outputs_coord)
outputs_classes = paddle.stack(outputs_classes)
outputs_coords = paddle.stack(outputs_coords)
if self.as_two_stage:
return outputs_classes, outputs_coords, \
enc_outputs_class, \
enc_outputs_coord.sigmoid()
else:
return outputs_classes, outputs_coords, \
None, None
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def loss(self,
all_cls_scores,
all_bbox_preds,
enc_cls_scores,
enc_bbox_preds,
gt_bboxes_list,
gt_labels_list,
img_metas,
gt_bboxes_ignore=None):
""""Loss function.
Args:
all_cls_scores (Tensor): Classification score of all
decoder layers, has shape
[nb_dec, bs, num_query, cls_out_channels].
all_bbox_preds (Tensor): Sigmoid regression
outputs of all decode layers. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
[nb_dec, bs, num_query, 4].
enc_cls_scores (Tensor): Classification scores of
points on encode feature map , has shape
(N, h*w, num_classes). Only be passed when as_two_stage is
True, otherwise is None.
enc_bbox_preds (Tensor): Regression results of each points
on the encode feature map, has shape (N, h*w, 4). Only be
passed when as_two_stage is True, otherwise is None.
gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
which can be ignored for each image. Default None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert gt_bboxes_ignore is None, \
f'{self.__class__.__name__} only supports ' \
f'for gt_bboxes_ignore setting to None.'
num_dec_layers = len(all_cls_scores)
all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
all_gt_bboxes_ignore_list = [
gt_bboxes_ignore for _ in range(num_dec_layers)
]
img_metas_list = [img_metas for _ in range(num_dec_layers)]
losses_cls, losses_bbox, losses_iou = multi_apply(
self.loss_single, all_cls_scores, all_bbox_preds,
all_gt_bboxes_list, all_gt_labels_list, img_metas_list,
all_gt_bboxes_ignore_list)
loss_dict = dict()
# loss of proposal generated from encode feature map.
if enc_cls_scores is not None:
binary_labels_list = [
paddle.zeros_like(gt_labels_list[i])
for i in range(len(img_metas))
]
enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
self.loss_single(enc_cls_scores, enc_bbox_preds,
gt_bboxes_list, binary_labels_list,
img_metas, gt_bboxes_ignore)
loss_dict['enc_loss_cls'] = enc_loss_cls
loss_dict['enc_loss_bbox'] = enc_losses_bbox
loss_dict['enc_loss_iou'] = enc_losses_iou
# loss from the last decoder layer
loss_dict['loss_cls'] = losses_cls[-1]
loss_dict['loss_bbox'] = losses_bbox[-1]
loss_dict['loss_iou'] = losses_iou[-1]
# loss from other decoder layers
num_dec_layer = 0
for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],
losses_bbox[:-1],
losses_iou[:-1]):
loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
num_dec_layer += 1
return loss_dict
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def get_bboxes(self,
all_cls_scores,
all_bbox_preds,
enc_cls_scores,
enc_bbox_preds,
img_metas,
rescale=False):
"""Transform network outputs for a batch into bbox predictions.
Args:
all_cls_scores (Tensor): Classification score of all
decoder layers, has shape
[nb_dec, bs, num_query, cls_out_channels].
all_bbox_preds (Tensor): Sigmoid regression
outputs of all decode layers. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
[nb_dec, bs, num_query, 4].
enc_cls_scores (Tensor): Classification scores of
points on encode feature map , has shape
(N, h*w, num_classes). Only be passed when as_two_stage is
True, otherwise is None.
enc_bbox_preds (Tensor): Regression results of each points
on the encode feature map, has shape (N, h*w, 4). Only be
passed when as_two_stage is True, otherwise is None.
img_metas (list[dict]): Meta information of each image.
rescale (bool, optional): If True, return boxes in original
image space. Default False.
Returns:
list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
The first item is an (n, 5) tensor, where the first 4 columns \
are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
5-th column is a score between 0 and 1. The second item is a \
(n,) tensor where each item is the predicted class label of \
the corresponding box.
"""
cls_scores = all_cls_scores[-1]
bbox_preds = all_bbox_preds[-1]
result_list = []
for img_id in range(len(img_metas)):
cls_score = cls_scores[img_id]
bbox_pred = bbox_preds[img_id]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(cls_score, bbox_pred,
img_shape, scale_factor,
rescale)
result_list.append(proposals)
return result_list
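# --- Illustrative shape walkthrough (added for clarity; not part of the original file) ---
# Hedged sketch of how the head's outputs are typically consumed; the concrete numbers
# are assumptions for illustration only (bs=2, num_query=300, 80 classes, 6 decoder layers):
#   all_cls_scores: (6, 2, 300, 80)  -- classification logits per decoder layer
#   all_bbox_preds: (6, 2, 300, 4)   -- normalized (cx, cy, w, h) per decoder layer
# loss() supervises every decoder layer (plus the encoder proposals when as_two_stage
# is True), while get_bboxes() decodes only the last decoder layer.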
| 42.946708 | 98 | 0.588394 |
79430beb466d9d12cfb98928ffea27957fc738a0 | 2,263 | py | Python | project/fsm/ms-fsm/setup.py | juanlucruz/tng-communications-pilot | 3d40d631b62e14b609cc58eee58e843a97553653 | [
"Apache-2.0"
] | null | null | null | project/fsm/ms-fsm/setup.py | juanlucruz/tng-communications-pilot | 3d40d631b62e14b609cc58eee58e843a97553653 | [
"Apache-2.0"
] | 76 | 2018-06-16T10:46:24.000Z | 2019-11-19T08:39:59.000Z | project/fsm/ms-fsm/setup.py | juanlucruz/tng-communications-pilot | 3d40d631b62e14b609cc58eee58e843a97553653 | [
"Apache-2.0"
] | 12 | 2018-05-14T09:01:47.000Z | 2019-08-21T08:03:44.000Z | """
Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV, 5GTANGO
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
This work has been performed in the framework of the 5GTANGO project,
funded by the European Commission under Grant number 761493 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the 5GTANGO
partner consortium (www.5gtango.eu).
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ms',
version='1.0',
description='SONATA FSM/SSM template',
long_description=long_description,
# Author details
author='Thomas Soenen',
author_email='[email protected]',
# Choose your license
license='Apache 2.0',
# What does your project relate to?
keywords='NFV orchestrator',
packages=find_packages(),
install_requires=[],
setup_requires=[],
entry_points={
'console_scripts': ['ms=ms.__main__:main'],
},
) | 31 | 72 | 0.760495 |
79430bf3dd6cc400aa619f299f9520f2cb181cac | 3,385 | py | Python | e621/reports/api.py | adjspecies/explore621 | 0ec946d28ed54d11569aa237f721001f74e7f1be | [
"MIT"
] | 3 | 2019-10-12T13:32:22.000Z | 2021-11-18T19:17:16.000Z | e621/reports/api.py | adjspecies/explore621 | 0ec946d28ed54d11569aa237f721001f74e7f1be | [
"MIT"
] | 6 | 2018-12-11T20:38:26.000Z | 2021-06-10T21:01:45.000Z | e621/reports/api.py | adjspecies/explore621 | 0ec946d28ed54d11569aa237f721001f74e7f1be | [
"MIT"
] | 4 | 2018-12-11T06:19:59.000Z | 2022-02-17T00:29:15.000Z | import json
import re
from django.http import HttpResponse
from .models import (
Report,
Run,
)
from .runners import RUNNERS
help_text_re = re.compile(r'\n ')
def _success(obj):
response = HttpResponse(json.dumps({
'status': 'success',
'message': 'ok',
'result': obj,
}), content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
def _error(message, obj=None, status=400):
responseObj = {
'status': 'error',
'message': message,
}
if obj is not None:
responseObj['result'] = obj
response = HttpResponse(
json.dumps(responseObj),
content_type='application/json',
status=status)
response['Access-Control-Allow-Origin'] = '*'
return response
def list_reports(request):
response = []
for report in Report.objects.all():
response.append({
'id': report.id,
'title': report.title,
'description': report.description,
'frequency': report.frequency,
'frequency_display': report.get_frequency_display(),
'runner': report.runner,
'runner_help_text': help_text_re.sub(
'\n', RUNNERS[report.runner].help_text),
'attributes': report.attributes,
'max_stored_runs': report.max_stored_runs,
'requires_datum_models': report.requires_datum_models,
'runs': [run.id for run in report.run_set.all()],
})
return _success(response)
def show_report(request, report_id):
try:
report = Report.objects.get(pk=report_id)
try:
last_run = report.run_set.order_by('-id')[0]
last_run_obj = {
'id': last_run.id,
'started': str(last_run.started),
'finished': str(last_run.finished),
'duration':
(last_run.finished - last_run.started).total_seconds(),
'result': json.loads(last_run.result),
}
except:
last_run_obj = {}
response = {
'id': report.id,
'title': report.title,
'description': report.description,
'frequency': report.frequency,
'frequency_display': report.get_frequency_display(),
'runner': report.runner,
'runner_help_text': help_text_re.sub(
'\n', RUNNERS[report.runner].help_text),
'attributes': report.attributes,
'max_stored_runs': report.max_stored_runs,
'requires_datum_models': report.requires_datum_models,
'runs': [run.id for run in report.run_set.all()],
'last_run': last_run_obj,
}
return _success(response)
except Report.DoesNotExist:
return _error('report {} not found'.format(report_id), status=404)
def show_run(request, report_id, run_id):
try:
run = Run.objects.get(pk=run_id)
response = {
'id': run.id,
'started': str(run.started),
'finished': str(run.finished),
'duration': (run.finished - run.started).total_seconds(),
'result': json.loads(run.result),
}
return _success(response)
except Run.DoesNotExist:
return _error('run {} not found'.format(run_id), status=404)
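# --- Illustrative wiring sketch (added for clarity; not part of the original file) ---
# A hedged guess at how these views could be routed; the real project may use
# different URL names and patterns.
#
# from django.urls import path
# from reports import api
#
# urlpatterns = [
#     path('reports/', api.list_reports),
#     path('reports/<int:report_id>/', api.show_report),
#     path('reports/<int:report_id>/runs/<int:run_id>/', api.show_run),
# ]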
| 32.864078 | 75 | 0.574594 |
79430c27608b54e55c99ede043800bc145f0a378 | 2,261 | py | Python | drone_system/forms.py | mriduldhall/DroneNav | 724718b96e37510c1d1bab942f05f46eec099175 | [
"MIT"
] | null | null | null | drone_system/forms.py | mriduldhall/DroneNav | 724718b96e37510c1d1bab942f05f46eec099175 | [
"MIT"
] | null | null | null | drone_system/forms.py | mriduldhall/DroneNav | 724718b96e37510c1d1bab942f05f46eec099175 | [
"MIT"
] | null | null | null | from django import forms
from django.forms import ModelForm
from user_system.models import users
from .models import drones
class BookForm(ModelForm):
class Meta:
model = drones
fields = ['origin', 'destination']
widgets = {
'origin': forms.Select(
attrs={
'class': 'custom-select my-1 mr-sm-2',
},
),
'destination': forms.Select(
attrs={
'class': 'custom-select my-1 mr-sm-2',
},
)
}
def save(self, commit=True):
return super(BookForm, self).save(commit=commit)
class FutureBook(ModelForm):
time = forms.TimeField(widget=forms.TimeInput(attrs={'class': 'form-control', 'placeholder': 'Time(Format: HH:MM in 24 hour notation)'}))
class Meta:
model = drones
fields = ['origin', 'destination']
widgets = {
'origin': forms.Select(
attrs={
'class': 'custom-select my-1 mr-sm-2',
},
),
'destination': forms.Select(
attrs={
'class': 'custom-select my-1 mr-sm-2',
},
)
}
def save(self, commit=True):
return super(FutureBook, self).save(commit=commit)
class ChangePassword(ModelForm):
new_password = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'New Password'}))
repeat_password = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Repeat Password'}))
class Meta:
model = users
fields = ['password']
widgets = {
'password': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Current Password',
}
),
}
class DeleteAccount(ModelForm):
class Meta:
model = users
fields = ['password']
widgets = {
'password': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Password',
}
),
}
| 28.2625 | 141 | 0.495798 |
79430c2e031190489013e316c967eb15859690e0 | 5,180 | py | Python | python/rapidstream/BE/InitialSlotPlacement.py | Licheng-Guo/RapidStream | 19d0d8c0f7766fdfb612252483f59660e21fddc0 | [
"MIT"
] | 69 | 2021-12-16T12:58:10.000Z | 2022-03-22T00:45:13.000Z | python/rapidstream/BE/InitialSlotPlacement.py | Licheng-Guo/RapidStream | 19d0d8c0f7766fdfb612252483f59660e21fddc0 | [
"MIT"
] | 1 | 2021-12-16T16:43:34.000Z | 2021-12-16T16:43:34.000Z | python/rapidstream/BE/InitialSlotPlacement.py | Licheng-Guo/RapidStream | 19d0d8c0f7766fdfb612252483f59660e21fddc0 | [
"MIT"
] | 10 | 2021-12-17T18:18:19.000Z | 2022-03-17T15:56:15.000Z | import argparse
import logging
import json
import math
import os
from rapidstream.BE.Utilities import getAnchorTimingReportScript
from rapidstream.BE.GenAnchorConstraints import getSlotInitPlacementPblock
from rapidstream.BE.Utilities import loggingSetup
loggingSetup()
def getPlacementScript(slot_name):
script = []
script.append(f'set_param general.maxThreads 8')
dcp_path = get_synth_dcp(slot_name)
script.append(f'open_checkpoint {dcp_path}')
# in case the reused synth checkpoint has a different clock
script.append(f'create_clock -name ap_clk -period {args.clock_period} [get_pins test_bufg/O]')
# add floorplanning constraints
script += getSlotInitPlacementPblock(hub, slot_name)
# in the reuse mode, we are not sure if the reused synth checkpoints have inverted or not
if not args.invert_non_laguna_anchor_clock:
script.append('set_property IS_INVERTED 0 [ get_pins -filter {NAME =~ *C} -of_objects [get_cells *q0_reg* ] ]')
script.append(f'opt_design')
# placement
script.append(f'place_design')
script.append(f'phys_opt_design')
# write out the ctrl wrapper only for anchor placement
script.append(f'write_checkpoint -cell {slot_name}_ctrl_U0 {init_place_dir}/{slot_name}/{slot_name}_placed_no_anchor.dcp')
script.append(f'write_checkpoint {init_place_dir}/{slot_name}/{slot_name}_placed.dcp')
# get the timing report of anchors. At this point the timing report is meaningless
# however, we use the report to extract the number of LUTs on the timing paths
script += getAnchorTimingReportScript(report_prefix='init_placement')
script.append(f'exec touch {init_place_dir}/{slot_name}/{slot_name}_placed.dcp.done.flag') # signal that the DCP generation is finished
return script
def setupSlotInitPlacement():
for slot_name in hub['SlotIO'].keys():
os.mkdir(f'{init_place_dir}/{slot_name}')
script = getPlacementScript(slot_name)
open(f'{init_place_dir}/{slot_name}/place_slot.tcl', 'w').write('\n'.join(script))
def generateParallelScript(hub, user_name, server_list):
"""
spread the tasks to multiple servers
broadcast the results to all servers
"""
place = []
vivado = f'VIV_VER={args.vivado_version} vivado -mode batch -source place_slot.tcl'
parse_timing_report = 'python3.6 -m rapidstream.BE.TimingReportParser init_placement'
for slot_name in hub['SlotIO'].keys():
cd = f'cd {init_place_dir}/{slot_name}/'
guard = get_guard(slot_name)
# broadcast the results to all servers
transfer = []
for server in server_list:
transfer.append(f'rsync_with_retry.sh --target-server {server} --user-name {user_name} --dir-to-sync {init_place_dir}/{slot_name}/')
transfer_str = " && ".join(transfer)
command = f'{guard} && {cd} && {vivado} && {parse_timing_report} && {transfer_str}'
place.append(command)
num_job_server = math.ceil(len(place) / len(server_list) )
for i, server in enumerate(server_list):
local_tasks = place[i * num_job_server: (i+1) * num_job_server]
open(f'{init_place_dir}/parallel_init_slot_placement_{server}.txt', 'w').write('\n'.join(local_tasks))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--hub_path", type=str, required=True)
parser.add_argument("--base_dir", type=str, required=True)
parser.add_argument("--vivado_version", type=str, required=True)
parser.add_argument("--clock_period", type=float, required=True)
parser.add_argument("--invert_non_laguna_anchor_clock", type=int, required=True)
parser.add_argument("--path_to_reuse_synth_dcp", type=str, nargs="?", default="", help="Path to the synth checkpoints that have been uniquefied")
parser.add_argument("--server_list_in_str", type=str, required=True, help="e.g., \"u5 u15 u17 u18\"")
parser.add_argument("--user_name", type=str, required=True)
parser.add_argument("--skip_synthesis", action="store_true")
args = parser.parse_args()
hub_path = args.hub_path
base_dir = args.base_dir
user_name = args.user_name
server_list = args.server_list_in_str.split()
# depends on whether we use the uniquified synth checkpoints
if args.path_to_reuse_synth_dcp:
get_synth_dcp = lambda slot_name : f'{args.path_to_reuse_synth_dcp}/{slot_name}/{slot_name}_synth_unique_2021.1.dcp'
# note that in order to measure the e2e runtime, we run the synthesis again
# just that we will start placement from the previous synthesized checkpoints that has been renamed.
if args.skip_synthesis:
get_guard = lambda slot_name : f'sleep 1'
else:
get_guard = lambda slot_name : f'until [[ -f {synth_dir}/{slot_name}/{slot_name}_synth.dcp.done.flag ]] ; do sleep 10; done'
else:
get_synth_dcp = lambda slot_name : f'{synth_dir}/{slot_name}/{slot_name}_synth.dcp'
get_guard = lambda slot_name : f'until [[ -f {synth_dir}/{slot_name}/{slot_name}_synth.dcp.done.flag ]] ; do sleep 10; done'
hub = json.loads(open(hub_path, 'r').read())
synth_dir = f'{base_dir}/slot_synth'
init_place_dir = f'{base_dir}/init_slot_placement'
os.mkdir(init_place_dir)
setupSlotInitPlacement()
generateParallelScript(hub, user_name, server_list) | 40.46875 | 147 | 0.741699 |
79430c9a2be7102fd88e50fb6b297e9e77ca14eb | 1,001 | py | Python | anyser/__init__.py | Cologler/anyser-python | 52afa0a62003adcfe269f47d81863e00381d8ff9 | [
"MIT"
] | null | null | null | anyser/__init__.py | Cologler/anyser-python | 52afa0a62003adcfe269f47d81863e00381d8ff9 | [
"MIT"
] | null | null | null | anyser/__init__.py | Cologler/anyser-python | 52afa0a62003adcfe269f47d81863e00381d8ff9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <[email protected]>
# ----------
#
# ----------
import os
import importlib
from .err import FormatNotFoundError, SerializeError, NotSupportError
from .abc import ISerializer
from .core import ComplexSerializer
from .g import (
load,
loads, loadb, loadf,
dumps, dumpb, dumpf,
register_format, get_available_formats,
)
def _import_impls():
impls_root = os.path.join(os.path.dirname(__file__), 'impls')
for name in os.listdir(impls_root):
if name.endswith('.py') and not name.startswith('_'):
try:
importlib.import_module('.impls.' + name[:-3], __name__)
except ModuleNotFoundError:
pass
_import_impls()
__all__ = (
'FormatNotFoundError', 'SerializeError', 'NotSupportError',
'ISerializer',
'ComplexSerializer',
'load',
'loads', 'dumps', 'loadf',
'loadb', 'dumpb', 'dumpf',
'register_format', 'get_available_formats',
)
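# --- Illustrative usage sketch (added for clarity; not part of the original package) ---
# Hedged example; the exact way the format name is passed to loads()/dumps() is an
# assumption inferred from the exported helpers above, not a documented signature.
#
# import anyser
# data = anyser.loads('{"a": 1}', 'json')   # parse a string in a named format
# text = anyser.dumps(data, 'json')         # serialize back to a string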
| 24.414634 | 72 | 0.635365 |
79430e254a8a042962939346d76fbd215d2aea63 | 2,583 | py | Python | ppApiConfig.py | GSA/PricesPaidAPI | 0899d86d20e20501e15054b2fdf62decac49fa3a | [
"Unlicense"
] | 3 | 2015-02-04T17:34:08.000Z | 2017-12-12T16:43:40.000Z | ppApiConfig.py | GSA/PricesPaidAPI | 0899d86d20e20501e15054b2fdf62decac49fa3a | [
"Unlicense"
] | null | null | null | ppApiConfig.py | GSA/PricesPaidAPI | 0899d86d20e20501e15054b2fdf62decac49fa3a | [
"Unlicense"
] | 2 | 2017-07-15T00:14:48.000Z | 2019-11-01T16:17:11.000Z | # These configuration are necessary if you are using Bottle as your webserver
BottlePortNumber = 8080
BottleHostname = 'localhost'
SolrDeleteExistingData = 'F'
PathToDataFiles = "../cookedData/EDW"
PathToArchiveInputFiles = "../cookedData/ArchiveFiles/InputFiles"
PathToArchiveErrorFiles = "../cookedData/ArchiveFiles/ErrorFiles"
PathToArchiveSplitFiles = "../cookedData/ArchiveFiles/SplitFiles"
PathToActualInputFiles = '../cookedData'
URLToPPSearchApi = "http://localhost/api"
URLToPPSearchApiSolr = "http://localhost/apisolr"
URLToSolr = 'http://localhost:8983/solr'
RelativePathToHashesFile = "../configuration/p3api.hashes.txt"
# I'm going to use a 10-minute timeout
TokenTimeout = 600 * 2
TokenTimeout = 300
LIMIT_NUMBER_BAD_LOGINS = 5
# We'll make them wait one hour if they have 5 bad logins.
LIMIT_TIME_TO_RETRY = 60*60
MAXIMUM_NUMBER_TO_LOAD = 1000*5000
LIMT_NUM_MATCHING_TRANSACTIONS = 1000
# CAS_SERVER = 'http://127.0.0.1:8099'
CAS_SERVER = 'https://login.max.gov'
# CAS_RETURN_SERVICE_URL = 'http://127.0.0.1/apisolr/ReturnLoginViaMax'
CAS_RETURN_SERVICE_URL = 'https://pricespaid.acquisition.gov/gui/ReturnLoginViaMax'
CAS_CREATE_SESSION_IF_AUTHENTICATED = 'https://pricespaid.acquisition.gov/apisolr/ReturnSessionViaMax'
# This should be in the form of a python "requests" proxies dictionary
# CAS_SERVER_PROXY = {
# "http": "http://10.10.1.10:3128",
# "https": "http://10.10.1.10:1080",
# }
CAS_PROXY = {
"https": "ftp-proxy.fss.gsa.gov:3128",
}
CAS_LEVEL_OF_ASSURANCE = "assurancelevel3"
# Each predicate returns a boolean; parentheses (not set literals) are required
# here, otherwise the lambdas would return a one-element set that is always truthy.
CAS_LEVEL_OF_ASSURANCE_PREDICATE_LOA3 = lambda loa, piv: (
    "http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel3" == loa
)
CAS_LEVEL_OF_ASSURANCE_PREDICATE_LOA2 = lambda loa, piv: (
    ("http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel2" == loa)
    or
    ("http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel3" == loa)
)
CAS_LEVEL_OF_ASSURANCE_PREDICATE_LOA2_AND_PIV = lambda loa, piv: (
    (("http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel2" == loa)
     or
     ("http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel3" == loa))
    and
    ("urn:max:fips-201-pivcard" == piv)
)
CAS_PIV_CARD = lambda loa, piv: (
    "urn:max:fips-201-pivcard" == piv
)
CAS_PASSWORD_OR_PIV = lambda loa, piv: (
    ("urn:max:fips-201-pivcard" == piv)
    or
    ("urn:oasis:names:tc:SAML:1.0:am:password" == piv)
)
CAS_LEVEL_3 = lambda loa, piv: (
    "urn:max:am:secureplus:federated-saml2:assurancelevel3" == piv
)
CAS_LEVEL_OF_ASSURANCE_PREDICATE = CAS_PASSWORD_OR_PIV
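# --- Illustrative note (added for clarity; not part of the original config) ---
# Hedged guess at how the selected predicate is evaluated by the API layer: it is
# called with the level-of-assurance URI and the authentication-method URI
# reported by MAX/CAS, e.g.
#   CAS_LEVEL_OF_ASSURANCE_PREDICATE(
#       "http://idmanagement.gov/icam/2009/12/saml_2.0_profile/assurancelevel3",
#       "urn:max:fips-201-pivcard")   # -> True for CAS_PASSWORD_OR_PIV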
| 32.696203 | 102 | 0.745645 |
79430eddbde460f583f7cfc0e94d6809b237c4ba | 937 | py | Python | cloudworker.py | fredericorecsky/cloudworker | 481775eb7c2c46e63d635cbbbf69a049d03fa42a | [
"Apache-2.0"
] | 1 | 2021-04-09T19:59:14.000Z | 2021-04-09T19:59:14.000Z | cloudworker.py | fredericorecsky/cloudworker | 481775eb7c2c46e63d635cbbbf69a049d03fa42a | [
"Apache-2.0"
] | null | null | null | cloudworker.py | fredericorecsky/cloudworker | 481775eb7c2c46e63d635cbbbf69a049d03fa42a | [
"Apache-2.0"
] | null | null | null |
import logging
from queues import gcppubsub
from queues import gcprun
from datetime import date, datetime
def execute( message ):
start = datetime.now().strftime("%d%m%Y %H:%M:%S")
process = gcprun.Process(message.data)
message.ack()
try:
if process.Run():
logging.info( "executed the command:", process.command )
else:
logging.error( "Not able to run the payload", message.data )
except Exception as e:
print( e )
end = datetime.now().strftime("%d%m%Y %H:%M:%S")
if process.result is not None:
print( process.result['returncode'], start, end, process.result['runtime'], process.command, *process.arguments )
if process.result['returncode'] != 0:
print( process.stdout )
print( process.stderr )
print( "Cloud worker started")
consumer = gcppubsub.Consumer()
consumer.Consume( execute )
| 27.558824 | 121 | 0.61793 |
79430f2f9f7b3614dccb8581c23c089db7f7ff48 | 6,721 | py | Python | pyuvsim/tests/test_telescope.py | RadioAstronomySoftwareGroup/pyuvsim | 1ef8cdbfd98fc6f49b598f75d759852407601ebf | [
"BSD-3-Clause"
] | 26 | 2019-01-17T09:02:24.000Z | 2022-02-22T07:49:36.000Z | pyuvsim/tests/test_telescope.py | RadioAstronomySoftwareGroup/pyuvsim | 1ef8cdbfd98fc6f49b598f75d759852407601ebf | [
"BSD-3-Clause"
] | 335 | 2018-07-20T19:09:38.000Z | 2022-03-30T13:55:17.000Z | pyuvsim/tests/test_telescope.py | RadioAstronomySoftwareGroup/pyuvsim | 1ef8cdbfd98fc6f49b598f75d759852407601ebf | [
"BSD-3-Clause"
] | 3 | 2018-08-11T02:33:18.000Z | 2021-06-17T11:19:31.000Z |
import os
import copy
from astropy.coordinates import EarthLocation
import pytest
from pyuvdata import UVBeam
import pyuvdata.tests as uvtest
import pyuvsim
from pyuvsim.data import DATA_PATH as SIM_DATA_PATH
herabeam_default = os.path.join(SIM_DATA_PATH, 'HERA_NicCST.uvbeam')
# Ignore warnings of pending sigma deprecation
@pytest.mark.filterwarnings('ignore:Achromatic gaussian')
@pytest.fixture(scope='module')
def beam_objs():
uvb = UVBeam()
uvb.read_beamfits(herabeam_default)
uvb.extra_keywords['beam_path'] = herabeam_default
uvb2 = UVBeam()
uvb2.read_beamfits(herabeam_default)
uvb2.extra_keywords['beam_path'] = herabeam_default
beams = [uvb, uvb2]
beams.append(pyuvsim.AnalyticBeam('uniform'))
diameter_m = 14.
beams.append(pyuvsim.AnalyticBeam('airy', diameter=diameter_m))
sigma = 0.03
beams.append(pyuvsim.AnalyticBeam('gaussian', sigma=sigma))
ref_freq, alpha = 100e6, -0.5
beams.append(pyuvsim.AnalyticBeam('gaussian', sigma=sigma,
ref_freq=ref_freq, spectral_index=alpha))
return beams
@pytest.mark.filterwarnings('ignore:Achromatic gaussian')
def test_convert_loop(beam_objs):
beams = beam_objs
beams[0].freq_interp_kind = 'linear'
beams[1].freq_interp_kind = 'cubic'
# Should warn about inconsistent params on UVBeams.
with uvtest.check_warnings(UserWarning, match="Conflicting settings for"):
beamlist = pyuvsim.BeamList(beams)
# Convert beams to strings:
# Fail, because UVBeams are inconsistent.
with pytest.raises(ValueError, match='Conflicting settings for'):
beamlist.set_str_mode()
beams[1].freq_interp_kind = 'linear'
beamlist.set_str_mode()
# check that _obj_to_str on a string beam works
beamlist2 = copy.deepcopy(beamlist)
beamlist2._obj_to_str(beamlist2[0])
assert beamlist2 == beamlist
assert beamlist.uvb_params['freq_interp_kind'] == 'linear'
for bs in beamlist:
assert isinstance(bs, str)
assert beamlist._obj_beam_list == []
# Convert strings to beams. Need to set additional parameters for comparison.
beamlist._set_params_on_uvbeams(beams)
beamlist.set_obj_mode()
for bi, b in enumerate(beamlist):
assert b == beams[bi]
assert beamlist._str_beam_list == []
# Reset UVBeams
beams[0].freq_interp_kind = None
beams[1].freq_interp_kind = None
@pytest.mark.filterwarnings('ignore:Achromatic gaussian')
def test_object_mode(beam_objs):
beams = beam_objs
newbeams = copy.deepcopy(beams)
beamlist = pyuvsim.BeamList(newbeams)
beamlist[0].freq_interp_kind = 'cubic'
uvb = copy.deepcopy(newbeams[0])
uvb.freq_interp_kind = 'quartic'
# Warn if inserted object mismatches.
with uvtest.check_warnings(UserWarning, match="Conflicting settings for"):
beamlist.append(uvb)
assert len(beamlist) == 7
# Error if converting to string mode with mismatched keywords:
with pytest.raises(ValueError, match='Conflicting settings '):
beamlist.set_str_mode()
beamlist._set_params_on_uvbeams(beamlist._obj_beam_list)
# Error if converting to string mode without beam_paths:
beamlist[0].extra_keywords.pop('beam_path')
with pytest.raises(ValueError, match='Need to set '):
beamlist.set_str_mode()
# Insert string -- Converts to object
new_anabeam = 'analytic_gaussian_sig=3.0'
beamlist[-1] = new_anabeam
assert isinstance(beamlist[-1], pyuvsim.AnalyticBeam)
assert beamlist[-1].sigma == 3.0
@pytest.mark.filterwarnings('ignore:Achromatic gaussian')
def test_string_mode(beam_objs):
newbeams = copy.deepcopy(beam_objs)
beamlist = pyuvsim.BeamList(newbeams)
beamlist.set_str_mode()
uvb = newbeams[0]
uvb.freq_interp_kind = 'quartic'
with pytest.raises(ValueError, match='UVBeam parameters do not'):
beamlist.append(uvb)
uvb.freq_interp_kind = beamlist.uvb_params['freq_interp_kind']
beamlist.append(uvb)
assert isinstance(beamlist[-1], str)
beamlist.set_obj_mode()
# Check that parameters are set properly.
try:
new_pars = beamlist._scrape_uvb_params(beamlist._obj_beam_list, strict=True)
assert new_pars == beamlist.uvb_params
except ValueError:
assert False
@pytest.mark.filterwarnings('ignore:Achromatic gaussian')
def test_comparison(beam_objs):
beamlist = pyuvsim.BeamList(beam_objs)
beamlist.set_str_mode()
beamlist2 = pyuvsim.BeamList(beamlist._str_beam_list)
assert beamlist == beamlist2
beamlist.set_obj_mode()
beamlist2.set_obj_mode()
assert beamlist == beamlist2
def test_no_overwrite(beam_objs):
# Ensure UVBeam keywords are not overwritten by BeamList.uvb_params
# while in object mode.
newbeams = copy.deepcopy(beam_objs)
beamlist = pyuvsim.BeamList(newbeams)
assert beamlist.uvb_params['freq_interp_kind'] == 'cubic'
uvb = copy.deepcopy(newbeams[0])
uvb.freq_interp_kind = 'quintic'
beamlist.append(uvb)
assert uvb.freq_interp_kind == 'quintic'
assert beamlist.uvb_params['freq_interp_kind'] == 'cubic'
def test_beamlist_errors(beam_objs):
newbeams = copy.deepcopy(beam_objs)
beamlist = pyuvsim.BeamList(newbeams)
# Try to make a BeamList with a mixture of strings and objects.
newlist = copy.deepcopy(beamlist._obj_beam_list)
newlist[2] = beamlist._obj_to_str(newlist[2])
with pytest.raises(ValueError, match='Invalid beam list:'):
pyuvsim.BeamList(newlist)
# Try to append an invalid beam path while in object mode.
beam_path = 'invalid_file.uvbeam'
with pytest.raises(ValueError, match='Invalid file path'):
beamlist.append(beam_path)
# test error on beams with different x_orientation
newbeams[0].x_orientation = None
with pytest.raises(
ValueError, match="UVBeam x_orientations do not match among beams in list."
):
pyuvsim.BeamList(newbeams)
# test warning on beams with different x_orientation
newbeams[0].x_orientation = None
newbeams[1].x_orientation = None
with uvtest.check_warnings(
UserWarning,
match="All polarized beams have x_orientation set to None. This will make it "
"hard to interpret the polarizations of the simulated visibilities.",
):
pyuvsim.BeamList(newbeams)
# Compare Telescopes with beamlists of different lengths
del newbeams[0]
array_location = EarthLocation(lat='-30d43m17.5s', lon='21d25m41.9s',
height=1073.)
tel0 = pyuvsim.Telescope('tel0', array_location, newbeams)
tel1 = pyuvsim.Telescope('tel1', array_location, beam_objs)
assert tel0 != tel1
| 31.406542 | 86 | 0.715519 |
79430fa8f52fefca05e19b8eeb3dea398f76558c | 1,362 | py | Python | assignment3/assignment3-4.py | 2sbsbsb/localization | 99089ae9c7eff274d5c9d5063d87ffd067180c95 | [
"MIT"
] | 6 | 2020-12-04T11:00:23.000Z | 2022-01-29T13:56:08.000Z | assignment3/assignment3-4.py | 2sbsbsb/localization | 99089ae9c7eff274d5c9d5063d87ffd067180c95 | [
"MIT"
] | null | null | null | assignment3/assignment3-4.py | 2sbsbsb/localization | 99089ae9c7eff274d5c9d5063d87ffd067180c95 | [
"MIT"
] | 6 | 2020-12-04T11:00:54.000Z | 2022-01-30T17:58:40.000Z | import matplotlib.pyplot as plt
import math
import numpy as np
import random as r
class Robot:
def __init__(self, pos):
self.pos = pos
self.pole_dist = 0
class Particle(Robot):
def __init__(self, pos):
Robot.__init__(self, pos)
self.weight = 0
self.measurement_sigma = 0.5
    def probability_density_function(self, mu, x):
        ### STUDENT CODE START
        # Reference solution (one possible answer, kept hedged): Gaussian PDF with
        # the particle's measurement noise as the standard deviation.
        sigma = self.measurement_sigma
        coeff = 1.0 / (sigma * math.sqrt(2.0 * math.pi))
        return coeff * math.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
        ### STUDENT CODE END
    def update_weight(self, robot_dist):
        ### STUDENT CODE START
        # Reference solution: weight the particle by how well its expected pole
        # distance explains the robot's measured distance.
        self.weight = self.probability_density_function(self.pole_dist, robot_dist)
        ### STUDENT CODE END
# Plot Weights for a range of robot measurements.
particle = Particle(0.0)
x = np.arange(-5, 5, 0.01)
y = np.zeros(len(x))
for i in range(len(x)):
particle.update_weight(x[i])
y[i] = particle.probability_density_function(0, x[i])
plt.plot(x, y, '-r')
plt.grid(True)
plt.show()
# Integrate left side to calculate probablity.
sum_probability = 0
for i in range(int(len(y) / 2)):
sum_probability += y[i]
print("If Probability is close to 0.5, then PDF works.")
print(round(sum_probability * 0.01, 2))
print()
# Update Particle Weigth based on robot measurement.
robot_dist = 3.0
particle.pole_dist = 3.0
particle.update_weight(robot_dist)
print("Particle Weight: " + str(round(particle.weight, 2)))
plt.plot(x, y, '-r')
plt.plot([-5, 5], [particle.weight, particle.weight], '-b')
plt.grid(True)
plt.show()
| 23.482759 | 59 | 0.665932 |
79430fc0305f592db8059104365ed8e8b1f8ad8d | 1,103 | py | Python | 567_Permutation_in_String.py | joshlyman/Josh-LeetCode | cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed | [
"MIT"
] | null | null | null | 567_Permutation_in_String.py | joshlyman/Josh-LeetCode | cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed | [
"MIT"
] | null | null | null | 567_Permutation_in_String.py | joshlyman/Josh-LeetCode | cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed | [
"MIT"
] | null | null | null | # For each window representing a substring of s2 of length len(s1), we want to check if the count of the window is
# equal to the count of s1. Here, the count of a string is the list of: [the number of a's it has, the number of b's,... ,
# the number of z's.]
# We can maintain the window by deleting the value of s2[i - len(s1)] when it gets larger than len(s1). After, we only
# need to check if it is equal to the target. Working with list values of [0, 1,..., 25] instead of 'a'-'z' makes it
# easier to count later.
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
A = [ord(x) - ord('a') for x in s1]
B = [ord(x) - ord('a') for x in s2]
target = [0] * 26
for x in A:
target[x] += 1
window = [0] * 26
for i, x in enumerate(B):
window[x] += 1
if i >= len(A):
window[B[i - len(A)]] -= 1
if window == target:
return True
return False
# Time: O(l1 + 26*(l2-l1)), where l1 is the length of l1 and l2 is the length of l2
# Space:O(1)
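# --- Illustrative usage (added for clarity; not part of the original solution) ---
# Solution().checkInclusion("ab", "eidbaooo")  # -> True, "ba" is a window of s2
# Solution().checkInclusion("ab", "eidboaoo")  # -> False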
| 35.580645 | 123 | 0.563917 |
79430fd14831c370a1c8e38c2b4103a992945fcd | 1,938 | py | Python | em desuso/leitura.py | iagorosa/Pattern_Recognition | 29998e67e8313504651ccb58f99a36606e855fe1 | [
"Apache-2.0"
] | null | null | null | em desuso/leitura.py | iagorosa/Pattern_Recognition | 29998e67e8313504651ccb58f99a36606e855fe1 | [
"Apache-2.0"
] | null | null | null | em desuso/leitura.py | iagorosa/Pattern_Recognition | 29998e67e8313504651ccb58f99a36606e855fe1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 05 12:15:33 2019
@author: iagorosa
"""
import scipy.io as scio
from PIL import Image as pil
import numpy as np
import pandas as pd
#mat = scio.loadmat('Yale_32x32.mat') # reading a .mat file in Python.
# Returns a dictionary with 5 keys, where
mat = scio.loadmat('./dataset/ORL_32x32.mat')
m = mat['fea'] # the 'fea' key in the mat dictionary holds one image per row, as a vector with 32x32=1024 positions per image
p = mat['gnd']
labels = {
'centerlight': 0,
'glasses': 1,
'happy': 2,
'leftlight': 3,
'noglasses': 4,
'normal': 5,
'rightlight': 6,
'sad': 7,
'sleepy': 8,
'surprised': 9,
'wink': 10
}
''' YALE
#person = np.array(list(range(1, 16))*11).reshape(165,1)
person = np.array([int(i//len(labels)+1) for i in range(len(m))]).reshape(165, 1)
label = np.array(list(labels.values())*15).reshape(165,1)
mm = np.concatenate([person, label, m], axis=1).astype('uint8')
np.savetxt("Yale_32x32.csv", mm, header='person,label', comments = '', delimiter=',', fmt="%8u")
'''
#### ORL
label = np.array(list(range(len(p[p==1])))*p[-1][0]).reshape(m.shape[0],1)
mm = np.concatenate([p, label, m], axis=1).astype('uint8')
np.savetxt("./dataset/ORL_32x32.csv", mm, header='label,label', comments = '', delimiter=',', fmt="%8u")
#%%
# If you want to view any image from the array, uncomment below. Change the index of m[] for a different image.
'''
for i in range(11):
m0 = m[10+i*11].reshape(32,32).T
# m0 = m[i].reshape(32,32).T
img = pil.fromarray(m0, 'L')
img.show()
'''
#%%
'''
for i in range(11):
m0 = mm[10+i*11, 2:].reshape(32,32).T
# m0 = m[i].reshape(32,32).T
img = pil.fromarray(m0, 'L')
img.show()
'''
#%% | 28.925373 | 124 | 0.566047 |
7943112210b134c00e064afae3dc76447a3b0dcb | 627 | py | Python | tests/test_walker.py | reimannsum/DangerBot | 2d892b99b9f5582a752a46186291a23ab63eec55 | [
"MIT"
] | null | null | null | tests/test_walker.py | reimannsum/DangerBot | 2d892b99b9f5582a752a46186291a23ab63eec55 | [
"MIT"
] | 1 | 2019-01-29T18:36:04.000Z | 2019-01-30T13:00:38.000Z | tests/test_walker.py | reimannsum/DangerBot | 2d892b99b9f5582a752a46186291a23ab63eec55 | [
"MIT"
] | null | null | null | from os import path
from dangerbot.walker import Walker
def test_write():
walker1 = Walker()
walker1.login2(path.abspath(path.join('data', 'swansborough-park.html')))
assert walker1.write_log == """Log of MaltonMapper1 at [67, 52]
Location: Swansborough Park in Roftwood
AP: 37 Dead? True
Zombies: Zed: 0 Ded:0
"""
walker1.login2(path.abspath(path.join('data', 'warehouse.html')))
assert walker1.write_log == """Log of MaltonMapper1 at [59, 62]
Location: a warehouse [59, 62] in Tollyton
AP: 33 Dead? True
Condition: very strongly barricaded
Zombies: Zed: 0 Ded:0
"""
def test_move():
pass
| 25.08 | 77 | 0.69697 |
79431176cc9284a8ce419a05cdeffce665dab67c | 4,235 | py | Python | demisto_sdk/commands/test_content/tests/test_context_test.py | cpforbes/demisto-sdk | 4fe122e6f614e605f96bd47aa089b215dc7c2c5f | [
"MIT"
] | null | null | null | demisto_sdk/commands/test_content/tests/test_context_test.py | cpforbes/demisto-sdk | 4fe122e6f614e605f96bd47aa089b215dc7c2c5f | [
"MIT"
] | null | null | null | demisto_sdk/commands/test_content/tests/test_context_test.py | cpforbes/demisto-sdk | 4fe122e6f614e605f96bd47aa089b215dc7c2c5f | [
"MIT"
] | null | null | null | from functools import partial
from demisto_sdk.commands.common.constants import PB_Status
from demisto_sdk.commands.test_content.TestContentClasses import (
TestConfiguration, TestContext, TestPlaybook)
from demisto_sdk.commands.test_content.tests.build_context_test import (
generate_content_conf_json, generate_integration_configuration,
generate_secret_conf_json, generate_test_configuration,
get_mocked_build_context)
from demisto_sdk.commands.test_content.tests.DemistoClientMock import \
DemistoClientMock
from demisto_sdk.commands.test_content.tests.server_context_test import \
generate_mocked_server_context
def test_is_runnable_on_this_instance(mocker):
"""
Given:
- A test configuration configured to run only on instances that uses docker as container engine
When:
- The method _is_runnable_on_current_server_instance is invoked from the TestContext class
Then:
- Ensure that it returns False when the test is running on REHL instance that uses podman
- Ensure that it returns True when the test is running on a regular Linux instance that uses docker
"""
test_playbook_configuration = TestConfiguration(
generate_test_configuration(playbook_id='playbook_runnable_only_on_docker',
runnable_on_docker_only=True), default_test_timeout=30)
test_context_builder = partial(TestContext,
build_context=mocker.MagicMock(),
playbook=TestPlaybook(mocker.MagicMock(),
test_playbook_configuration),
client=mocker.MagicMock())
test_context = test_context_builder(server_context=mocker.MagicMock(is_instance_using_docker=False))
assert not test_context._is_runnable_on_current_server_instance()
test_context = test_context_builder(server_context=mocker.MagicMock(is_instance_using_docker=True))
assert test_context._is_runnable_on_current_server_instance()
def test_second_playback_enforcement(mocker, tmp_path):
"""
Given:
- A mockable test
When:
- The mockable test fails on the second playback
Then:
- Ensure that it exists in the failed_playbooks set
        - Ensure that it does not exist in the succeeded_playbooks list
"""
class RunIncidentTestMock:
call_count = 0
count_response_mapping = {
1: PB_Status.FAILED, # The first playback run
2: PB_Status.COMPLETED, # The record run
3: PB_Status.FAILED # The second playback run
}
@staticmethod
def run_incident_test(*_):
# First playback run
RunIncidentTestMock.call_count += 1
return RunIncidentTestMock.count_response_mapping[RunIncidentTestMock.call_count]
filtered_tests = ['mocked_playbook']
tests = [generate_test_configuration(playbook_id='mocked_playbook',
integrations=['mocked_integration'])]
integrations_configurations = [generate_integration_configuration('mocked_integration')]
secret_test_conf = generate_secret_conf_json(integrations_configurations)
content_conf_json = generate_content_conf_json(tests=tests)
build_context = get_mocked_build_context(mocker,
tmp_path,
secret_conf_json=secret_test_conf,
content_conf_json=content_conf_json,
filtered_tests_content=filtered_tests)
mocked_demisto_client = DemistoClientMock(integrations=['mocked_integration'])
server_context = generate_mocked_server_context(build_context, mocked_demisto_client, mocker)
mocker.patch('demisto_sdk.commands.test_content.TestContentClasses.TestContext._run_incident_test',
RunIncidentTestMock.run_incident_test)
server_context.execute_tests()
assert 'mocked_playbook (Second Playback)' in build_context.tests_data_keeper.failed_playbooks
assert 'mocked_playbook' not in build_context.tests_data_keeper.succeeded_playbooks
| 51.024096 | 107 | 0.704132 |
794311b373bf88ac4860ad006808628b3d03d5e9 | 7,593 | py | Python | elasticsearch/core/queryset.py | racker/slogger | bf076156d324c6d1d57dfdf36286f16e5aff2788 | [
"Apache-2.0"
] | null | null | null | elasticsearch/core/queryset.py | racker/slogger | bf076156d324c6d1d57dfdf36286f16e5aff2788 | [
"Apache-2.0"
] | null | null | null | elasticsearch/core/queryset.py | racker/slogger | bf076156d324c6d1d57dfdf36286f16e5aff2788 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import Facet, build_query, parse_query
class ElasticsearchQueryset(object):
"""
works a lot like django querysets in that they are lazily evaluated,
you can slice, and you chain infinite .limit()s and .filter()s. You
also use them just like django querysets - list(queryset) forces an
evaluation and you can iterate over them.
don't use this class directly
TODO: The entire result is stored in RAM, we should probably be able to
trade ram for more queries with some iterator magic
TODO: This doesn't keep track of position in the results list, so if you
slice a queryset after it's been evaluated it isn't smart enough to
avoid a refresh even though it may have the results already cached
    TODO: Facets are kind of tangential to the interactions with the documents,
should this be fixed somehow?
"""
def __init__(self, model, query=None):
self._model = model
self._client = model._client
self._query = query
if type(self._query) != list:
self._query = [self._query]
self._index = model._get_index()
self._doctype = model._get_doctype()
self._need_refresh = True
# Initialize local state for Queryset
self._raw_response = {}
self._time_took = None
self._timed_out = False
self._results = []
self._facets = None
self._faceted_on = []
self._total_results = None
self._order_by = None
self._size = 100
self._offset = 0 # start at the beginning by default
def __list__(self):
"""
forces query evaluation
"""
return self.results
def __iter__(self):
"""
forces query evaluation
"""
#FIXME: we should make a way to iterate over a queryset but not load the entire
#result into ram
for v in self.__list__():
yield v
def __repr__(self):
return str(self.__list__())
def __getitem__(self, index):
# sanitycheck straight from django queryset
if not isinstance(index, (slice, int, long)):
raise TypeError
assert ((not isinstance(index, slice) and (index >= 0))
or (isinstance(index, slice) and (index.start is None or index.start >= 0)
and (index.stop is None or index.stop >= 0))), "Negative indexing is not supported."
self._need_refresh = True
if type(index) == slice:
if index.start:
self._offset = index.start
if index.stop:
self._size = index.stop - self._offset
return self
else:
# evaluate the queryset if needed and try to index the result
# list, throw if out of range
# TODO: need_refresh being set to true above means that refresh
# will always be needed
return self.results[index]
def _parse_facets(self, response):
"""
helper that parses out facets from raw responses
@param: response - raw elasticsearch search response
@returns: dict - parsed document like so - {'tag': [('agent', 3), ('db', 1)], 'provider_type': [(5, 4)]}
"""
facets = {}
for k, v in response.get('facets', {}).iteritems():
fl = []
for facet in v['terms']:
fl.append((facet['term'], facet['count']))
facets[k] = fl
return facets
def _parse_results(self, response):
"""
helper that parses out results from raw responses
@param: response - raw elasticsearch search response
@returns: list of documents
"""
self._total_results = response.get('hits', {}).get('total', 0)
results = []
if self._total_results:
for hit in response['hits']['hits']:
results.append(self._model(**hit['_source']))
return results
def _parse_raw_response(self, response):
"""
parse out results from raw responses and set up class private vars
@param: response - raw elasticsearch search response
@returns: None
"""
self._raw_response = response
# some stats
self._time_took = response.get('took')
self._timed_out = response.get('timed_out')
# parse out the list of results
self._results = self._parse_results(response)
# parse out any facets
self._facets = self._parse_facets(response)
self._need_refresh = False
def _refresh(self):
"""
evaluates the current query and updates class vars
"""
query = build_query(self._query, facets=self._faceted_on)
response = self._client.search(self._index, self._doctype, query, order_by=self._order_by, size=self._size, offset=self._offset)
self._parse_raw_response(response)
def filter(self, query_string=None, **kwargs):
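        """
        adds filters to the queryset, parsed from a query string and/or kwargs
        (brief note added for clarity; see utils.parse_query for the accepted forms)
        @param: query_string - optional query string to parse into filters
        @returns: ElasticsearchQueryset - self
        """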
queries = parse_query(query_string, **kwargs)
self._query.extend(queries)
self._need_refresh = True
return self
def order_by(self, order_by):
"""
sorts the current query
@param: order_by - string - field name. Sorts by ascending by default, prepend a '-' to sort by descending
"""
order = 'asc'
if order_by[0] == '-':
order = 'desc'
order_by = order_by[1:]
self._order_by = '%s:%s' % (order_by, order)
self._need_refresh = True
return self
def facet(self, facet):
"""
adds a facet
@param: facet - Facet object or list of objects
@returns: ElasticsearchQueryset - self
"""
if facet and type(facet) != list:
facet = [facet]
for f in facet:
self._faceted_on.append(Facet(str(f)))
return self
def limit(self, limit):
"""
limits the size of the queryset
        @param: limit - int - maximum number of results to return
        @returns: ElasticsearchQueryset - self
"""
self._size = limit
return self
def count(self):
"""
returns current total size of the queryset
@returns: int - number of results
TODO: use the count api
"""
if self._need_refresh:
self._refresh()
return int(len(self._results))
@property
def results(self):
"""
evaluates the query if needed and returns results
@returns: list of documents
"""
if self._need_refresh:
self._refresh()
return self._results
@property
def facets(self):
"""
evaluates the query if needed and returns facets
@returns: dict of facets like so {'tag': [('agent', 3), ('db', 1)], 'provider_type': [(5, 4)]}
"""
if self._need_refresh:
self._refresh()
return self._facets
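# A minimal usage sketch (SomeModel is a hypothetical model class and the field
# names are illustrative; the query is only sent to Elasticsearch when results
# are first needed):
#
#   qs = ElasticsearchQueryset(SomeModel, query=parse_query("tag:agent"))
#   qs = qs.facet(Facet('provider_type')).order_by('-updated_at').limit(10)
#   for doc in qs:            # evaluation happens here
#       print(doc)
#   print(qs.facets)          # e.g. {'provider_type': [(5, 4)]}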
| 31.246914 | 136 | 0.5995 |
794311be4e4575a6f4d0c55ef9b40a59bd747f2a | 1,347 | py | Python | deeplearning4j-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/weights/scripts/embedding_conv1d_extended.py | dileeshvar/deeplearning4j | 973ebc9fd3522c3d725b81f0caaebeadb46481ad | [
"Apache-2.0"
] | 1 | 2021-11-10T12:29:21.000Z | 2021-11-10T12:29:21.000Z | deeplearning4j-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/weights/scripts/embedding_conv1d_extended.py | dileeshvar/deeplearning4j | 973ebc9fd3522c3d725b81f0caaebeadb46481ad | [
"Apache-2.0"
] | null | null | null | deeplearning4j-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/weights/scripts/embedding_conv1d_extended.py | dileeshvar/deeplearning4j | 973ebc9fd3522c3d725b81f0caaebeadb46481ad | [
"Apache-2.0"
] | 1 | 2018-08-02T10:49:43.000Z | 2018-08-02T10:49:43.000Z | import keras
from keras.models import Sequential, save_model
from keras.layers import Embedding, Convolution1D, Flatten, Dense, Dropout
import keras.backend as K
import numpy as np
base_path = "../../../../../../../../resources/weights/"
backend = K.backend()
version = keras.__version__
major_version = int(version[0])
n_in = 4
n_out = 6
output_dim = 5
input_length = 10
mb = 42
kernel = 3
embedding_dim = 50
max_words = 200
input_length = 10
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=input_length))
model.add(Convolution1D(128, kernel_size=3, activation='relu'))
model.add(Convolution1D(64, kernel_size=3, activation='relu'))
model.add(Convolution1D(32, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(128, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='mse', optimizer='adam')
input_array = np.random.randint(n_in, size=(mb, input_length))
output_array = model.predict(input_array)
assert output_array.shape == (mb, 1)
print("Saving model with embedding into several Conv1D layers into Flatten and Dense for backend {} and keras major version {}".format(backend, major_version))
model.save("{}embedding_conv1d_extended_{}_{}.h5".format(base_path, backend, major_version))
| 30.613636 | 159 | 0.755011 |
794311c790a1e41d552298c03f498c3e24de04b3 | 249 | py | Python | manage.py | vigzmv/AB-Split-Tester | 36a1c4c7b99f933b3f0f4be1219d0292f4ff6955 | [
"MIT"
] | 1 | 2016-07-26T18:03:42.000Z | 2016-07-26T18:03:42.000Z | manage.py | htadg/innovisionNSIT | 6e1537eeb5a8459d3032389f61e7cc4617b477b6 | [
"MIT"
] | null | null | null | manage.py | htadg/innovisionNSIT | 6e1537eeb5a8459d3032389f61e7cc4617b477b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Tester.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.636364 | 70 | 0.771084 |
79431207d0c4c6074327393511cb6c6eeaff52b0 | 101 | py | Python | model/SenderType.py | adhocmaster/netmad | fe6c115d71ebeb8c689cdd1b8bed80ac35757681 | [
"MIT"
] | null | null | null | model/SenderType.py | adhocmaster/netmad | fe6c115d71ebeb8c689cdd1b8bed80ac35757681 | [
"MIT"
] | null | null | null | model/SenderType.py | adhocmaster/netmad | fe6c115d71ebeb8c689cdd1b8bed80ac35757681 | [
"MIT"
] | null | null | null | from enum import Enum
class SenderType(Enum):
Noob = "Noob"
Tahoe = "Tahoe"
BBR = "BBR" | 14.428571 | 23 | 0.60396 |
7943123965a955c41882c54aaa52435f865c88bd | 1,601 | py | Python | setup.py | sbienkow/eg | 96142a74f4416b4a7000c85032c070df713b849e | [
"MIT"
] | 2 | 2019-11-08T15:04:33.000Z | 2020-10-28T03:13:43.000Z | setup.py | sbienkow/eg | 96142a74f4416b4a7000c85032c070df713b849e | [
"MIT"
] | null | null | null | setup.py | sbienkow/eg | 96142a74f4416b4a7000c85032c070df713b849e | [
"MIT"
] | null | null | null | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Also bump at eg.eg_util.VERSION
VERSION = '1.1.1'
LONG_DESCRIPTION = """
eg provides examples at the command line.
Many commands can be difficult to remember. Man pages work great, but can
provide too much information to be useful at a glance. You'll likely still have
to turn to the internet for some examples.
eg tries to minimize that need by providing useful examples at the command line.
`eg find` will give you useful examples of the find command right in the
terminal.
eg is extensible. If you have a particular command you like to use (like a
specific use of awk, resetting a home server, etc) you can add these to a custom
directory and eg will show you those results first when you type the relevant
command.
eg is colorful. By default eg uses colors. This is pretty. You can customize
these colors to whatever scheme you want.
See the webpage for more information.
"""
# The version here must match the version in the code itself. Currently they
# have to be updated in both places.
config = {
'name': 'eg',
'description': 'Examples at the command line',
'long_description': LONG_DESCRIPTION,
'author': 'Sam Sudar',
'url': 'https://github.com/srsudar/eg',
'license': 'MIT',
'author_email': '[email protected]',
'version': VERSION,
'install_requires': [],
'test_requires': ['mock', 'pytest'],
'packages': ['eg'],
'scripts': ['bin/eg'],
'package_data': {
'eg': ['examples/*']
},
'zip_safe': False,
}
setup(**config)
| 30.207547 | 80 | 0.705184 |
7943126343af57807ba2de95b03d0f73effcc8eb | 1,395 | py | Python | blockChain/modularDivision.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | 1 | 2021-08-18T15:54:30.000Z | 2021-08-18T15:54:30.000Z | blockChain/modularDivision.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | blockChain/modularDivision.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | from typing import Tuple
def modular_division(a: int, b: int, n: int) -> int:
assert n > 1 and a > 0 and greatest_common_divisor(a, n) == 1
    (d, t, s) = extended_gcd(n, a)  # s is the modular inverse of a mod n
    x = (b * s) % n
    return x
def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
if b < 0:
b = (b % n + n) % n
return b
def modular_division2(a: int, b: int, n: int) -> int:
s = invert_modulo(a, n)
x = (b *s) % n
return x
def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
assert a >= 0 and b >= 0
if b == 0:
d, x, y = a, 1, 0
else:
        (d, p, q) = extended_gcd(b, a % b)
        x = q
        y = p - q * (a // b)
assert a % d == 0 and b %d == 0
assert d == a * x + b * y
return (d, x, y)
def extended_euclid(a: int, b: int) -> Tuple[int, int]:
if b == 0:
return (1, 0)
(x, y) = extended_euclid(b, a % b)
k = a // b
return (y, x - k * y)
def greatest_common_divisor(a: int, b: int) -> int:
if a < b:
a, b = b, a
while a % b != 0:
a, b = b, a % b
return b
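# A small worked example (a sketch; not part of the doctest suite below):
# modular_division(4, 8, 5) solves 4*x = 8 (mod 5). Since 8 = 3 (mod 5) and
# 4*2 = 8 = 3 (mod 5), the answer is x = 2.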
if __name__ == "__main__":
from doctest import testmod
testmod(name="modular_division", verbose=True)
testmod(name="modular_division2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_gcd", verbose=True)
testmod(name="extended_euclid", verbose=True)
testmod(name="greatest_common_divisor", verbose=True)
| 24.051724 | 65 | 0.551971 |
7943128d5a6c12eaec8a4457d03a793052f4230a | 3,187 | py | Python | home/migrations/0001_initial.py | CooloiStudio/Turanga.deskxd.com | 64d7a5e3128c85d1d3be38d36913d79b16cff537 | [
"MIT"
] | null | null | null | home/migrations/0001_initial.py | CooloiStudio/Turanga.deskxd.com | 64d7a5e3128c85d1d3be38d36913d79b16cff537 | [
"MIT"
] | null | null | null | home/migrations/0001_initial.py | CooloiStudio/Turanga.deskxd.com | 64d7a5e3128c85d1d3be38d36913d79b16cff537 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-28 09:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BasePage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('remark', models.TextField(null=True)),
],
),
migrations.CreateModel(
name='Languages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.IntegerField(unique=True)),
('name', models.CharField(max_length=50)),
('text', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=100)),
('sort', models.IntegerField(unique=True)),
('url', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='MenuInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.Languages')),
('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.Menu')),
],
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=100)),
('sort', models.IntegerField(unique=True)),
('img', models.CharField(max_length=500)),
('url', models.CharField(max_length=100)),
('basepage', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.BasePage')),
],
),
migrations.CreateModel(
name='SectionInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, null=True)),
('subtitle', models.CharField(max_length=200, null=True)),
('text', models.TextField(null=True)),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.Languages')),
('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.Section')),
],
),
]
| 42.493333 | 114 | 0.564481 |
794312d62dd8f6d18cbd420bb102244c240394da | 3,706 | py | Python | twitter_app_auth/tests/api/test_invalid_creds.py | rohitkadam19/API-Automation | 96dea5686d1196a942594f82b532bf5eedcc1ec2 | [
"Apache-2.0"
] | 1 | 2018-11-02T14:31:14.000Z | 2018-11-02T14:31:14.000Z | twitter_app_auth/tests/api/test_invalid_creds.py | rohitkadam19/API-Automation | 96dea5686d1196a942594f82b532bf5eedcc1ec2 | [
"Apache-2.0"
] | null | null | null | twitter_app_auth/tests/api/test_invalid_creds.py | rohitkadam19/API-Automation | 96dea5686d1196a942594f82b532bf5eedcc1ec2 | [
"Apache-2.0"
] | null | null | null | import json
import logging
from twitterauth.session import Session
from twitterauth.test import APITestCase
from twitterauth.configs import settings
from twitterauth.utils import helper
from twitterauth.utils import payload
LOGGER = logging.getLogger("twitter")
invalid_creds_err_msg = "Unable to verify your credentials"
class TestInvalidUserCredentials(APITestCase):
@classmethod
def setUpClass(self):
self.session = Session().get_session()
self.base_url = settings.api.url
def test_oauth_with_invalid_creds(self):
"""
Verify twitter auth API returns error when passed
invalid credentials.
"""
headers = payload.get_oauth_headers("invalid_creds")
data = payload.get_oauth_data()
response = self.session.post(self.base_url + "/oauth2/token",
data=data,
headers=headers)
assert response.status_code == 403
LOGGER.info(response.text)
# Assert error message for invalid credentials
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
invalid_creds_err_msg
def test_oauth_with_invalid_consumer_key(self):
"""
Verify twitter auth API returns error when passed
invalid consumer key.
"""
headers = payload.get_oauth_headers(
helper.getBase64Value(
consumer_key="invalid_consumer_key"
))
data = payload.get_oauth_data()
response = self.session.post(self.base_url + "/oauth2/token",
data=data,
headers=headers)
assert response.status_code == 403
LOGGER.info(response.text)
# Assert error message for invalid credentials
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
invalid_creds_err_msg
def test_oauth_with_invalid_secret_key(self):
"""
Verify twitter auth API returns error when passed
invalid secret key.
"""
headers = payload.get_oauth_headers(
helper.getBase64Value(
secret_key="invalid_secret_key"
))
data = payload.get_oauth_data()
response = self.session.post(self.base_url + "/oauth2/token",
data=data,
headers=headers)
assert response.status_code == 403
LOGGER.info(response.text)
# Assert error message for invalid credentials
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
invalid_creds_err_msg
def test_oauth_with_other_acc_consumer_key(self):
"""
Verify twitter auth API returns error when passed
consumer key of one account and secret key of other account.
"""
headers = payload.get_oauth_headers(
helper.getBase64Value(
consumer_key=settings.api.other_acc_consumer_key
))
data = payload.get_oauth_data()
response = self.session.post(self.base_url + "/oauth2/token",
data=data,
headers=headers)
assert response.status_code == 403
LOGGER.info(response.text)
# Assert error message for invalid credentials
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
invalid_creds_err_msg
| 35.295238 | 71 | 0.601997 |
7943142e3d973e1da0cdaf9463a79c69b04c4f78 | 12,582 | py | Python | nova/api/validation/parameter_types.py | mertakozcan/nova | 6e4ab9714cc0ca147f61997aa7b68f88185ade5c | [
"Apache-2.0"
] | 1 | 2016-07-18T22:05:01.000Z | 2016-07-18T22:05:01.000Z | nova/api/validation/parameter_types.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | null | null | null | nova/api/validation/parameter_types.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 1 | 2021-11-12T03:55:41.000Z | 2021-11-12T03:55:41.000Z | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common parameter types for validating request Body.
"""
import copy
import functools
import re
import unicodedata
import six
from nova.i18n import _
from nova.objects import tag
_REGEX_RANGE_CACHE = {}
def memorize(func):
@functools.wraps(func)
def memorizer(*args, **kwargs):
global _REGEX_RANGE_CACHE
key = "%s:%s:%s" % (func.__name__, hash(str(args)), hash(str(kwargs)))
value = _REGEX_RANGE_CACHE.get(key)
if value is None:
value = func(*args, **kwargs)
_REGEX_RANGE_CACHE[key] = value
return value
return memorizer
def _reset_cache():
global _REGEX_RANGE_CACHE
_REGEX_RANGE_CACHE = {}
def single_param(schema):
"""Macro function for use in JSONSchema to support query parameters that
should have only one value.
"""
ret = multi_params(schema)
ret['maxItems'] = 1
return ret
def multi_params(schema):
"""Macro function for use in JSONSchema to support query parameters that
may have multiple values.
"""
return {'type': 'array', 'items': schema}
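# For example (a sketch): multi_params({'type': 'string'}) yields
# {'type': 'array', 'items': {'type': 'string'}}, and single_param() produces
# the same schema with 'maxItems': 1 so only a single value is accepted.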
# NOTE: We don't check actual values of queries on params
# which are defined as the following common_param.
# Please note those are for backward compatible existing
# query parameters because previously multiple parameters
# might be input and accepted.
common_query_param = multi_params({'type': 'string'})
common_query_regex_param = multi_params({'type': 'string', 'format': 'regex'})
class ValidationRegex(object):
def __init__(self, regex, reason):
self.regex = regex
self.reason = reason
def _is_printable(char):
"""determine if a unicode code point is printable.
This checks if the character is either "other" (mostly control
codes), or a non-horizontal space. All characters that don't match
those criteria are considered printable; that is: letters;
combining marks; numbers; punctuation; symbols; (horizontal) space
separators.
"""
category = unicodedata.category(char)
return (not category.startswith("C") and
(not category.startswith("Z") or category == "Zs"))
def _get_all_chars():
for i in range(0xFFFF):
yield six.unichr(i)
# build a regex that matches all printable characters. This allows
# spaces in the middle of the name. Also note that the regexp below
# deliberately allows the empty string. This is so only the constraint
# which enforces a minimum length for the name is triggered when an
# empty string is tested. Otherwise it is not deterministic which
# constraint fails and this causes issues for some unittests when
# PYTHONHASHSEED is set randomly.
@memorize
def _build_regex_range(ws=True, invert=False, exclude=None):
"""Build a range regex for a set of characters in utf8.
This builds a valid range regex for characters in utf8 by
iterating the entire space and building up a set of x-y ranges for
all the characters we find which are valid.
:param ws: should we include whitespace in this range.
:param exclude: any characters we want to exclude
:param invert: invert the logic
The inversion is useful when we want to generate a set of ranges
which is everything that's not a certain class. For instance,
    produce all the non-printable characters as a set of ranges.
"""
if exclude is None:
exclude = []
regex = ""
# are we currently in a range
in_range = False
# last character we found, for closing ranges
last = None
# last character we added to the regex, this lets us know that we
# already have B in the range, which means we don't need to close
    # it out with B-B. While the latter seems to work, it's kind of bad form.
last_added = None
def valid_char(char):
if char in exclude:
result = False
elif ws:
result = _is_printable(char)
else:
# Zs is the unicode class for space characters, of which
# there are about 10 in this range.
result = (_is_printable(char) and
unicodedata.category(char) != "Zs")
if invert is True:
return not result
return result
    # iterate through the entire character range
for c in _get_all_chars():
if valid_char(c):
if not in_range:
regex += re.escape(c)
last_added = c
in_range = True
else:
if in_range and last != last_added:
regex += "-" + re.escape(last)
in_range = False
last = c
else:
if in_range:
regex += "-" + re.escape(c)
return regex
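# For example (a sketch): _build_regex_range(ws=False, invert=True) yields the
# ranges of characters that are either non-printable or spaces, which
# valid_name_regex below uses in its lookarounds to reject names that start or
# end with whitespace.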
valid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'
valid_name_regex = ValidationRegex(
valid_name_regex_base % (
_build_regex_range(ws=False, invert=True),
_build_regex_range(),
_build_regex_range(ws=False, invert=True)),
_("printable characters. Can not start or end with whitespace."))
# This regex allows leading/trailing whitespace
valid_name_leading_trailing_spaces_regex_base = (
"^[%(ws)s]*[%(no_ws)s]+[%(ws)s]*$|"
"^[%(ws)s]*[%(no_ws)s][%(no_ws)s%(ws)s]+[%(no_ws)s][%(ws)s]*$")
valid_az_name_regex = ValidationRegex(
valid_name_regex_base % (
_build_regex_range(ws=False, invert=True),
_build_regex_range(exclude=[':']),
_build_regex_range(ws=False, invert=True)),
_("printable characters except :."
"Can not start or end with whitespace."))
# az's name disallow ':'.
valid_az_name_leading_trailing_spaces_regex = ValidationRegex(
valid_name_leading_trailing_spaces_regex_base % {
'ws': _build_regex_range(exclude=[':']),
'no_ws': _build_regex_range(ws=False, exclude=[':'])},
_("printable characters except :, "
"with at least one non space character"))
valid_name_leading_trailing_spaces_regex = ValidationRegex(
valid_name_leading_trailing_spaces_regex_base % {
'ws': _build_regex_range(),
'no_ws': _build_regex_range(ws=False)},
_("printable characters with at least one non space character"))
valid_name_regex_obj = re.compile(valid_name_regex.regex, re.UNICODE)
valid_description_regex_base = '^[%s]*$'
valid_description_regex = valid_description_regex_base % (
_build_regex_range())
boolean = {
'type': ['boolean', 'string'],
'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
'YES', 'Yes', 'yes',
False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
'NO', 'No', 'no'],
}
none = {
'enum': ['None', None, {}]
}
name_or_none = {
'oneOf': [
{'type': 'string', 'minLength': 1, 'maxLength': 255},
{'type': 'null'},
]
}
positive_integer = {
'type': ['integer', 'string'],
'pattern': '^[0-9]*$', 'minimum': 1, 'minLength': 1
}
non_negative_integer = {
'type': ['integer', 'string'],
'pattern': '^[0-9]*$', 'minimum': 0, 'minLength': 1
}
# This is only used by nova-network specific APIs. It will be removed when
# those APIs are removed.
positive_integer_with_empty_str = {
'type': ['integer', 'string'],
'pattern': '^[0-9]*$', 'minimum': 1,
}
hostname = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
# NOTE: 'host' is defined in "services" table, and that
# means a hostname. The hostname grammar in RFC952 does
# not allow for underscores in hostnames. However, this
# schema allows them, because it sometimes occurs in
# real systems.
'pattern': '^[a-zA-Z0-9-._]*$',
}
hostname_or_ip_address = {
# NOTE: Allow to specify hostname, ipv4 and ipv6.
'type': 'string', 'maxLength': 255,
'pattern': '^[a-zA-Z0-9-_.:]*$'
}
name = {
# NOTE: Nova v2.1 API contains some 'name' parameters such
# as keypair, server, flavor, aggregate and so on. They are
    # stored in the DB and are Nova-specific parameters.
# This definition is used for all their parameters.
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'name'
}
az_name = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'az_name'
}
az_name_with_leading_trailing_spaces = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'az_name_with_leading_trailing_spaces'
}
name_with_leading_trailing_spaces = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'name_with_leading_trailing_spaces'
}
description = {
'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
'pattern': valid_description_regex,
}
# TODO(stephenfin): This is no longer used and should be removed
tcp_udp_port = {
'type': ['integer', 'string'], 'pattern': '^[0-9]*$',
'minimum': 0, 'maximum': 65535,
'minLength': 1
}
project_id = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'pattern': '^[a-zA-Z0-9-]*$'
}
server_id = {
'type': 'string', 'format': 'uuid'
}
image_id = {
'type': 'string', 'format': 'uuid'
}
image_id_or_empty_string = {
'oneOf': [
{'type': 'string', 'format': 'uuid'},
{'type': 'string', 'maxLength': 0}
]
}
volume_id = {
'type': 'string', 'format': 'uuid'
}
volume_type = {
'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255
}
network_id = {
'type': 'string', 'format': 'uuid'
}
network_port_id = {
'type': 'string', 'format': 'uuid'
}
admin_password = {
# NOTE: admin_password is the admin password of a server
    # instance, and it is not stored in nova's database.
    # In addition, users sometimes set long/strange strings
    # as passwords. It is unnecessary to limit string length
# and string pattern.
'type': 'string',
}
flavor_ref = {
'type': ['string', 'integer'], 'minLength': 1
}
metadata = {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9-_:. ]{1,255}$': {
'type': 'string', 'maxLength': 255
}
},
'additionalProperties': False
}
metadata_with_null = copy.deepcopy(metadata)
metadata_with_null['patternProperties']['^[a-zA-Z0-9-_:. ]{1,255}$']['type'] =\
['string', 'null']
mac_address = {
'type': 'string',
'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
}
ip_address = {
'type': 'string',
'oneOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
ipv4 = {
'type': 'string', 'format': 'ipv4'
}
ipv6 = {
'type': 'string', 'format': 'ipv6'
}
cidr = {
'type': 'string', 'format': 'cidr'
}
volume_size = {
'type': ['integer', 'string'],
'pattern': '^[0-9]+$',
'minimum': 1,
# maximum's value is limited to db constant's MAX_INT
# (in nova/db/constants.py)
'maximum': 0x7FFFFFFF
}
disk_config = {
'type': 'string',
'enum': ['AUTO', 'MANUAL']
}
accessIPv4 = {
'type': 'string',
'format': 'ipv4',
}
accessIPv6 = {
'type': 'string',
'format': 'ipv6',
}
flavor_param_positive = copy.deepcopy(volume_size)
flavor_param_non_negative = copy.deepcopy(volume_size)
flavor_param_non_negative['minimum'] = 0
personality = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'path': {'type': 'string'},
'contents': {
'type': 'string',
'format': 'base64'
}
},
'additionalProperties': False,
}
}
tag = {
"type": "string",
"minLength": 1, "maxLength": tag.MAX_TAG_LENGTH,
"pattern": "^[^,/]*$"
}
pagination_parameters = {
'limit': multi_params(non_negative_integer),
'marker': multi_params({'type': 'string'})
}
# The trusted_certs list is restricted to a maximum of 50 IDs.
# "null" is allowed to unset/reset trusted certs during rebuild.
trusted_certs = {
"type": ["array", "null"],
"minItems": 1,
"maxItems": 50,
"uniqueItems": True,
"items": {
"type": "string",
"minLength": 1,
}
}
| 25.521298 | 79 | 0.623987 |
794315780d205e71e57b6539a5d7a33d404b81f5 | 32,909 | py | Python | pypureclient/flashblade/FB_2_1/api/directory_services_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flashblade/FB_2_1/api/directory_services_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flashblade/FB_2_1/api/directory_services_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.1, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class DirectoryServicesApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api21_directory_services_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.DirectoryServiceGetResponse
"""GET directory-services
List directory service configuration information for the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_directory_services_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: DirectoryServiceGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api21_directory_services_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api21_directory_services_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/directory-services', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectoryServiceGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_directory_services_patch_with_http_info(
self,
directory_service=None, # type: models.DirectoryService
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.DirectoryServiceResponse
"""PATCH directory-services
Modifies and tests the directory service configuration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_directory_services_patch_with_http_info(directory_service, async_req=True)
>>> result = thread.get()
:param DirectoryService directory_service: (required)
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: DirectoryServiceResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'directory_service' is set
if directory_service is None:
raise TypeError("Missing the required parameter `directory_service` when calling `api21_directory_services_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'directory_service' in params:
body_params = params['directory_service']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/directory-services', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectoryServiceResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_directory_services_roles_get_with_http_info(
self,
continuation_token=None, # type: str
ids=None, # type: List[str]
filter=None, # type: str
limit=None, # type: int
offset=None, # type: int
role_ids=None, # type: List[str]
role_names=None, # type: List[str]
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.DirectoryServiceRolesGetResponse
"""GET directory-service/roles
Return array's RBAC group configuration settings for manageability.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_directory_services_roles_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `role_names` or `role_ids` query parameters.
:param str filter: Exclude resources that don't match the specified criteria.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] role_ids: A comma-separated list of role_ids. If after filtering, there is not at least one resource that matches each of the elements of `role_ids`, then an error is returned. This cannot be provided together with the `ids` or `role_names` query parameters.
:param list[str] role_names: A comma-separated list of role_names. If there is not at least one resource that matches each of the elements of `role_names`, then an error is returned. This cannot be provided together with the `ids` or `role_ids` query parameters.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: DirectoryServiceRolesGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if role_ids is not None:
if not isinstance(role_ids, list):
role_ids = [role_ids]
if role_names is not None:
if not isinstance(role_names, list):
role_names = [role_names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api21_directory_services_roles_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api21_directory_services_roles_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'role_ids' in params:
query_params.append(('role_ids', params['role_ids']))
collection_formats['role_ids'] = 'csv'
if 'role_names' in params:
query_params.append(('role_names', params['role_names']))
collection_formats['role_names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/directory-services/roles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectoryServiceRolesGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_directory_services_roles_patch_with_http_info(
self,
directory_service_roles=None, # type: models.DirectoryServiceRole
ids=None, # type: List[str]
role_ids=None, # type: List[str]
role_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.DirectoryServiceRolesResponse
"""PATCH directory-service/roles
Update an RBAC group configuration setting for manageability.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_directory_services_roles_patch_with_http_info(directory_service_roles, async_req=True)
>>> result = thread.get()
:param DirectoryServiceRole directory_service_roles: (required)
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `role_names` or `role_ids` query parameters.
:param list[str] role_ids: A comma-separated list of role_ids. If after filtering, there is not at least one resource that matches each of the elements of `role_ids`, then an error is returned. This cannot be provided together with the `ids` or `role_names` query parameters.
:param list[str] role_names: A comma-separated list of role_names. If there is not at least one resource that matches each of the elements of `role_names`, then an error is returned. This cannot be provided together with the `ids` or `role_ids` query parameters.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: DirectoryServiceRolesResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if role_ids is not None:
if not isinstance(role_ids, list):
role_ids = [role_ids]
if role_names is not None:
if not isinstance(role_names, list):
role_names = [role_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'directory_service_roles' is set
if directory_service_roles is None:
raise TypeError("Missing the required parameter `directory_service_roles` when calling `api21_directory_services_roles_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'role_ids' in params:
query_params.append(('role_ids', params['role_ids']))
collection_formats['role_ids'] = 'csv'
if 'role_names' in params:
query_params.append(('role_names', params['role_names']))
collection_formats['role_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'directory_service_roles' in params:
body_params = params['directory_service_roles']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/directory-services/roles', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectoryServiceRolesResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_directory_services_test_get_with_http_info(
self,
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.TestResultGetResponse
"""GET directory-services/test
Test the configured directory services on the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_directory_services_test_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str filter: Exclude resources that don't match the specified criteria.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: TestResultGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api21_directory_services_test_get`, must be a value greater than or equal to `1`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/directory-services/test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TestResultGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_directory_services_test_patch_with_http_info(
self,
filter=None, # type: str
ids=None, # type: List[str]
names=None, # type: List[str]
sort=None, # type: List[str]
directory_service=None, # type: models.DirectoryService
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.TestResultResponse
"""PATCH directory-service/test
Test the configured directory services on the array. Optionally, provide modifications which will be used to perform the tests, but will not be applied to the current configuration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_directory_services_test_patch_with_http_info(async_req=True)
>>> result = thread.get()
:param str filter: Exclude resources that don't match the specified criteria.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param DirectoryService directory_service: An optional directory service configuration that, if provided, will be used to overwrite aspects of the existing directory service objects when performing tests.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: TestResultResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'directory_service' in params:
body_params = params['directory_service']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/directory-services/test', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TestResultResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
| 48.681953 | 449 | 0.641101 |
79431807bf162402f8003bee7a54069474209e8b | 237 | py | Python | 2015/07/supreme-dissent-20150702/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2015/07/supreme-dissent-20150702/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2015/07/supreme-dissent-20150702/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1Nj53xe9i0TWRbEIK22bXMdu0qkUyFZsbQBJExn6Pki8'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
| 23.7 | 77 | 0.805907 |
7943185d834409070f68f8d5be7f0487b9f1c581 | 7,330 | py | Python | navutils/menu.py | tboulogne/django-navutils | 5882f72153de6ed70ed11ce97c89c395e3ed022e | [
"BSD-3-Clause"
] | null | null | null | navutils/menu.py | tboulogne/django-navutils | 5882f72153de6ed70ed11ce97c89c395e3ed022e | [
"BSD-3-Clause"
] | null | null | null | navutils/menu.py | tboulogne/django-navutils | 5882f72153de6ed70ed11ce97c89c395e3ed022e | [
"BSD-3-Clause"
] | null | null | null | try:
# Django 1.10+
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from persisting_theory import Registry
class Menus(Registry):
""" Keep a reference to all menus"""
look_into = 'menu'
def prepare_name(self, data, name=None):
return data.id
registry = Menus()
register = registry.register
class Menu(Registry):
"""A collection of nodes"""
def __init__(self, id, *args, **kwargs):
self.id = id
self.template = kwargs.pop('template', 'navutils/menu.html')
self.context = kwargs.pop('context', {})
super(Menu, self).__init__(*args, **kwargs)
def prepare_name(self, data, name=None):
return data.id
def get_context(self, context):
context.update(self.context)
return context
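# Illustrative usage sketch (not part of the original module); the menu id,
# node ids and URL pattern names below are hypothetical:
#
#   main_menu = Menu('main')
#   main_menu.register(Node('home', 'Home', pattern_name=('home',)))
#   register(main_menu)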
class Node(object):
parent = None
def __init__(self, id, label, pattern_name=None, url=None, weight=0, title=None,
template='navutils/node.html', children=[], css_class=None,
reverse_kwargs=[], attrs={}, link_attrs={}, context={}, **kwargs):
"""
:param str id: a unique identifier for further retrieval
:param str label: a label for the node, that will be displayed in templates
:param str pattern_name: the name of a django url, such as `myapp:index` to use
as a link for the node. It will be automatically reversed.
:param str url: a URL to use as a link for the node
:param int weight: The importance of the node. Higher is more\
important, default to ``0``.
:param list reverse_kwargs: A list of strings that the pattern_name will\
accept when reversing. Defaults to ``[]``
:param list children: A list of children :py:class:`Node` instances\
        that will be considered as submenus of this instance. You can also pass\
a callable that will return an iterable of menu nodes.
Defaults to ``[]``.
:param str css_class: a CSS class that will be applied to the node when
rendering
:param str template: the template that will be used to render the node.\
defaults to `navutils/menu/node.html`
:param dict node_attrs: a dictionnary of attributes to apply to the node
html
:param dict link_attrs: a dictionnary of attributes to apply to the node
link html
"""
if pattern_name and url:
raise ValueError('MenuNode accepts either a url or a pattern_name arg, but not both')
if not pattern_name and not url:
raise ValueError('MenuNode needs either a url or a pattern_name arg')
self._id = id
self.pattern_name = pattern_name
self.url = url
self.label = label
self.weight = weight
self.template = template
self.css_class = css_class
self.reverse_kwargs = reverse_kwargs
self.link_attrs = link_attrs
self.attrs = attrs
self.context = context
self.kwargs = kwargs
if 'class' in self.attrs:
raise ValueError('CSS class is handled via the css_class argument, don\'t use attrs for this purpose')
self._children = children
if not hasattr(self._children, '__call__'):
self._children = []
for node in children:
self.add(node)
def get_context(self, context):
context.update(self.context)
return context
@property
def children(self):
if hasattr(self._children, '__call__'):
return self._children()
return self._children
def get_url(self, **kwargs):
"""
:param kwargs: a dictionary of values that will be used for reversing,\
if the corresponding key is present in :py:attr:`self.reverse_kwargs\
<Node.reverse_kwargs>`
:return: The target URL of the node, after reversing (if needed)
"""
if self.pattern_name:
args = False
            if len(self.pattern_name) > 1:
args = self.pattern_name[1]
return reverse(self.pattern_name[0], args=args)
return self.url
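    # Illustrative note (not from the original source): this fork treats
    # ``pattern_name`` as a sequence, e.g. a hypothetical
    # ``Node('faq', 'FAQ', pattern_name=('pages:faq',))`` reverses the
    # ``pages:faq`` URL name, and an optional second element is forwarded to
    # ``reverse()`` as its ``args``.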
def add(self, node):
"""
Add a new node to the instance children and sort them by weight.
:param node: A node instance
"""
node.parent = self
self._children.append(node)
self._children = sorted(
self._children,
key=lambda i: i.weight,
reverse=True
)
def is_viewable_by(self, user, context={}):
return True
@property
def id(self):
if self.parent:
return '{0}:{1}'.format(self.parent.id, self._id)
return self._id
@property
def depth(self):
return 0 if not self.parent else self.parent.depth + 1
def __repr__(self):
return '<MenuNode {0}>'.format(self.label)
def is_current(self, current):
return self.id == current
def has_current(self, current, viewable_children):
return any([child.is_current(current) for child in viewable_children])
class AnonymousNode(Node):
"""Only viewable by anonymous users"""
def is_viewable_by(self, user, context={}):
try:
return not user.is_authenticated()
except TypeError:
# Django 2.0+
return not user.is_authenticated
class AuthenticatedNode(Node):
"""Only viewable by authenticated users"""
def is_viewable_by(self, user, context={}):
try:
return user.is_authenticated()
except TypeError:
# Django 2.0+
return user.is_authenticated
class StaffNode(AuthenticatedNode):
"""Only viewable by staff members / admins"""
def is_viewable_by(self, user, context={}):
return user.is_staff or user.is_superuser
class PermissionNode(Node):
"""Require that user has given permission to display"""
def __init__(self, *args, **kwargs):
self.permission = kwargs.pop('permission')
super(PermissionNode, self).__init__(*args, **kwargs)
def is_viewable_by(self, user, context={}):
return user.has_perm(self.permission)
class AllPermissionsNode(Node):
"""Require user has all given permissions to display"""
def __init__(self, *args, **kwargs):
self.permissions = kwargs.pop('permissions')
super(AllPermissionsNode, self).__init__(*args, **kwargs)
def is_viewable_by(self, user, context={}):
return all([user.has_perm(perm) for perm in self.permissions])
class AnyPermissionsNode(Node):
"""Require user has one of the given permissions to display"""
def __init__(self, *args, **kwargs):
self.permissions = kwargs.pop('permissions')
super(AnyPermissionsNode, self).__init__(*args, **kwargs)
def is_viewable_by(self, user, context={}):
for permission in self.permissions:
has_perm = user.has_perm(permission)
if has_perm:
return True
return False
class PassTestNode(Node):
def __init__(self, *args, **kwargs):
self.test = kwargs.pop('test')
super(PassTestNode, self).__init__(*args, **kwargs)
def is_viewable_by(self, user, context={}):
return self.test(user, context=context)
| 32.149123 | 115 | 0.627012 |
7943198892d546f166be494c3247ac8cef0da437 | 20,444 | py | Python | tests/test_autocomplete.py | rallytime/fast-autocomplete | 9d56bad2b9a8c747a1fd945aaf415d6cb48a422a | [
"MIT"
] | null | null | null | tests/test_autocomplete.py | rallytime/fast-autocomplete | 9d56bad2b9a8c747a1fd945aaf415d6cb48a422a | [
"MIT"
] | null | null | null | tests/test_autocomplete.py | rallytime/fast-autocomplete | 9d56bad2b9a8c747a1fd945aaf415d6cb48a422a | [
"MIT"
] | null | null | null | import csv
import json
import os
import pytest
from pprint import pprint
from typing import NamedTuple
from fast_autocomplete.misc import read_csv_gen
from fast_autocomplete import AutoComplete, DrawGraphMixin
from fast_autocomplete.dwg import FindStep
current_dir = os.path.dirname(os.path.abspath(__file__))
WHAT_TO_PRINT = {'word', 'results', 'expected_results', 'result',
'find_steps', 'expected_steps', 'search_results', 'search_results_immutable'}
class Info(NamedTuple):
make: 'Info' = None
model: 'Info' = None
original_key: 'Info' = None
count: int = 0
def get(self, key, default=None):
return getattr(self, key, default)
__get__ = get
def parameterize_cases(cases):
return [tuple(i.values()) for i in cases]
def print_results(local_vars):
common = WHAT_TO_PRINT & set(local_vars.keys())
for key in common:
print(f'- {key}:')
pprint(local_vars[key])
def get_words(path):
file_path = os.path.join(current_dir, path)
csv_gen = read_csv_gen(file_path, csv_func=csv.DictReader)
words = {}
for line in csv_gen:
make = line['make'].lower()
model = line['model'].lower()
if make != model:
local_words = [model, '{} {}'.format(make, model)]
while local_words:
word = local_words.pop()
if word not in words:
words[word] = dict(line)
if make not in words:
words[make] = {"make": make}
words['truck'] = {'make': 'truck'}
return words
WIKIPEDIA_WORDS = get_words('fixtures/makes_models_from_wikipedia.csv')
SHORT_WORDS = get_words('fixtures/makes_models_short.csv')
SHORT_WORDS_UNICODE = get_words('fixtures/makes_models_in_farsi_short.csv')
SHORT_WORDS_IMMUTABLE_INFO = {key: Info(**value) for key, value in SHORT_WORDS.items()}
with open(os.path.join(current_dir, 'fixtures/synonyms.json'), 'r') as the_file:
SYNONYMS = json.loads(the_file.read())
class TestAutocomplete:
@pytest.mark.parametrize("word, max_cost, size, expected_results", [
('bmw', 2, 3, {0: [['bmw']], 1: [['bmw 1 series'], ['bmw e28'], ['bmw e30'], ['bmw e34']]}),
('beemer', 2, 3, {}),
('honda covic', 2, 3, {0: [['honda']], 1: [['honda', 'civic'], ['honda', 'civic type r']]}),
])
def test_search_without_synonyms(self, word, max_cost, size, expected_results):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS)
results, find_steps = auto_complete._find(word, max_cost, size)
results = dict(results)
print_results(locals())
assert expected_results == results
@pytest.mark.parametrize("word, max_cost, size, expected_results", [
('بی ام و', 2, 3, {0: [['بی ام و']], 1: [['بی ام و 1 series'], ['بی ام و 2 series']]}),
])
def test_search_unicode_without_synonyms(self, word, max_cost, size, expected_results):
auto_complete = AutoComplete(
words=SHORT_WORDS_UNICODE,
valid_chars_for_string='اآبپتثجچحخدذرزژسشصضطظعغفقکگلمنوهی')
results, find_steps = auto_complete._find(word, max_cost, size)
results = dict(results)
print_results(locals())
assert expected_results == results
STEP_DESCENDANTS_ONLY = [FindStep.descendants_only]
STEP_FUZZY_FOUND = [FindStep.fuzzy_try, FindStep.fuzzy_found]
SEARCH_CASES = [
{'word': ' ',
'max_cost': 3,
'size': 3,
'expected_find_results': {1: [['1 series'], ['bmw 1 series'], ['spirior'], ['honda spirior']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['1 series'], ['bmw 1 series'], ['spirior']],
},
{'word': '',
'max_cost': 3,
'size': 3,
'expected_find_results': {1: [['1 series'], ['bmw 1 series'], ['spirior'], ['honda spirior']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['1 series'], ['bmw 1 series'], ['spirior']],
},
{'word': 'c',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['c']], 1: [['charger'], ['chrysler charger'], ['chrysler d'], ['crown']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['c'], ['charger'], ['chrysler charger']],
},
{'word': 'ca',
'max_cost': 3,
'size': 3,
'expected_find_results': {1: [['california'], ['caddy'], ['camry'], ['cabriolet']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['california'], ['caddy'], ['camry']],
},
{'word': 'camr',
'max_cost': 3,
'size': 6,
'expected_find_results': {1: [['camry']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['camry']],
},
{'word': '4d',
'max_cost': 3,
'size': 3,
'expected_find_results': {1: [['4runner'], ['4c']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['4runner'], ['4c']],
},
{'word': '2018 alpha ',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']],
2: [['2018', 'alfa romeo'],
['2018', 'alfa romeo 2300'],
['2018', 'alfa romeo montreal'],
['2018', 'alfa romeo 90'],
['2018', 'alfa romeo gtv']]},
'expected_steps': STEP_FUZZY_FOUND,
'expected_find_and_sort_results': [['2018'], ['2018', 'alfa romeo'], ['2018', 'alfa romeo 2300']],
},
{'word': '2018 alpha romeo 4d',
'max_cost': 3,
'size': 4,
'expected_find_results': {0: [['2018']],
1: [['2018', 'alfa romeo 2300'],
['2018', 'alfa romeo montreal'],
['2018', 'alfa romeo 90'],
['2018', 'alfa romeo gtv'],
['2018', 'alfa romeo 6c']],
2: [['2018', 'alfa romeo', 'ameo']]},
'expected_steps': [FindStep.fuzzy_try, FindStep.fuzzy_found, {FindStep.rest_of_fuzzy_round2: [FindStep.fuzzy_try, FindStep.fuzzy_found]}, FindStep.not_enough_results_add_some_descandants],
'expected_find_and_sort_results': [['2018'],
['2018', 'alfa romeo 2300'],
['2018', 'alfa romeo montreal'],
['2018', 'alfa romeo 90']],
},
{'word': '2018 alpha',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']],
2: [['2018', 'alfa romeo'],
['2018', 'alfa romeo 2300'],
['2018', 'alfa romeo montreal'],
['2018', 'alfa romeo 90'],
['2018', 'alfa romeo gtv']]},
'expected_steps': STEP_FUZZY_FOUND,
'expected_find_and_sort_results': [['2018'], ['2018', 'alfa romeo'], ['2018', 'alfa romeo 2300']],
},
{'word': '2018 alfa',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018', 'alfa romeo']],
1: [['2018', 'alfa romeo 2300'],
['2018', 'alfa romeo montreal'],
['2018', 'alfa romeo 90'],
['2018', 'alfa romeo gtv']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['2018', 'alfa romeo'], ['2018', 'alfa romeo 2300'], ['2018', 'alfa romeo montreal']],
},
{'word': '2018 alfg',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']],
1: [['2018', 'alfa romeo 2300'],
['2018', 'alfa romeo montreal'],
['2018', 'alfa romeo 90'],
['2018', 'alfa romeo gtv']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['2018'], ['2018', 'alfa romeo 2300'], ['2018', 'alfa romeo montreal']],
},
{'word': '2018 glfa',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']], 1: [['2018', 'gla']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['2018'], ['2018', 'gla']],
},
{'word': '2018 doyota',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']],
1: [['2018', 'toyota'],
['2018', 'toyota crown'],
['2018', 'toyota prius'],
['2018', 'toyota avalon'],
['2018', 'toyota dyna']]},
'expected_steps': STEP_FUZZY_FOUND,
'expected_find_and_sort_results': [['2018'], ['2018', 'toyota'], ['2018', 'toyota crown']],
},
{'word': '2018 doyota camr',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']],
1: [['2018', 'toyota', 'camry'],
['2018', 'dyna'],
['2018', 'dauphine'],
['2018', 'drifter']]},
'expected_steps': [FindStep.fuzzy_try, FindStep.fuzzy_found, {FindStep.rest_of_fuzzy_round2: [FindStep.descendants_only]}, FindStep.not_enough_results_add_some_descandants],
'expected_find_and_sort_results': [['2018'], ['2018', 'toyota', 'camry'], ['2018', 'dyna']],
},
{'word': '2018 beemer',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018', 'bmw']],
1: [['2018', 'bmw 1 series'],
['2018', 'bmw e28'],
['2018', 'bmw e30'],
['2018', 'bmw e34']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['2018', 'bmw'], ['2018', 'bmw 1 series'], ['2018', 'bmw e28']],
},
{'word': '2018 beener',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['2018']],
1: [['2018', 'bmw 1 series'],
['2018', 'bmw e28'],
['2018', 'bmw e30'],
['2018', 'bmw e34']]},
'expected_steps': [FindStep.fuzzy_try, FindStep.not_enough_results_add_some_descandants],
'expected_find_and_sort_results': [['2018'], ['2018', 'bmw 1 series'], ['2018', 'bmw e28']],
},
{'word': 'vw bea',
'max_cost': 3,
'size': 3,
'expected_find_results': {0: [['volkswagen']], 1: [['volkswagen beetle']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['volkswagen'], ['volkswagen beetle']],
},
{'word': 'toyota camry 2018',
'max_cost': 3,
'size': 5,
'expected_find_results': {0: [['toyota camry', '2018']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['toyota camry', '2018']],
},
{'word': 'type r',
'max_cost': 3,
'size': 5,
'expected_find_results': {0: [['type r']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['type r']],
},
{'word': 'truck',
'max_cost': 3,
'size': 5,
'expected_find_results': {0: [['truck']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['truck']],
},
{'word': 'trucks',
'max_cost': 3,
'size': 5,
'expected_find_results': {0: [['truck']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['truck']],
},
{'word': '1se',
'max_cost': 3,
'size': 5,
'expected_find_results': {1: [['1 series']]},
'expected_steps': STEP_DESCENDANTS_ONLY,
'expected_find_and_sort_results': [['1 series']],
},
]
SEARCH_CASES_PARAMS = parameterize_cases(SEARCH_CASES)
class TestAutocompleteWithSynonyms:
@pytest.mark.parametrize("word, max_cost, size, expected_find_results, expected_steps, expected_find_and_sort_results", SEARCH_CASES_PARAMS)
def test_find(self, word, max_cost, size, expected_find_results, expected_steps, expected_find_and_sort_results):
expected_results = expected_find_results
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS)
results, find_steps = auto_complete._find(word, max_cost, size)
results = dict(results)
print_results(locals())
assert expected_results == results
assert expected_steps == find_steps
@pytest.mark.parametrize("word, max_cost, size, expected_find_results, expected_steps, expected_find_and_sort_results", SEARCH_CASES_PARAMS)
def test__find_and_sort(self, word, max_cost, size, expected_find_results, expected_steps, expected_find_and_sort_results):
expected_results = expected_find_and_sort_results
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS)
results = auto_complete._find_and_sort(word, max_cost, size)
results = list(results)
search_results = auto_complete.search(word, max_cost, size)
print_results(locals())
assert expected_results == results
if word.strip():
assert expected_results == search_results
else:
assert [] == search_results
@pytest.mark.parametrize("word", [
'alf',
])
def test_immutable_info(self, word):
auto_complete = AutoComplete(words=SHORT_WORDS, synonyms=SYNONYMS)
auto_complete_immutable = AutoComplete(words=SHORT_WORDS_IMMUTABLE_INFO, synonyms=SYNONYMS)
search_results = auto_complete._find(word, max_cost=3, size=3)
search_results_immutable = auto_complete_immutable._find(word, max_cost=3, size=3)
print_results(locals())
assert search_results_immutable == search_results
class AutoCompleteWithSynonymsShort(DrawGraphMixin, AutoComplete):
pass
class AutoCompleteWithSynonymsShortWithAnim(AutoCompleteWithSynonymsShort):
DRAW_POPULATION_ANIMATION = True
DRAW_POPULATION_ANIMATION_PATH = os.path.join(current_dir, 'animation/short_.svg')
DRAW_POPULATION_ANIMATION_FILENO_PADDING = 6
class TestAutoCompleteWithSynonymsShortGraphDraw:
def test_draw_graph(self):
auto_complete = AutoCompleteWithSynonymsShort(words=SHORT_WORDS)
file_path = os.path.join(current_dir, 'AutoCompleteWithSynonymsShort_Graph.svg')
auto_complete.draw_graph(file_path)
def test_draw_graph_animation(self):
AutoCompleteWithSynonymsShortWithAnim(words=SHORT_WORDS)
class TestPrefixAndDescendants:
@pytest.mark.parametrize("word, expected_matched_prefix_of_last_word, expected_rest_of_word, expected_matched_words, expected_node_path", [
('2018 alpha blah blah', 'al', 'pha blah blah', ['2018'], 'a,l'),
('2018 alpha ', 'al', 'pha ', ['2018'], 'a,l'),
('2018 alfa', '', '', ['2018', 'alfa romeo'], 'a,l,f,a'),
('2018 alf', 'alf', '', ['2018'], 'a,l,f'),
('2018 alfa romeo', '', '', ['2018', 'alfa romeo'], 'a,l,f,a, ,r,o,m,e,o'),
('1 series bmw 2007 2018', '', '', ['1 series', 'bmw', '2007', '2018'], '2,0,1,8'),
('200 chrysler', '', '', ['200', 'chrysler'], 'c,h,r,y,s,l,e,r'),
('200 chrysler 200', '', '', ['200', 'chrysler 200'], 'c,h,r,y,s,l,e,r, ,2,0,0'),
('chrysler 2007', '', '', ['chrysler', '2007'], '2,0,0,7'),
('type r', '', '', ['type r'], 't,y,p,e, ,r'),
])
def test_prefix_autofill(self, word, expected_matched_prefix_of_last_word,
expected_rest_of_word, expected_matched_words, expected_node_path):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS)
matched_prefix_of_last_word, rest_of_word, node, matched_words = auto_complete._prefix_autofill(word)
print(f'word: {word}')
print(f'expected_matched_prefix_of_last_word: {expected_matched_prefix_of_last_word}')
print(f'matched_prefix_of_last_word: {matched_prefix_of_last_word}')
print(f'expected_rest_of_word: {expected_rest_of_word}')
print(f'rest_of_word: {rest_of_word}')
print(f'node: {node}')
print(f'expected_matched_words: {expected_matched_words}')
print(f'matched_words: {matched_words}')
expected_node = auto_complete._dwg
for k in expected_node_path.split(','):
expected_node = expected_node[k]
assert expected_node is node
assert expected_matched_prefix_of_last_word == matched_prefix_of_last_word
assert expected_rest_of_word == rest_of_word
assert expected_matched_words == matched_words
@pytest.mark.parametrize("word, expected_results", [
('2018 alpha ', ['alfa', 'alfa rl', 'alfa rm']),
('1 series bmw 2', ['bmw 2 series']),
('2018 alfa', ['alfa rl', 'alfa rm', 'alfa 4c']),
])
def test_get_descendants_nodes(self, word, expected_results):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS)
matched_prefix_of_last_word, rest_of_word, node, matched_words = auto_complete._prefix_autofill(word)
size = 2
found_words_gen = node.get_descendants_nodes(size=size)
found_words = [_node.word for _node in found_words_gen][:size + 1]
print(f'word: {word}')
print(f'expected_results: {expected_results}')
print(f'found_words: {found_words}')
assert expected_results == list(found_words)
@pytest.mark.parametrize("word, expected_results", [
('r', ['rc', 'rx', 'r8', 'rl', 'rm', 'rav4', 'r107', 'r129', 'r170', 'r171', 'r230']),
('benz', []),
])
def test_get_all_descendent_words_for_condition(self, word, expected_results):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS)
def condition(word_info):
return 'model' in word_info
size = 10
results = auto_complete.get_all_descendent_words_for_condition(word=word, size=size, condition=condition)
print_results(locals())
assert expected_results == results[:size + 1]
class TestOther:
@pytest.mark.parametrize("word, expected_results", [
('bmw', ['bmw']),
('al', ['alfa romeo']),
])
def test_get_all_descendent_words_for_condition(self, word, expected_results):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS, full_stop_words=['bmw', 'alfa romeo'])
results = auto_complete.get_tokens_flat_list(word, max_cost=0, size=3)
print_results(locals())
assert expected_results == results
@pytest.mark.parametrize("word, expected_results", [
('bmw', {'make': 'bmw'}),
('bMw', {'make': 'bmw'}),
('al', None),
])
def test_get_word_context(self, word, expected_results):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS, full_stop_words=['bmw', 'alfa romeo'])
results = auto_complete.get_word_context(word)
print_results(locals())
assert expected_results == results
@pytest.mark.parametrize("word, update_dict, expected_results, expected_new_count", [
('toyota a', None, [['toyota'], ['toyota avalon'], ['toyota aurion'], ['toyota auris']], None),
('toyota a', {'word': 'toyota aygo', 'count': 10000}, [['toyota'], ['toyota aygo'], ['toyota avalon'], ['toyota aurion']], 10000),
('toyota a', {'word': 'toyota aurion', 'offset': -6000}, [['toyota'], ['toyota avalon'], ['toyota auris'], ['toyota aygo']], 94),
])
def test_update_count_of_word(self, word, update_dict, expected_results, expected_new_count):
auto_complete = AutoComplete(words=WIKIPEDIA_WORDS, synonyms=SYNONYMS, full_stop_words=['bmw', 'alfa romeo'])
if update_dict:
new_count = auto_complete.update_count_of_word(**update_dict)
assert expected_new_count == new_count
assert expected_new_count == auto_complete.get_count_of_word(update_dict['word'])
results = auto_complete.search(word, max_cost=2, size=4)
print_results(locals())
assert expected_results == results
| 42.859539 | 193 | 0.587263 |
794319aac7ed4a0d16b845ae39c49b775f1ecfff | 571 | py | Python | Arrays/Left_Rotation.py | govindak-umd/The_HackerRank_Interview_Preparation_Kit | e82ad7258bd90786d9ec8373536c006f3c71f9f3 | [
"BSD-3-Clause"
] | null | null | null | Arrays/Left_Rotation.py | govindak-umd/The_HackerRank_Interview_Preparation_Kit | e82ad7258bd90786d9ec8373536c006f3c71f9f3 | [
"BSD-3-Clause"
] | null | null | null | Arrays/Left_Rotation.py | govindak-umd/The_HackerRank_Interview_Preparation_Kit | e82ad7258bd90786d9ec8373536c006f3c71f9f3 | [
"BSD-3-Clause"
] | 1 | 2020-11-07T14:51:05.000Z | 2020-11-07T14:51:05.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
    len_a = len(a)
    # A rotation by any multiple of the list length is a no-op, so reduce d
    # modulo len_a; this also avoids falling through and returning None when
    # d equals len(a).
    d = d % len_a
    return a[d:] + a[:d]
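# For example, rotLeft([1, 2, 3, 4, 5], 2) returns [3, 4, 5, 1, 2].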
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 15.432432 | 48 | 0.563923 |
79431b7fe4870565a2e07718689da2f463ed4652 | 648 | py | Python | exercicios-Python/ex058.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | exercicios-Python/ex058.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | exercicios-Python/ex058.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | from random import randint
from time import sleep
c = 1
print('=' * 20, '=' * 20)
print('O computador pensou em um número, tente adivinhá-lo!')
print('=' * 20, '=' * 20)
n = randint(0, 10)
r = int(input('Digite o número que o computador pensou: '))
print('PROCESSANDO...')
sleep(0.5)
while r != n:
if r < n:
print('Mais, tente de novo!')
else:
print('Menos, tente de novo!')
r = int(input('Digite o número que o computador pensou: '))
print('PROCESSANDO...')
sleep(0.5)
c += 1
print('''Parabéns! O computador pensou no número {} e você acertou!
Você precisou de {} tentativas para acertar!'''.format(n, c))
| 29.454545 | 68 | 0.618827 |
79431b87ec98f2a683d93558d51b1bf9d52f83e1 | 100 | py | Python | src/accounts/models.py | jayesh96/writingsbykaur | 7cd48b52e21f96bec9640d8b3a73a48c9e5ecdcd | [
"MIT"
] | 2 | 2018-05-31T16:21:06.000Z | 2019-11-28T11:58:12.000Z | FullContact/models.py | KamalAwasthi/FullContact | fa2e9f29079064b015848d980ddbb8da51f323c9 | [
"Apache-2.0"
] | 11 | 2020-06-05T18:31:32.000Z | 2022-03-11T23:24:41.000Z | src/accounts/models.py | jayesh96/writingsbykaur | 7cd48b52e21f96bec9640d8b3a73a48c9e5ecdcd | [
"MIT"
] | 2 | 2018-02-12T16:37:08.000Z | 2019-11-28T11:58:24.000Z | from __future__ import unicode_literals
from django.db import models
# Create your models here.
| 12.5 | 39 | 0.8 |
79431bfc763000e962361be8cb774ba79fc3b51b | 7,128 | py | Python | omnidet/losses/distance_loss.py | AtlasGooo2/WoodScape | 597d9dda472c09bafea58ea69853948d63197eca | [
"MIT"
] | 348 | 2019-09-05T09:32:23.000Z | 2022-03-31T14:09:10.000Z | omnidet/losses/distance_loss.py | AtlasGooo2/WoodScape | 597d9dda472c09bafea58ea69853948d63197eca | [
"MIT"
] | 76 | 2019-10-14T12:29:55.000Z | 2022-03-26T02:00:50.000Z | omnidet/losses/distance_loss.py | AtlasGooo2/WoodScape | 597d9dda472c09bafea58ea69853948d63197eca | [
"MIT"
] | 78 | 2019-09-14T01:28:22.000Z | 2022-03-29T03:59:09.000Z | """
Loss function for Distance Estimation for OmniDet.
# author: Varun Ravi Kumar <[email protected]>
Parts of the code adapted from https://github.com/nianticlabs/monodepth2
Please refer to the license of the above repo.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; Authors provide no warranty with the software
and are not liable for anything.
"""
import torch
import torch.nn as nn
class InverseWarp:
def __init__(self, args):
self.ego_mask = args.ego_mask
self.frame_idxs = args.frame_idxs
self.num_scales = args.num_scales
self.min_distance = args.min_distance
self.max_distance = args.max_distance
def warp(self, inputs, outputs) -> None:
raise NotImplementedError("Invalid InverseWarp Attempted!")
def scale_norm(self, norm):
"""Convert network's sigmoid output into norm prediction"""
return self.min_distance + self.max_distance * norm
class PhotometricReconstructionLoss(nn.Module):
def __init__(self, inverse_warp_object: InverseWarp, args):
"""Loss function for unsupervised monocular distance
:param args: input params from config file
"""
super().__init__()
self.warp = inverse_warp_object
self.frame_idxs = args.frame_idxs
self.num_scales = args.num_scales
self.crop = args.crop
self.seed = 1e-7
self.disable_auto_mask = args.disable_auto_mask
self.clip_loss = args.clip_loss_weight
self.ssim_weight = args.ssim_weight
self.reconstr_weight = args.reconstr_weight
self.smooth_weight = args.smooth_weight
def norm_smoothness(self, norm: torch.Tensor, img: torch.Tensor) -> torch.Tensor:
"""Smoothens the output distance map or distance map
:param norm: Depth map of the target image -- [B x 1 x H x W]
:param img: Images from the image_stack -- [B x 3 x H x W]
:return Mean value of the smoothened image
"""
norm_gradients_x = torch.abs(norm[:, :, :, :-1] - norm[:, :, :, 1:])
norm_gradients_y = torch.abs(norm[:, :, :-1, :] - norm[:, :, 1:, :])
image_gradients_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
image_gradients_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
norm_gradients_x *= torch.exp(-image_gradients_x)
norm_gradients_y *= torch.exp(-image_gradients_y)
return norm_gradients_x.mean() + norm_gradients_y.mean()
@staticmethod
def ssim(x, y):
"""Computes a differentiable structured image similarity measure."""
x = nn.ReflectionPad2d(1)(x)
y = nn.ReflectionPad2d(1)(y)
c1 = 0.01 ** 2
c2 = 0.03 ** 2
mu_x = nn.AvgPool2d(kernel_size=3, stride=1)(x)
mu_y = nn.AvgPool2d(kernel_size=3, stride=1)(y)
sigma_x = nn.AvgPool2d(kernel_size=3, stride=1)(x ** 2) - mu_x ** 2
sigma_y = nn.AvgPool2d(kernel_size=3, stride=1)(y ** 2) - mu_y ** 2
sigma_xy = nn.AvgPool2d(kernel_size=3, stride=1)(x * y) - mu_x * mu_y
ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
ssim_d = (mu_x ** 2 + mu_y ** 2 + c1) * (sigma_x + sigma_y + c2)
return torch.clamp((1 - ssim_n / ssim_d) / 2, 0, 1)
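    # Illustrative shape note (not part of the original code): for image
    # batches x and y of shape [B, 3, H, W] the method returns a per-pixel
    # dissimilarity map of the same shape, with values clamped to [0, 1]:
    #
    #   x = torch.rand(2, 3, 64, 64)
    #   y = torch.rand(2, 3, 64, 64)
    #   dssim = PhotometricReconstructionLoss.ssim(x, y)  # shape [2, 3, 64, 64]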
def compute_reprojection_loss(self, predicted, target, ego_mask=None):
"""Computes reprojection loss between predicted and target images"""
        if isinstance(ego_mask, torch.Tensor):
l1_loss = (torch.abs(target - predicted) * ego_mask).mean(1, True)
ssim_error = self.ssim(predicted, target)
ssim_loss = (ssim_error * ego_mask).mean(1, True)
else:
l1_loss = torch.abs(target - predicted).mean(1, True)
ssim_loss = self.ssim(predicted, target).mean(1, True)
reprojection_loss = self.ssim_weight * ssim_loss + self.reconstr_weight * l1_loss
if self.clip_loss:
mean, std = reprojection_loss.mean(), reprojection_loss.std()
reprojection_loss = torch.clamp(reprojection_loss, max=float(mean + self.clip_loss * std))
return reprojection_loss, l1_loss, ssim_loss
def compute_losses(self, inputs, outputs):
"""Compute the reprojection and smoothness losses"""
losses = dict()
total_loss = 0
target = inputs[("color", 0, 0)]
for scale in range(self.num_scales):
loss = 0
# --- PHOTO-METRIC LOSS ---
reprojection_loss = list()
for frame_id in self.frame_idxs[1:]:
pred = outputs[("color", frame_id, scale)]
if self.crop:
ego_mask = outputs[("ego_mask", frame_id, scale)]
else:
ego_mask = outputs[("ego_mask", frame_id, scale)] * inputs["mask", 0]
outputs[("ego_mask", frame_id, scale)] = ego_mask
reproj_loss, l1, ssim = self.compute_reprojection_loss(pred, target, ego_mask)
reprojection_loss.append(reproj_loss)
reprojection_loss = torch.cat(reprojection_loss, 1)
# --- AUTO MASK ---
if not self.disable_auto_mask:
identity_reprojection_loss = list()
for frame_id in self.frame_idxs[1:]:
target = inputs[("color", 0, 0)]
pred = inputs[("color", frame_id, 0)]
reproj_loss, l1, ssim = self.compute_reprojection_loss(pred, target)
identity_reprojection_loss.append(reproj_loss)
identity_reprojection_loss = torch.cat(identity_reprojection_loss, 1)
# add random numbers to break ties
identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).to(
device=identity_reprojection_loss.device) * 1e-5
combined = torch.cat((identity_reprojection_loss, reprojection_loss), dim=1)
else:
combined = reprojection_loss
# --- COMPUTING MIN FOR MONOCULAR APPROACH ---
if combined.shape[1] == 1:
forward_optimise = combined
else:
forward_optimise, forward_idxs = torch.min(combined, dim=1)
loss += forward_optimise.mean()
# --- SMOOTHNESS LOSS ---
inv_norm = 1 / outputs[("norm", 0)]
normalized_norm = (inv_norm / (inv_norm.mean([2, 3], True) + self.seed))
smooth_loss = self.norm_smoothness(normalized_norm, inputs[("color", 0, 0)])
loss += self.smooth_weight * smooth_loss / (2 ** scale)
total_loss += loss
losses[f"distance_loss/{scale}"] = loss
total_loss /= self.num_scales
losses["distance_loss"] = total_loss
return losses
def forward(self, inputs, outputs):
"""Loss function for self-supervised norm and pose on monocular videos"""
self.warp.warp(inputs, outputs)
losses = self.compute_losses(inputs, outputs)
return losses, outputs
| 42.682635 | 104 | 0.61041 |
79431c85ae1ddd6fa4be03937f11f92f2836cb4d | 897 | py | Python | qiskit_nature/properties/second_quantization/electronic/bases/__init__.py | SooluThomas/qiskit-nature | 0d509525b68b76d0f2d613d0e7409b9ea65cfcc0 | [
"Apache-2.0"
] | null | null | null | qiskit_nature/properties/second_quantization/electronic/bases/__init__.py | SooluThomas/qiskit-nature | 0d509525b68b76d0f2d613d0e7409b9ea65cfcc0 | [
"Apache-2.0"
] | 1 | 2021-08-25T13:31:41.000Z | 2021-08-25T13:31:41.000Z | qiskit_nature/properties/second_quantization/electronic/bases/__init__.py | LaurinFischer/qiskit-nature | 7baf7c7f8c3d18e082e90bc1e593c47aa2f698ca | [
"Apache-2.0"
] | 3 | 2021-07-02T06:57:58.000Z | 2021-07-06T12:32:38.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
r"""
Electronic Bases (:mod:`qiskit_nature.properties.electronic_structure.bases`)
=============================================================================
.. currentmodule:: qiskit_nature.properties.electronic_structure.bases
"""
from .electronic_basis import ElectronicBasis
from .electronic_basis_transform import ElectronicBasisTransform
__all__ = [
"ElectronicBasis",
"ElectronicBasisTransform",
]
| 33.222222 | 77 | 0.710145 |
79431cc4d49f5d4c67e31197ba713fd6f0f02021 | 5,774 | py | Python | synolopy/cgi.py | hwiyel/synolopy | 107f7d534df8e4f8c254136dbe289e76ebcd5164 | [
"MIT"
] | 40 | 2015-03-25T14:07:39.000Z | 2021-01-28T21:23:49.000Z | synolopy/cgi.py | hwiyel/synolopy | 107f7d534df8e4f8c254136dbe289e76ebcd5164 | [
"MIT"
] | 11 | 2015-05-16T15:46:23.000Z | 2018-08-25T11:55:30.000Z | synolopy/cgi.py | hwiyel/synolopy | 107f7d534df8e4f8c254136dbe289e76ebcd5164 | [
"MIT"
] | 21 | 2015-02-01T13:44:13.000Z | 2020-12-17T13:43:07.000Z | import requests
from Queue import Queue
from urlparse import urljoin
from urllib import urlencode
from synolopy.errors import *
TIMEOUT = 10
# ------------------------------------------------------------------------------
# Tools
# ------------------------------------------------------------------------------
def _url_formatter(url):
if not url.endswith('/'):
return url+'/'
return url
# ------------------------------------------------------------------------------
# Common Gateway Interface URL building.
# ------------------------------------------------------------------------------
def _with_validation(func):
def inner(*args, **kwargs):
obj = args[0]
manager = obj.base().validation_manager
if manager:
return manager.validate(func(*args, **kwargs))
return func(*args, **kwargs)
return inner
class PathElement(object):
"""
Object representation of any path URL node.
"""
def __init__(self, path, parent, auth=False):
self._path = path
self._auth = auth
self.__parent__ = parent
if parent:
setattr(parent, path.lower(), self)
def base(self):
"""
Gives the base element of an URL (starting with `http://`).
"""
return self.__parent__.base()
def parents(self):
"""
Returns an simple FIFO queue with the ancestors and itself.
"""
q = self.__parent__.parents()
q.put(self)
return q
def path(self):
"""
Gives a ready to use path element (ease join).
"""
return _url_formatter(self._path)
def url(self):
"""
Returns the whole URL from the base to this node.
"""
path = None
nodes = self.parents()
while not nodes.empty():
path = urljoin(path, nodes.get().path())
return path
def auth_required(self):
"""
If any ancestor required an authentication, this node needs it too.
"""
if self._auth:
return self._auth, self
return self.__parent__.auth_required()
class BaseConsumer(PathElement):
"""
Root element for the CGI services tree.
"""
def __init__(self, url):
super(BaseConsumer, self).__init__(url, None)
self.session_manager = None
self.validation_manager = None
def parents(self):
q = Queue()
q.put(self)
return q
def auth_required(self):
return self._auth, None
def base(self):
return self
class CGI(PathElement):
"""
Object representation of a CGI, with useful methods to request and valid
returned data and
"""
def __init__(self, path, parent, **kwargs):
super(CGI, self).__init__(path, parent)
self.params = kwargs
def path(self):
return self._path
def url(self, method=None, **kwargs):
base = super(CGI, self).url()
base = '{path}.cgi'.format(path=base)
params = self.params
if method:
params['method'] = method
params.update(kwargs)
if params:
return '{url}?{params}'.format(url=base, params=urlencode(params))
return base
@_with_validation
def request(self, method, **kwargs):
url = self.url(method, **kwargs)
auth, node = self.auth_required()
if auth:
manager = self.base().session_manager
if not manager:
raise CGIException(
'Authentication is required by %s but no session manager '
'has been defined' % node.path()
)
session = manager.session(node) or manager.credentials(node)
return requests.get(url, cookies=session, timeout=TIMEOUT)
else:
return requests.get(url, timeout=TIMEOUT)
# ------------------------------------------------------------------------------
# Factory tool
# ------------------------------------------------------------------------------
class CGIFactory(object):
"""
Allows to build a CGI consumer from a python dictionary.
"""
@staticmethod
def build(data):
base = BaseConsumer(data['URL'])
CGIFactory._build_path(data, base)
CGIFactory._build_cgi(data, base)
return base
@staticmethod
def _build_path(data, parent):
path_set = data['PATH'] if 'PATH' in data else dict()
for path, content in path_set.iteritems():
auth = content['AUTH'] if 'AUTH' in content else False
pe = PathElement(path, parent, auth)
CGIFactory._build_path(content, pe)
CGIFactory._build_cgi(content, pe)
@staticmethod
def _build_cgi(data, parent):
cgi_set = data['CGI'] if 'CGI' in data else dict()
for cgi, content in cgi_set.iteritems():
CGI(cgi, parent, **content)
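# Illustrative sketch (not from the original source): ``CGIFactory.build``
# expects a nested dictionary along these lines; the service names, paths and
# CGI parameters below are hypothetical.
#
#   api = CGIFactory.build({
#       'URL': 'http://nas.example.com:5000/webapi/',
#       'PATH': {
#           'AudioStation': {
#               'AUTH': True,
#               'CGI': {
#                   'playlist': {'api': 'SYNO.AudioStation.Playlist', 'version': 1},
#               },
#           },
#       },
#   })
#   url = api.audiostation.playlist.url(method='list')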
# ------------------------------------------------------------------------------
# Managers
# ------------------------------------------------------------------------------
class SessionManager(object):
def __init__(self, login, password, consumer):
self.login = login
self.password = password
self.api = consumer
self._sessions = dict()
def session(self, node, session=None):
if not session:
try:
return self._sessions[node.path]
except KeyError:
return None
self._sessions[node.path] = session
def credentials(self, node):
raise NotImplementedError
class ValidationManager(object):
@staticmethod
def validate(response):
raise NotImplementedError | 27.235849 | 80 | 0.519744 |
79431cf81fe8bdd4505dbbb59e283db27f28707f | 30 | py | Python | dirtyclean/__init__.py | paultopia/dirtyclean | 1b93b29e070b53afede22ff28497fd68f28d0326 | [
"MIT"
] | 2 | 2017-12-04T16:58:57.000Z | 2021-03-02T04:59:54.000Z | dirtyclean/__init__.py | paultopia/dirtyclean | 1b93b29e070b53afede22ff28497fd68f28d0326 | [
"MIT"
] | null | null | null | dirtyclean/__init__.py | paultopia/dirtyclean | 1b93b29e070b53afede22ff28497fd68f28d0326 | [
"MIT"
] | null | null | null | from .dirtyclean import clean
| 15 | 29 | 0.833333 |
79431d5829f0a106adb3c6735093419736949f81 | 694 | py | Python | jumpgate/common/hooks/core.py | Neetuj/jumpgate | 509c1d43a5f4b91c7f3ad5c0cd34abf61bb0a3ee | [
"MIT"
] | null | null | null | jumpgate/common/hooks/core.py | Neetuj/jumpgate | 509c1d43a5f4b91c7f3ad5c0cd34abf61bb0a3ee | [
"MIT"
] | null | null | null | jumpgate/common/hooks/core.py | Neetuj/jumpgate | 509c1d43a5f4b91c7f3ad5c0cd34abf61bb0a3ee | [
"MIT"
] | null | null | null | import json
import uuid
import falcon.status_codes
from jumpgate.common.hooks import request_hook, response_hook
@response_hook(False)
def hook_format(req, resp):
body = resp.body
if body is not None and not resp.content_type:
resp.content_type = 'application/json'
resp.body = json.dumps(body)
if isinstance(resp.status, int):
resp.status = getattr(falcon.status_codes,
'HTTP_%s' % resp.status,
resp.status)
resp.set_header('X-Compute-Request-Id', req.env['REQUEST_ID'])
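# Illustrative note (not part of the original module): an integer status such
# as 404 set by a handler resolves to falcon.status_codes.HTTP_404, i.e. the
# string '404 Not Found', before the response is returned.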
@request_hook(False)
def hook_set_uuid(req, resp, kwargs):
req.env['REQUEST_ID'] = 'req-' + str(uuid.uuid1())
| 25.703704 | 66 | 0.645533 |
79431de009f6a2f8746b71f98d584040e4df143f | 18,511 | py | Python | nflgame/update_players.py | ca3tech/nflgame | cbb59332b305c10229604c677054b1a41b7830c9 | [
"Unlicense"
] | null | null | null | nflgame/update_players.py | ca3tech/nflgame | cbb59332b305c10229604c677054b1a41b7830c9 | [
"Unlicense"
] | null | null | null | nflgame/update_players.py | ca3tech/nflgame | cbb59332b305c10229604c677054b1a41b7830c9 | [
"Unlicense"
] | null | null | null | # Here's an outline of how this program works.
# Firstly, we load a dictionary mapping GSIS identifier to a dictionary of
# player meta data. This comes from either the flag `json-update-file` or
# nflgame's "players.json" file. We then build a reverse map from profile
# identifier (included in player meta data) to GSIS identifier.
#
# We then look at all players who have participated in the last week of
# play. Any player in this set that is not in the aforementioned mapping
# has his GSIS identifier and name (e.g., `T.Brady`) added to a list of
# players to update.
#
# (N.B. When the initial mappings are empty, then every player who recorded
# a statistic since 2009 is added to this list.)
#
# For each player in the list to update, we need to obtain the profile
# identifier. This is done by sending a single HEAD request to the
# `gsis_profile` URL. The URL is a redirect to their canonical profile page,
# with which we extract the profile id. We add this mapping to both of the
# mappings discussed previously. (But note that the meta data in the GSIS
# identifier mapping is incomplete.)
#
# We now fetch the roster lists for each of the 32 teams from NFL.com.
# The roster list contains all relevant meta data *except* the GSIS identifier.
# However, since we have a profile identifier for each player (which is
# included in the roster list), we can connect player meta data with a
# particular GSIS identifier. If we happen to see a player on the roster that
# isn't in the mapping from profile identifier to GSIS identifier, then we need
# to do a full GET request on that player's profile to retrieve the GSIS
# identifier. (This occurs when a player has been added to a roster but hasn't
# recorded any statistics. e.g., Rookies, benchwarmers or offensive linemen.)
#
# We overwrite the initial dictionary of player meta data for each player in
# the roster data, including adding new entries for new players. We then save
# the updated mapping from GSIS identifier to player meta data to disk as JSON.
# (The JSON dump is sorted by key so that diffs are meaningful.)
#
# This approach requires a few thousand HEAD requests to NFL.com on the first
# run. But after that, most runs will only require 32 requests for the roster
# list (small potatoes) and perhaps a few HEAD/GET requests if there happens to
# be a new player found.
from __future__ import absolute_import, division, print_function
import argparse
import json
import multiprocessing.pool
import os
import re
import sys
import traceback
import httplib2
from bs4 import BeautifulSoup
try:
import lxml.html # noqa
PARSER = 'lxml'
except ImportError:
try:
import html5lib # noqa
PARSER = 'html5lib'
except ImportError:
PARSER = 'html.parser'
import nflgame
import nflgame.live
import nflgame.player
urls = {
'roster': 'http://www.nfl.com/teams/roster?team=%s',
'gsis_profile': 'http://www.nfl.com/players/profile?id=%s',
}
def new_http():
http = httplib2.Http(timeout=10)
http.follow_redirects = True
return http
def initial_mappings(conf):
metas, reverse = {}, {}
try:
with open(conf.json_update_file) as fp:
metas = json.load(fp)
for gsis_id, meta in metas.items():
reverse[meta['profile_id']] = gsis_id
except IOError as e:
eprint('Could not open "%s": %s' % (conf.json_update_file, e))
# Delete some keys in every entry. We do this to stay fresh.
# e.g., any player with "team" set should be actively on a roster.
for k in metas:
metas[k].pop('team', None)
metas[k].pop('status', None)
metas[k].pop('position', None)
return metas, reverse
def profile_id_from_url(url):
if url is None:
return None
m = re.search('/([0-9]+)/', url)
return None if m is None else int(m.group(1))
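# For example (hypothetical profile URL):
#   profile_id_from_url('http://www.nfl.com/player/tombrady/2504211/profile')
# returns 2504211.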
def profile_url(gsis_id):
resp, content = new_http().request(urls['gsis_profile'] % gsis_id, 'HEAD')
if resp['status'] != '301':
return None
loc = resp['location']
if not loc.startswith('http://'):
loc = 'http://www.nfl.com' + loc
return loc
def gsis_id(profile_url):
resp, content = new_http().request(profile_url, 'GET')
if resp['status'] != '200':
return None
m = re.search('GSIS\s+ID:\s+([0-9-]+)', content)
if m is None:
return None
gid = m.group(1).strip()
if len(gid) != 10: # Can't be valid...
return None
return gid
def roster_soup(team):
resp, content = new_http().request(urls['roster'] % team, 'GET')
if resp['status'] != '200':
return None
return BeautifulSoup(content, PARSER)
def try_int(s):
try:
return int(s)
except ValueError:
return 0
def first_int(s):
m = re.search('[0-9]+', s)
if m is None:
return 0
return int(m.group(0))
def first_word(s):
    m = re.match(r'\S+', s)
if m is None:
return ''
return m.group(0)
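# Convert a height string like '6-4' into total inches (76 in that case);
# anything unparseable falls back to 0.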
def height_as_inches(txt):
# Defaults to 0 if `txt` isn't parseable.
feet, inches = 0, 0
pieces = re.findall('[0-9]+', txt)
if len(pieces) >= 1:
feet = try_int(pieces[0])
if len(pieces) >= 2:
inches = try_int(pieces[1])
return feet * 12 + inches
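# Build a meta data dict from one <tr> of the roster table. The column order
# assumed here is: number, name, position, status, height, weight, birthdate,
# years pro, college.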
def meta_from_soup_row(team, soup_row):
tds, data = [], []
for td in soup_row.find_all('td'):
tds.append(td)
data.append(td.get_text().strip())
profile_url = 'http://www.nfl.com%s' % tds[1].a['href']
name = tds[1].a.get_text().strip()
if ',' not in name:
last_name, first_name = name, ''
else:
last_name, first_name = map(lambda s: s.strip(), name.split(','))
return {
'team': team,
'profile_id': profile_id_from_url(profile_url),
'profile_url': profile_url,
'number': try_int(data[0]),
'first_name': first_name,
'last_name': last_name,
'full_name': '%s %s' % (first_name, last_name),
'position': data[2],
'status': data[3],
'height': height_as_inches(data[4]),
'weight': first_int(data[5]),
'birthdate': data[6],
'years_pro': try_int(data[7]),
'college': data[8],
}
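# Scrape whatever meta data a profile page offers for players that never
# show up on a roster. Falsy input (a failed download) is passed straight
# back to the caller; unparseable HTML yields None.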
def meta_from_profile_html(html):
if not html:
return html
try:
soup = BeautifulSoup(html, PARSER)
pinfo = soup.find(id='player-bio').find(class_='player-info')
# Get the full name and split it into first and last.
# Assume that if there are no spaces, then the name is the last name.
# Otherwise, all words except the last make up the first name.
# Is that right?
name = pinfo.find(class_='player-name').get_text().strip()
name_pieces = name.split(' ')
if len(name_pieces) == 1:
first, last = '', name
else:
first, last = ' '.join(name_pieces[0:-1]), name_pieces[-1]
meta = {
'first_name': first,
'last_name': last,
'full_name': name,
}
# The position is only in the <title>... Weird.
title = soup.find('title').get_text()
        m = re.search(r',\s+([A-Z]+)', title)
if m is not None:
meta['position'] = m.group(1)
# Look for a whole bunch of fields in the format "Field: Value".
search = pinfo.get_text()
fields = {'Height': 'height', 'Weight': 'weight', 'Born': 'birthdate',
'College': 'college'}
for f, key in fields.items():
            m = re.search(r'%s:\s+([\S ]+)' % f, search)
if m is not None:
meta[key] = m.group(1)
if key == 'height':
meta[key] = height_as_inches(meta[key])
elif key == 'weight':
meta[key] = first_int(meta[key])
elif key == 'birthdate':
meta[key] = first_word(meta[key])
# Experience is a little weirder...
        m = re.search(r'Experience:\s+([0-9]+)', search)
if m is not None:
meta['years_pro'] = int(m.group(1))
return meta
except AttributeError:
return None
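# Yield (gsis_id, name) pairs for every player that appears in play-by-play
# data but is not already in the existing meta data mapping.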
def players_from_games(existing, games):
for g in games:
if g is None:
continue
for d in g.drives:
for p in d.plays:
for player in p.players:
if player.playerid not in existing:
yield player.playerid, player.name
def eprint(*args, **kwargs):
kwargs['file'] = sys.stderr
print(*args, **kwargs)
def progress(cur, total):
ratio = 100 * (float(cur) / float(total))
eprint('\r%d/%d complete. (%0.2f%%)' % (cur, total, ratio), end='')
def progress_done():
eprint('\nDone!')
def run():
parser = argparse.ArgumentParser(
description='Efficiently download player meta data from NFL.com. Note '
'that each invocation of this program guarantees at least '
'32 HTTP requests to NFL.com',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
aa = parser.add_argument
aa('--json-update-file', type=str, default=None,
help='When set, the file provided will be updated in place with new '
'meta data from NFL.com. If this option is not set, then the '
'"players.json" file that comes with nflgame will be updated '
'instead.')
aa('--simultaneous-reqs', type=int, default=3,
help='The number of simultaneous HTTP requests sent to NFL.com at a '
'time. Set this lower if you are worried about hitting their '
'servers.')
aa('--full-scan', action='store_true',
help='Forces a full scan of nflgame player data since 2009. Typically, '
'this is only done when starting with a fresh JSON player '
'database. But it can be useful to re-scan all of the players if '
'past errors went ignored and data is missing. The advantage of '
'using this option over starting fresh is that an existing '
'(gsis_id <-> profile_id) mapping can be used for the majority of '
'players, instead of querying NFL.com for the mapping all over '
'again.')
aa('--no-block', action='store_true',
help='When set, this program will exit with an error instead of '
'displaying a prompt to continue. This is useful when calling '
'this program from another script. The idea here is not to block '
'indefinitely if something goes wrong and the program wants to '
'do a fresh update.')
aa('--phase', default=None, choices=['PRE', 'REG', 'POST'],
help='Force the update to use the given phase of the season.')
aa('--year', default=None, type=int,
help='Force the update to use nflgame players from a specific year.')
aa('--week', default=None, type=int,
help='Force the update to use nflgame players from a specific week.')
args = parser.parse_args()
if args.json_update_file is None:
args.json_update_file = nflgame.player._player_json_file
teams = [team[0] for team in nflgame.teams if team[0] not in ['STL', 'SD']]
pool = multiprocessing.pool.ThreadPool(args.simultaneous_reqs)
# Before doing anything laborious, make sure we have write access to
# the JSON database.
if not os.access(args.json_update_file, os.W_OK):
eprint('I do not have write access to "%s".' % args.json_update_file)
eprint('Without write access, I cannot update the player database.')
sys.exit(1)
# Fetch the initial mapping of players.
metas, reverse = initial_mappings(args)
if len(metas) == 0:
if args.no_block:
eprint('I want to do a full update, but I have been told to\n'
'exit instead of asking if you want to continue.')
sys.exit(1)
eprint("nflgame doesn't know about any players.")
eprint("Updating player data will require several thousand HTTP HEAD "
"requests to NFL.com.")
eprint("It is strongly recommended to find the 'players.json' file "
"that comes with nflgame.")
eprint("Are you sure you want to continue? [y/n] ", end='')
answer = raw_input()
        if not answer or answer[0].lower() != 'y':
eprint("Quitting...")
sys.exit(1)
# Accumulate errors as we go. Dump them at the end.
errors = []
# Now fetch a set of players that aren't in our mapping already.
# Restrict the search to the current week if we have a non-empty mapping.
if len(metas) == 0 or args.full_scan:
eprint('Loading players in games since 2009, this may take a while...')
players = {}
        # Grab players one game at a time to avoid obscene memory requirements.
for schedule in nflgame.sched.games.itervalues():
# If the game is too far in the future, skip it...
if nflgame.live._game_datetime(schedule) > nflgame.live._now():
continue
g = nflgame.game.Game(schedule['eid'])
for pid, name in players_from_games(metas, [g]):
players[pid] = name
eprint('Done.')
else:
year, week = nflgame.live.current_year_and_week()
phase = nflgame.live._cur_season_phase
if args.phase is not None:
phase = args.phase
if args.year is not None:
year = args.year
if args.week is not None:
week = args.week
eprint('Loading games for %s %d week %d' % (phase, year, week))
games = nflgame.games(year, week, kind=phase)
players = dict(players_from_games(metas, games))
# Find the profile ID for each new player.
if len(players) > 0:
eprint('Finding (profile id -> gsis id) mapping for players...')
def fetch(t): # t[0] is the gsis_id and t[1] is the gsis name
return t[0], t[1], profile_url(t[0])
for i, t in enumerate(pool.imap(fetch, players.items()), 1):
gid, name, purl = t
pid = profile_id_from_url(purl)
progress(i, len(players))
if purl is None or pid is None:
errors.append('Could not get profile URL for (%s, %s)'
% (gid, name))
continue
assert gid not in metas
metas[gid] = {'gsis_id': gid, 'gsis_name': name,
'profile_url': purl, 'profile_id': pid}
reverse[pid] = gid
progress_done()
# Get the soup for each team roster.
eprint('Downloading team rosters...')
roster = []
def fetch(team):
return team, roster_soup(team)
for i, (team, soup) in enumerate(pool.imap(fetch, teams), 1):
progress(i, len(teams))
if soup is None:
errors.append('Could not get roster for team %s' % team)
continue
tbodys = soup.find(id='result').find_all('tbody')
for row in tbodys[len(tbodys)-1].find_all('tr'):
try:
roster.append(meta_from_soup_row(team, row))
except Exception:
errors.append(
'Could not get player info from roster row:\n\n%s\n\n'
'Exception:\n\n%s\n\n'
% (row, traceback.format_exc()))
progress_done()
# Find the gsis identifiers for players that are in the roster but haven't
# recorded a statistic yet. (i.e., Not in nflgame play data.)
purls = [r['profile_url']
for r in roster if r['profile_id'] not in reverse]
if len(purls) > 0:
eprint('Fetching GSIS identifiers for players not in nflgame...')
def fetch(purl):
return purl, gsis_id(purl)
for i, (purl, gid) in enumerate(pool.imap(fetch, purls), 1):
progress(i, len(purls))
if gid is None:
errors.append('Could not get GSIS id at %s' % purl)
continue
reverse[profile_id_from_url(purl)] = gid
progress_done()
    # Now merge the data from `roster` into `metas` by using `reverse` to
# establish the correspondence.
for data in roster:
gsisid = reverse.get(data['profile_id'], None)
if gsisid is None:
errors.append('Could not find gsis_id for %s' % data)
continue
merged = dict(metas.get(gsisid, {}), **data)
merged['gsis_id'] = gsisid
metas[gsisid] = merged
# Finally, try to scrape meta data for players who aren't on a roster
# but have recorded a statistic in nflgame.
gids = [(gid, meta['profile_url'])
for gid, meta in metas.iteritems()
if 'full_name' not in meta and 'profile_url' in meta]
if len(gids):
eprint('Fetching meta data for players not on a roster...')
def fetch(t):
gid, purl = t
resp, content = new_http().request(purl, 'GET')
if resp['status'] != '200':
if resp['status'] == '404':
return gid, purl, False
else:
return gid, purl, None
return gid, purl, content
for i, (gid, purl, html) in enumerate(pool.imap(fetch, gids), 1):
progress(i, len(gids))
more_meta = meta_from_profile_html(html)
if not more_meta:
# If more_meta is False, then it was a 404. Not our problem.
if more_meta is None:
errors.append('Could not fetch HTML for %s' % purl)
continue
metas[gid] = dict(metas[gid], **more_meta)
progress_done()
assert len(metas) > 0, "Have no players to add... ???"
with open(args.json_update_file, 'w+') as fp:
json.dump(metas, fp, indent=4, sort_keys=True,
separators=(',', ': '))
if len(errors) > 0:
eprint('\n')
eprint('There were some errors during the download. Usually this is a')
eprint('result of an HTTP request timing out, which means the')
eprint('resulting "players.json" file is probably missing some data.')
eprint('An appropriate solution is to re-run the script until there')
eprint('are no more errors (or when the errors are problems on ')
        eprint("NFL.com's side).")
eprint('-' * 79)
eprint(('\n' + ('-' * 79) + '\n').join(errors))
if __name__ == '__main__':
run()
| 36.874502 | 79 | 0.59986 |
79431e9906a970cf446a0d91f888bb980a251074 | 631 | py | Python | Dataset/Leetcode/valid/88/455.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/88/455.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/88/455.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
left, right, index = m - 1, n - 1, m + n - 1
while left >= 0 and right >= 0:
if nums2[right] >= nums1[left]:
nums1[index] = nums2[right]
index -= 1
right -= 1
else:
nums1[index] = nums1[left]
left -= 1
index -= 1
if right >= 0:
for i in range(right + 1):
nums1[i] = nums2[i]
| 31.55 | 78 | 0.423138 |
79431ee5f1fdbab271353d8c0f851f6f12d1f0d0 | 709 | py | Python | neurovault/celery.py | abitrolly/NeuroVault | e62bc65c8e0e58bff55bb9fa7cf11193dc54d734 | [
"MIT"
] | 68 | 2015-02-07T06:09:49.000Z | 2022-03-03T22:58:33.000Z | neurovault/celery.py | abitrolly/NeuroVault | e62bc65c8e0e58bff55bb9fa7cf11193dc54d734 | [
"MIT"
] | 436 | 2015-01-01T01:01:13.000Z | 2021-11-07T18:24:00.000Z | neurovault/celery.py | abitrolly/NeuroVault | e62bc65c8e0e58bff55bb9fa7cf11193dc54d734 | [
"MIT"
] | 60 | 2015-01-10T23:31:26.000Z | 2021-08-10T06:39:57.000Z | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'neurovault.settings')
nvcelery = Celery('neurovault')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
nvcelery.config_from_object('django.conf:settings')
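# Look for a tasks module in every app listed in INSTALLED_APPS.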
nvcelery.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
from raven.contrib.django.raven_compat.models import client
from raven.contrib.celery import register_signal, register_logger_signal
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
| 28.36 | 72 | 0.829337 |