{
"source": "Jeff-Szkodzinski/SHARPpy",
"score": 3
}
#### File: sharppy/io/csv.py
```python
import urllib.request as request
from contextlib import closing
import csv
def loadCSV(csv_file_name):
csv = []
csv_file = open(csv_file_name, 'r')
csv_fields = [ f.lower() for f in csv_file.readline().strip().split(',') ]
for line in csv_file:
line_dict = dict( (f, v) for f, v in zip(csv_fields, line.strip().split(',')))
csv.append(line_dict)
csv_file.close()
return csv_fields, csv
def loadNUCAPS_CSV(remote_csv):
csv_dict = []
# Open the remote csv and define the csv reader object.
with closing(request.urlopen(remote_csv)) as response:
lines = [l.decode('utf-8') for l in response.readlines()]
csv_file = csv.reader(lines)
# Assign the headers to a list.
csv_fields = next(csv_file)
for line in csv_file:
line_dict = dict( (f, v) for f, v in zip(csv_fields, line))
csv_dict.append(line_dict)
return csv_fields, csv_dict
if __name__ == '__main__':
import sys
fields, csv = loadCSV(sys.argv[1])
print(fields)
```
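For orientation, a minimal usage sketch of the two loaders above (the import path follows the file heading; the file name and URL are placeholders):
```python
# Placeholder inputs; assumes the sharppy package is importable.
from sharppy.io.csv import loadCSV, loadNUCAPS_CSV

fields, rows = loadCSV("stations.csv")      # local file; header names are lower-cased
print(fields)                               # list of column names from the first line
print(rows[0])                              # each row is a dict keyed by those names

# Remote CSV fetched over HTTP; its header row is kept exactly as written.
nucaps_fields, nucaps_rows = loadNUCAPS_CSV("https://example.com/nucaps.csv")
```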
#### File: sharppy/tests/test_decoders.py
```python
import pytest
import sharppy.io.decoder as decoder
import sharppy.io.buf_decoder as buf_decoder
import sharppy.io.spc_decoder as spc_decoder
import sharppy.io.pecan_decoder as pecan_decoder
import sharppy.io.uwyo_decoder as uwyo_decoder
import sharppy.io.nucaps_decoder as nucaps_decoder # JTS
"""
Unit tests to check whether the decoders work on different file types
"""
files = ['examples/data/14061619.OAX',
'examples/data/rap_oun.buf',
'examples/data/oun_uwyo.html',
'examples/data/ABR.txt',
'examples/data/OUN.txt']
decoders = decoder.getDecoders()
assert len(decoders) > 0
def test_spc_decoder():
dec = spc_decoder.SPCDecoder(files[0])
profs = dec.getProfiles()
profs._backgroundCopy("")
# Test Interpolation
profs.interp()
assert profs.isInterpolated() == True
profs.resetInterpolation()
assert profs.isInterpolated() == False
# Test setting storm motion vectors
profs.modifyStormMotion('right', 0, 0)
profs.modifyStormMotion('left', 0, 0)
profs.resetStormMotion()
# Try modify
profs.modify(0, tmpc=35)
profs.modify(0, u=0)
tmp = profs._profs[""][0].tmpc
tmp[0:2] = 35
profs.modify(-999, tmpc=tmp, idx_range=[0,1])
profs.resetModification('tmpc')
profs.resetModification('u')
# JTS - Added test for NUCAPS decoder.
def test_nucaps_decoder():
dec = nucaps_decoder.NUCAPSDecoder(files[0])
profs = dec.getProfiles()
profs._backgroundCopy("")
# Test Interpolation
profs.interp()
assert profs.isInterpolated() == True
profs.resetInterpolation()
assert profs.isInterpolated() == False
# Test setting storm motion vectors
profs.modifyStormMotion('right', 0, 0)
profs.modifyStormMotion('left', 0, 0)
profs.resetStormMotion()
# Try modify
profs.modify(0, tmpc=35)
profs.modify(0, u=0)
tmp = profs._profs[""][0].tmpc
tmp[0:2] = 35
profs.modify(-999, tmpc=tmp, idx_range=[0,1])
profs.resetModification('tmpc')
profs.resetModification('u')
def test_bufkit_decoder():
# Load in a BUFKIT file
dec = buf_decoder.BufDecoder(files[1])
profs = dec.getProfiles()
stn_id = dec.getStnId()
# Test some of the characteristics of the ProfCollection
assert profs.isEnsemble() == False
assert profs.isModified() == False
assert profs.getAnalogDate() is None
assert profs.hasCurrentProf() == True
def test_uwyo_decoder():
# Try to load in the UWYO file
try:
dec = uwyo_decoder.UWYODecoder(files[2])
except:
print("FAILED")
def test_pecan_decoder():
try:
# Load in the PECAN-type files
dec = pecan_decoder.PECANDecoder(files[3])
dec = pecan_decoder.PECANDecoder(files[4])
except:
return
# Test some of the characteristics of this ProfCollection
assert dec.getProfiles().isEnsemble() == True
profs = dec.getProfiles()
profs.advanceHighlight(1)
profs.advanceHighlight(-1)
profs.advanceTime(1)
profs.advanceTime(-1)
#print(profs)
```
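Outside pytest, the same decoder API can be exercised directly; a hedged sketch using the bundled example sounding from the `files` list above (attribute access mirrors what the tests do):
```python
import sharppy.io.spc_decoder as spc_decoder

dec = spc_decoder.SPCDecoder('examples/data/14061619.OAX')
profs = dec.getProfiles()           # a ProfCollection, as in the tests
profs.interp()                      # interpolate the profiles, then undo it
assert profs.isInterpolated()
profs.resetInterpolation()
prof = profs._profs[""][0]          # first profile of the unnamed member (same access as the tests)
print(prof.tmpc[:5])                # first few temperature values (deg C)
```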
{
"source": "JeffT13/rd-diarization",
"score": 2
}
#### File: rd-diarization/RDSV/rdsv.py
```python
import json
import numpy as np
from VoiceEncoder.util import getDiary
class RefAudioLibrary:
def __init__(self, c, embed_path, rttm_path, spkrdict_path, judge_only=True, min_audio_len = 4, min_ref_thresh=5, save=False, unref_constant = 'UnRefSpkr'):
self.case_set = c
self.embed = embed_path
self.rttm = rttm_path
self.judge_only = judge_only
self.uid = unref_constant
with open(spkrdict_path) as json_file:
self.sd = json.load(json_file)
load_embed = {}
spkr_ints = {}
RAL = {}
#process RTTM & Embeddings (preprocessed)
for case in self.case_set:
temp_diary = getDiary(self.rttm+case+'.rttm')
load_embed[case] = [np.load(embed_path+case+'_embeds.npy'), np.load(embed_path+case+'_embeds_times.npy')]
for i in range(len(temp_diary)):
speaker = temp_diary[i][0].split(' ')[7]
if speaker not in spkr_ints:
spkr_ints[speaker] = [temp_diary[i]]
else:
spkr_ints[speaker].append(temp_diary[i])
self.spkr_ints = spkr_ints
self.load_embed = load_embed
#Build RAL
for spkr in spkr_ints.keys():
count = 0
if judge_only:
if self.sd[spkr]<20:
while count < len(spkr_ints[spkr]):
entry = spkr_ints[spkr][count][0].split(' ')
count+=1
if float(entry[4])>min_audio_len:
start = float(entry[3])
end = start + float(entry[4])
case = entry[1]
if spkr in RAL:
RAL[spkr].append(self.get_interval_embed(start, end, load_embed[case][0], load_embed[case][1]))
else:
RAL[spkr] = [self.get_interval_embed(start, end, load_embed[case][0], load_embed[case][1])]
else:
while count < len(spkr_ints[spkr]):
entry = spkr_ints[spkr][count][0].split(' ')
count+=1
if float(entry[4])>min_audio_len:
start = float(entry[3])
end = start + float(entry[4])
case = entry[1]
if spkr in RAL:
RAL[spkr].append(self.get_interval_embed(start, end, load_embed[case][0], load_embed[case][1]))
else:
RAL[spkr] = [self.get_interval_embed(start, end, load_embed[case][0], load_embed[case][1])]
if min_ref_thresh is not None:
sl = []
for spkr in RAL.keys():
if len(RAL[spkr])<min_ref_thresh:
sl.append(spkr)
for s in sl:
RAL.pop(s)
self.RAL = RAL
if save is not False:
with open(save, 'w') as outfile:
json.dump(RAL, outfile)
def get_interval_embed(self, start, end, cont_embeds, embed_times):
'''Takes as input the start and end times of a speaking interval (in s), the embeddings for the full case,
and the corresponding times for each embedding (in ms).
Returns: a single reference audio embedding, computed by L2-normalizing the average of the embeddings across that interval.'''
#get starting embed by choosing closest to given start time
diff = 10
for i in range(len(embed_times)):
check = abs(start-embed_times[i][0]/1000)
if check < diff:
diff = check
start_emb = i
#get ending embed by choosing closest to given end time
diff = 10
for j in range(len(embed_times)):
check = abs(end-embed_times[j][1]/1000)
if check < diff:
diff = check
end_emb = j+1
#take average embedding across embedding interval
raw_embed = np.mean(cont_embeds[start_emb:end_emb], axis=0)
#L2 normalize
embed = raw_embed/np.linalg.norm(raw_embed,2)
return embed
def Diarize(ral, cont_embeds, embed_times, sim_thresh = 0.1, score_thresh=0.9, min_seg = 2):
'''Takes as input a RefAudioLibrary holding multiple reference embeddings for each speaker.
Additionally takes in the embeddings of the full court hearing and the time interval associated with each embedding.
sim_thresh = 0.1 by default -> if, at a time step, the difference between the highest and next-highest similarity
scores is below this threshold and the highest score is also below score_thresh, the step is labelled with the unreferenced-speaker constant (ral.uid, i.e. non-judge speech).
score_thresh = 0.9 by default -> similarity score threshold below which a step may be assigned to an unreferenced speaker.
min_seg = 2 seconds by default -> speaking segments shorter than this duration are absorbed into the preceding segment.
Returns: diarization as a list of [speaker name, start time, end time]'''
#creates similarity score for each reference audio with each case embedding at every timestep
similarity ={}
for speaker in ral.RAL.keys():
for i in range(len(ral.RAL[speaker])):
if speaker in similarity:
similarity[speaker].append(cont_embeds @ ral.RAL[speaker][i])
else:
similarity[speaker] = [cont_embeds @ ral.RAL[speaker][i]]
#create similarity dict that stores max val for each speaker at each timestep
similarity_max = {}
for speaker in ral.RAL.keys():
similarity_max[speaker]=np.max(similarity[speaker],axis=0)
#compute speaker with highest similarity score at each interval
#if diff between highest sim score and next highest < thresh, label as "Non-Judge"
speak = []
sim = []
next_sim = []
for i in range(len(cont_embeds)):
max_sim=0
next_max_sim = 0
for name in ral.RAL.keys():
similarity_score = similarity_max[name][i]
if similarity_score > max_sim:
max_sim = similarity_score
max_name = name
elif similarity_score > next_max_sim:
next_max_sim = similarity_score
if ((max_sim-next_max_sim) < sim_thresh) and (max_sim<score_thresh):
speak.append(ral.uid)
else:
speak.append(max_name)
#####
#get diarized list of speaker with actual speaking times and merge if next speaker is curr speaker
diarized = []
curr = speak[0]
start = round(embed_times[0][0]/(1000),2)
for i in range(len(speak)-1):
if curr == speak[i+1]:
continue
else:
diarized.append([curr,start,round(embed_times[i][1]/(1000),2)])
curr = speak[i+1]
start = round(embed_times[i][1]/(1000),2)
#for last iteration
#if last iteration speaker is same as very last speaker just append
if curr == speak[i+1]:
diarized.append([curr,start,round(embed_times[i][1]/(1000),2)])
#otherwise split into last iteration speaker and very last speaker
else:
diarized.append([curr,start,round(embed_times[i][1]/(1000),2)])
curr = speak[i+1]
start = round(embed_times[i][1]/(1000),2)
diarized.append([curr,start,round(embed_times[i+1][1]/(1000),2)])
#####
#####
#remove all small intervals less than min_seg indicated, and merge speaking durations
diarized_merged = []
curr = diarized[0][0]
start = diarized[0][1]
end = diarized[0][2]
for i in range(len(diarized)-1):
if (diarized[i+1][2]-diarized[i+1][1])<min_seg:
end = diarized[i+1][2]
else:
if curr == diarized[i+1][0]:
end = diarized[i+1][2]
else:
diarized_merged.append([curr,start,end])
start = end
end = diarized[i+1][2]
curr = diarized[i+1][0]
#for last iteration
diarized_merged.append([curr,start,end])
#####
return diarized_merged
def diar_to_rttm(diar, case, out_path, verbose=True):
torttm = []
for i, event in enumerate(diar):
torttm.append(' '.join(['SPEAKER '+case+' 1', str(event[1]), str(round(event[2]-event[1], 2)), '<NA> <NA>', event[0],'<NA> <NA>']))
with open(out_path+case+'_rdsv.rttm', 'w') as filehandle:
for listitem in torttm:
filehandle.write('%s\n' % listitem)
if verbose:
print('Case', case, 'diarization saved')
def rttmto_RALrttm(case, ral, in_path, out_path, verbose=True):
out_diary = []
spkr_tracker = []
diary = getDiary(in_path+case+'.rttm')
for entry in diary:
counter = 0
temp = entry[0].split(' ')
if temp[7] not in spkr_tracker:
spkr_tracker.append(temp[7])
if temp[7] not in ral.RAL.keys():
temp[7] = ral.uid
counter+=1
out_diary.append(' '.join(temp))
with open(out_path+case+'_ral.rttm', 'w') as filehandle:
for listitem in out_diary:
filehandle.write('%s\n' % listitem)
if verbose:
print(case, 'rttm has been RAL converted')
print(len(spkr_tracker), 'total speakers')
print([s for s in spkr_tracker if s not in ral.RAL.keys()], 'were unreffed')
```
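A hedged end-to-end sketch of how these pieces compose; paths, case ids, and the import path are placeholders, while the `.npy` naming follows `RefAudioLibrary.__init__` above:
```python
import numpy as np
from rdsv import RefAudioLibrary, Diarize, diar_to_rttm

# Build the reference library from reference cases that have RTTM files and precomputed embeddings.
ral = RefAudioLibrary({'case_ref'}, 'embeds/', 'rttm/', 'spkr_dict.json')

# Diarize a new case from its continuous embeddings and per-embedding time intervals.
cont_embeds = np.load('embeds/case_new_embeds.npy')
embed_times = np.load('embeds/case_new_embeds_times.npy')
diar = Diarize(ral, cont_embeds, embed_times, sim_thresh=0.1, score_thresh=0.9, min_seg=2)

# Write the result as RTTM: produces out/case_new_rdsv.rttm.
diar_to_rttm(diar, 'case_new', 'out/')
```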
#### File: rd-diarization/RDSV/run_LR.py
```python
import os, json, timeit
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
from sklearn.manifold import TSNE
from param import *
with open(set_path) as json_file:
set_dict = json.load(json_file)
with open(sd_path) as json_file:
spkr_dict = json.load(json_file)
def logit_thresh(log, l):
pred = []
for item in log:
if max(item)>l:
pred.append(np.argmax(item))
else:
pred.append(999)
return pred
rate_lr = {}
for r in tune_rate:
print('Processing for rate=', r)
path_out = inf_lab_path+'r'+str(r)+'/'
hold_label_r = 0
tr_seq = []
tr_id = []
for wav in set_dict['r']:
case = wav.split('.')[0]
embed = np.load(path_out+'{}_embeds.npy'.format(case))
label = np.load(path_out+'{}_embeds_labels.npy'.format(case))
for j, id in enumerate(label):
if id<20: #judge speakers
tr_seq.append(embed[j])
tr_id.append(id)
if not hold_label_r:
hold_label_r = len(tr_id)
X = np.asarray(tr_seq)
Y = np.asarray(tr_id)
hold_label_d = 0
dev_seq = []
dev_id = []
for wav in set_dict['d']:
case = wav.split('.')[0]
embed = np.load(path_out+'{}_embeds.npy'.format(case))
label = np.load(path_out+'{}_embeds_labels.npy'.format(case))
for j, id in enumerate(label):
if id<900: # no overlap or padding
dev_seq.append(embed[j])
if id>=20:
dev_id.append(999)
else:
dev_id.append(id)
if not hold_label_d:
hold_label_d = len(dev_id)
Xd = np.asarray(dev_seq)
Yd = np.asarray(dev_id)
# Train LR
clf = OneVsRestClassifier(LogisticRegression()).fit(X, Y)
logit = clf.predict_proba(Xd)
temp = []
for L in LR_lims:
temp.append(round(accuracy_score(Yd, logit_thresh(logit, L)),4))
print('Ensemble Classifier Accuracy for Threshold:', L,' === ', temp[-1])
rate_lr[r]=temp
if save_LR:
save_out = out_path+'r'+str(r)+'/'
if not os.path.exists(save_out):
os.mkdir(save_out)
np.save(save_out+'X.npy',X)
np.save(save_out+'Y.npy',Y)
np.save(save_out+'Xd.npy',Xd)
np.save(save_out+'Yd.npy',Yd)
with open(lr_path,'w') as out:
json.dump(rate_lr, out)
print('LR complete & dict saved')
```
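The thresholding helper is the core decision rule here; a self-contained restatement on toy probabilities (999 is the script's reject/unknown label):
```python
import numpy as np

def logit_thresh(log, l):
    # Same behaviour as the helper above: argmax when the top probability clears
    # the threshold l, otherwise the reject label 999.
    return [int(np.argmax(row)) if max(row) > l else 999 for row in log]

probs = np.array([[0.70, 0.20, 0.10],
                  [0.40, 0.35, 0.25]])
print(logit_thresh(probs, 0.5))   # -> [0, 999]
```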
{
"source": "JeffT13/SCOTUS_Speaker_Verification",
"score": 2
}
#### File: SCOTUS_Speaker_Verification/SCOTUS/scotus_dvector_inference.py
```python
import os, sys, json, csv
import glob, librosa
import torch
import numpy as np
from SpeechEmbedder.hparam import hparam_SCOTUS as hp
from SpeechEmbedder.speech_embedder_net import SpeechEmbedder
from SpeechEmbedder.VAD_segments import VAD_chunk
#-----------------------------
# Following functions adapted/introduced in fork
# replaces utils import in original repo
def concat_segs(times, segs):
#Concatenate continuous voiced segments
concat_seg = []
seg_concat = segs[0]
hold_times = []
t0 = 0
for i in range(0, len(times)-1):
if times[i][1] == times[i+1][0]:
seg_concat = np.concatenate((seg_concat, segs[i+1]))
else:
hold_times.append((t0, times[i][1]))
t0 = times[i+1][0]
concat_seg.append(seg_concat)
seg_concat = segs[i+1]
else:
concat_seg.append(seg_concat)
hold_times.append((t0, times[-1][1]))
return concat_seg, hold_times
def get_STFTs(segs,htemp):
#Get 240ms STFT windows with 50% overlap
sr = hp.data.sr
STFT_frames = []
STFT_labels = []
idx = 0
for seg in segs:
S = librosa.core.stft(y=seg, n_fft=hp.data.nfft,
win_length=int(hp.data.window * sr), hop_length=int(hp.data.hop * sr))
S = np.abs(S)**2
mel_basis = librosa.filters.mel(sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)
S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances
for j in range(0, S.shape[1], int(.12/hp.data.hop)):
if j + 24 < S.shape[1]:
STFT_frames.append(S[:,j:j+24])
STFT_labels.append(htemp[idx])
else:
break
idx+=1
return STFT_frames, STFT_labels
def align_embeddings(embeddings, labs):
partitions = []
start = 0
end = 0
j = 1
for i, embedding in enumerate(embeddings):
if (i*.12)+.24 < j*.401:
end = end + 1
else:
partitions.append((start,end))
start = end
end = end + 1
j += 1
else:
partitions.append((start,end))
avg_embeddings = np.zeros((len(partitions),256))
emb_labels = []
for i, partition in enumerate(partitions):
emb_lab = labs[partition[0]:partition[1]]
if len(set(emb_lab))>1:
continue
else:
avg_embeddings[i] = np.average(embeddings[partition[0]:partition[1]],axis=0)
emb_labels.append(emb_lab[0])
return avg_embeddings[0:len(emb_labels)], emb_labels
#new function
def align_times(casetimelist, hold_times, spkr_dict):
htemp = []
_, endtime, endspkr = casetimelist[-1]
for i, h in enumerate(hold_times):
append = False
if h[1]>=endtime:
htemp.append(spkr_dict[endspkr])
append = True
else:
for j, c in enumerate(casetimelist):
if h[1]<c[1]:
if h[1]>=c[0]:
spkr_name = c[2]
htemp.append(spkr_dict[spkr_name])
append = True
elif h[1]<c[0] and not append:
htemp.append(spkr_dict[casetimelist[j-1][2]])
append = True
elif not append:
print("labelling overlapping at ", h)
htemp.append(999) #overlap in diarization and VAD
append=True
if not append and h[1]!=hold_times[-1][1]:
print('value not appended in loop')
print(i, h)
return htemp
#-----------------------------
#initialize SpeechEmbedder
embedder_net = SpeechEmbedder()
embedder_net.load_state_dict(torch.load(hp.model.model_path))
embedder_net.to(hp.device)
embedder_net.eval()
case_path = glob.glob(os.path.dirname(hp.unprocessed_data))
with open(hp.data.dict_path+'spkrs.json') as json_file:
spkr_dict = json.load(json_file)
with open(hp.data.dict_path+'casetimes.json') as json_file:
casetimedict = json.load(json_file)
fold = hp.data.save_path
cut_div = 4
train_sequences = []
train_cluster_ids = []
print("starting generation")
for i, path in enumerate(case_path):
file = path.split('/')[-1]
if os.path.exists(fold+file[:-4]+'_seq.npy'):
print('skipped ', file[:-4])
continue
if file[-4:] == '.wav':
times, segs = VAD_chunk(2, path)
concat_seg, ht = concat_segs(times, segs)
# ---
#assumes files will be precleaned
#need to improve for any full case set processing
htemp = align_times(casetimedict[file[:-4]], ht, spkr_dict)
if hp.data.verbose:
#if len(ht)!= len(htemp) then
# skip case and track case_id
print(len(ht))
print(len(htemp))
# ---
STFT_frames, STFT_labels = get_STFTs(concat_seg, htemp)
STFT_frames = np.stack(STFT_frames, axis=2)
STFT_frames = torch.tensor(np.transpose(STFT_frames, axes=(2,1,0)))
cut = STFT_frames.size()[0]//cut_div
t0 = 0
temp_emb = []
for i in range(cut_div):
if i<(cut_div-1):
STFT_samp = STFT_frames[t0:t0+cut, :, :]
else:
STFT_samp = STFT_frames[t0:, :, :]
#process slice
STFT_samp = STFT_samp.to(hp.device)
emb = embedder_net(STFT_samp)
temp_emb.append(emb.detach().cpu().numpy())
t0+=cut
embeddings = np.concatenate(temp_emb, axis=0)
print(embeddings.shape, len(STFT_labels))
aligned_embeddings, aligned_labels = align_embeddings(embeddings, STFT_labels)
train_sequences.append(aligned_embeddings)
train_cluster_ids.append(aligned_labels)
np.save(fold+file[:-4]+'_seq', aligned_embeddings)
np.save(fold+file[:-4]+'_id', aligned_labels)
print('values appended')
full_set = False
if full_set:
np.save('/scratch/jt2565/train_seq', train_sequences)
np.save('/scratch/jt2565/train_clus', train_cluster_ids)
```
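The inner loop above runs the embedder on `cut_div` slices of the frame tensor rather than the whole case at once; the same bookkeeping in isolation, on a dummy array (shapes made up):
```python
import numpy as np

cut_div = 4
frames = np.zeros((10, 24, 40))          # (n_windows, time steps, mel bins)
cut = frames.shape[0] // cut_div
t0, chunks = 0, []
for i in range(cut_div):
    # all but the last slice take `cut` windows; the last takes the remainder
    chunk = frames[t0:t0 + cut] if i < cut_div - 1 else frames[t0:]
    chunks.append(chunk)
    t0 += cut
print([c.shape[0] for c in chunks])      # -> [2, 2, 2, 4]
```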
{
"source": "jefftc/changlab",
"score": 3
}
#### File: changlab/arrayio/csv_format.py
```python
def is_format(locator_str, hrows=None, hcols=None):
from genomicode import filelib
if not filelib.exists(locator_str):
return False
handle = filelib.openfh(locator_str)
x = handle.readline()
handle.close() # need to close it properly, or gunzip might not die.
if not x: # blank file
return False
if "," in x:
return True
return False
def is_matrix(X):
# Any matrix can be csv format.
return True
def read(handle, hrows=None, hcols=None, datatype=float):
import csv
from StringIO import StringIO
from genomicode import filelib
import tab_delimited_format
# Convert this to tab-delimited format and let the other module
# deal with it.
outhandle = StringIO()
reader = csv.reader(filelib.openfh(handle))
for row in reader:
print >>outhandle, "\t".join(row)
outhandle.seek(0)
return tab_delimited_format.read(
outhandle, hrows=hrows, hcols=hcols, datatype=datatype)
def write(X, handle):
# Who wants to write CSV?
# If implement, need to be careful to escape commas in gene
# descriptions, etc.
raise NotImplementedError
```
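The interesting part of `read` is simply rewriting each CSV row as a tab-joined line before delegating to `tab_delimited_format`; a self-contained Python 3 sketch of that conversion step (the module itself is written for Python 2):
```python
import csv
import io

csv_text = "NAME,DESCRIPTION,S1,S2\ng1,first gene,1.0,2.0\n"
outhandle = io.StringIO()
for row in csv.reader(io.StringIO(csv_text)):
    print("\t".join(row), file=outhandle)
outhandle.seek(0)
print(outhandle.read())   # tab-delimited text, ready for a tab-delimited reader
```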
#### File: changlab/arrayio/gct_format.py
```python
def is_format(locator_str, hrows=None, hcols=None):
from genomicode import filelib
if not filelib.exists(locator_str):
return False
if hrows not in [None, 1]:
return False
if hcols not in [None, 2]:
return False
# Read 5 lines and check the headers.
handle = filelib.openfh(locator_str)
lines = [handle.readline() for i in range(5)]
handle.close() # need to close it properly, or gunzip might not die.
lines = [x for x in lines if x]
matrix = [line.rstrip("\r\n").split("\t") for line in lines]
if len(matrix) < 3:
return False
# First line could be just one column, or could be many columns.
if len(matrix[0]) < 1:
return False
# Second line must have at least 2 columns.
if len(matrix[1]) < 2:
return False
if matrix[0][0] != "#1.2":
return False
#if matrix[2][0].strip().upper() != "NAME":
# return False
#if matrix[2][1].strip().upper() != "DESCRIPTION":
# return False
return True
DIAGNOSIS = ""
def is_matrix(X):
global DIAGNOSIS
import tab_delimited_format as tdf
DIAGNOSIS = ""
if not hasattr(X, "col_names") or not hasattr(X, "row_names"):
DIAGNOSIS = "No annotations."
return False
if tdf.SAMPLE_NAME not in X.col_names():
DIAGNOSIS = "No samples."
return False
if len(X.col_names()) != 1:
DIAGNOSIS = "Extra sample annotations."
return False
if len(X.row_names()) != 2:
DIAGNOSIS = "Row annotations not right."
return False
# Make sure "NAME" and "DESCRIPTION" are present somewhere.
x = [x.upper() for x in X.row_names() + X._synonyms.keys()]
if "NAME" not in x or "DESCRIPTION" not in x:
DIAGNOSIS = "Missing NAME and/or DESCRIPTION headers."
return False
#x = [x.upper() for x in X.row_headers()]
#if sorted(x) != sorted(["NAME", "DESCRIPTION"]):
# return False
return True
def read(handle, hrows=None, hcols=None, datatype=float):
import const
import tab_delimited_format
from genomicode import filelib
assert hrows is None or hrows == 1
assert hcols is None or hcols == 2
handle = filelib.openfh(handle)
assert handle.readline().strip() == "#1.2"
x = handle.readline().rstrip("\r\n").split("\t")
assert len(x) >= 2
num_genes, num_samples = map(int, x[:2])
X = tab_delimited_format.read(handle, hrows=1, hcols=2, datatype=datatype)
assert X.dim() == (num_genes, num_samples), (
"Matrix size mismatch.\n"
"The GCT headers indicate a matrix with %d rows and %d columns.\n"
"However, I found %d rows and %d columns." % (
num_genes, num_samples, X.nrow(), X.ncol()))
#assert X.row_headers()[0].upper() == "NAME"
#assert X.row_headers()[1].upper() == "DESCRIPTION"
header0, header1 = X.row_names()[:2]
synonyms = {}
NAME, DESCRIPTION = "NAME", "DESCRIPTION"
if header0 != NAME:
synonyms[NAME] = header0
if header1 != DESCRIPTION:
synonyms[DESCRIPTION] = header1
synonyms[const.ROW_ID] = header0
X._synonyms.update(synonyms)
#X = Matrix.add_synonyms(X, synonyms)
assert is_matrix(X)
# The GCT File Format description at the Broad Institute does not
# require the NAMEs to be unique.
## Make sure the NAMEs are unique.
#seen = {}
#dups = {}
#for name in X.row_annots(NAME):
# if name in seen:
# dups[name] = 1
# seen[name] = 1
#dups = sorted(dups)
#assert len(dups) < 5, "%s column has %d duplicated names." % (
# header0, len(dups))
#assert not dups, "%s column has duplicated names: %s" % (
# header0, dups)
return X
def write(X, handle):
import tab_delimited_format
assert is_matrix(X)
num_genes, num_samples = X.dim()
handle.write("#1.2\n")
handle.write("%d\t%d\n" % (num_genes, num_samples))
tab_delimited_format.write(X, handle)
```
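Putting the checks in `is_format` and `read` together, this is the minimal shape of a GCT file they accept (all values made up):
```python
gct_text = "\n".join([
    "#1.2",                               # version line, checked verbatim
    "2\t3",                               # num_genes <TAB> num_samples
    "NAME\tDESCRIPTION\tS1\tS2\tS3",      # two annotation columns, then sample names
    "g1\tfirst gene\t1.0\t2.0\t3.0",
    "g2\tsecond gene\t4.0\t5.0\t6.0",
])
lines = gct_text.split("\n")
assert lines[0] == "#1.2"                 # same checks that read() makes above
assert len(lines[1].split("\t")) >= 2
```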
#### File: changlab/arrayio/jeffs_format.py
```python
import const
ROW_HEADERS = ["Probe.Set.ID", "Description", "LocusLink", "Gene.Symbol"]
MYNAME_TO_STDNAME = [
("Probe.Set.ID", const.ROW_ID),
("Probe.Set.ID", const.AFFY_PROBESET_ID),
("Description", const.GENE_DESCRIPTION),
("LocusLink", const.GENE_ID),
("Gene.Symbol", const.GENE_SYMBOL),
]
def is_format(locator_str, hrows=None, hcols=None):
from genomicode import filelib
import util
if hrows not in [None, 1]:
return False
if hcols not in [None, 4]:
return False
if not filelib.exists(locator_str):
# This will only work if locator_str is a string.
return False
# Read 5 lines and check the headers. If the file is small, this
# may contain fewer than 5 lines.
handle = filelib.openfh(locator_str)
lines = [handle.readline() for i in range(5)]
handle.close() # need to close it properly, or gunzip might not die.
lines = [x for x in lines if x]
matrix = [line.rstrip("\r\n").split("\t") for line in lines]
# Make sure there's at least 1 line.
if not matrix:
return False
header = matrix[0]
if header[:len(ROW_HEADERS)] != ROW_HEADERS:
return False
# Check if there's extraneous stuff.
nr, nc = util.num_headers(matrix)
if nc > 4:
return False
return True
#handle = filelib.openfh(locator_str)
#x = handle.readline()
#handle.close() # need to close it properly, or gunzip might not die.
#row = x.rstrip("\r\n").split("\t")
#if row[:len(ROW_HEADERS)] == ROW_HEADERS:
# return True
#return False
def is_matrix(X):
#import tab_delimited_format as tdf
if not hasattr(X, "row_names") or not hasattr(X, "col_names"):
return False
# Should only include SAMPLE_NAME.
if len(X.col_names()) != 1:
return False
if len(X.row_names()) != len(ROW_HEADERS):
return False
for header in X.row_names():
if header not in ROW_HEADERS:
return False
return True
def read(handle, hrows=None, hcols=None, datatype=float):
import tab_delimited_format as tdf
assert hrows is None or hrows == 1
assert hcols is None or hcols == 4
X = tdf.read(handle, hrows=1, hcols=4, datatype=datatype)
synonyms = {}
for myname, stdname in MYNAME_TO_STDNAME:
synonyms[stdname] = myname
#X = Matrix.add_synonyms(X, synonyms)
X._synonyms.update(synonyms)
assert is_matrix(X)
return X
def write(X, handle):
import tab_delimited_format
assert is_matrix(X)
tab_delimited_format.write(X, handle)
```
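For reference, the header row `is_format` looks for is exactly the four annotation columns followed by the sample names (sample names made up):
```python
ROW_HEADERS = ["Probe.Set.ID", "Description", "LocusLink", "Gene.Symbol"]
header = ROW_HEADERS + ["Sample01", "Sample02"]
assert header[:len(ROW_HEADERS)] == ROW_HEADERS   # the comparison used in is_format above
```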
#### File: changlab/arrayio/tab_delimited_format.py
```python
SAMPLE_NAME = "_SAMPLE_NAME"
def is_format(locator_str, hrows=None, hcols=None):
from genomicode import filelib
if not filelib.exists(locator_str):
return False
# Read 5 lines and check the headers. If the file is small, this
# may contain fewer than 5 lines.
handle = filelib.openfh(locator_str)
lines = [handle.readline() for i in range(5)]
handle.close() # need to close it properly, or gunzip might not die.
lines = [x for x in lines if x]
matrix = [line.rstrip("\r\n").split("\t") for line in lines]
matrix = _clean_tdf(matrix)
# Make sure there's at least 1 line.
if not matrix:
return False
# All rows should contain at least one column.
for x in matrix:
if not x:
return False
# All rows should contain the same number of columns.
for x in matrix:
if len(x) != len(matrix[0]):
return False
return True
def is_matrix(X):
# Any matrix can be a tab-delimited format.
return True
def _clean_tdf(matrix):
# Return a cleaned up matrix.
from genomicode import jmath
# Sometimes people insert blank rows or columns inside the matrix.
# Remove all of those.
# Delete blank rows.
matrix = [x for x in matrix if x]
# The first line can contain one fewer column than the rest of the
# matrix for two reasons.
# 1. R writes col names that contain one fewer column than the
# rest of the matrix, if row names are also requested.
# 2. For GEO data sets, each non-header row ends with a '\t', so
# they have an extra blank column.
# Detect if either of these are the case. If Case #1, then add a
# dummy header. If Case #2, then delete the extra blank columns.
all_one_fewer = True
for i in range(1, len(matrix)):
if len(matrix[i]) != len(matrix[0])+1:
all_one_fewer = False
break
last_column_blank = True
for i in range(1, len(matrix)):
if matrix[i][-1] != "":
last_column_blank = False
break
# This can happen either if the length of the first row is one
# less than every other row of the matrix, or if matrix has
# only 1 row.
if all_one_fewer and len(matrix) > 1:
if last_column_blank:
for i in range(1, len(matrix)):
matrix[i] = matrix[i][:-1]
else:
matrix[0].insert(0, "ROW_NAMES")
## # Make sure each line has the same number of columns.
## for i, x in enumerate(matrix):
## f = ""
## #if filename:
## # f = " [%s]" % filename
## error_msg = "Header%s has %d columns but line %d has %d." % (
## f, num_cols, i + 1, nc)
## assert nc == num_cols, error_msg
# Matlab appends blank columns to the end. Delete columns that
# are completely blank.
# DEBUG.
#handle = open("/home/jchang/debug.txt", 'w')
#for x in data:
# print >>handle, "\t".join(map(str, x))
#handle.close()
i = 0
while matrix and matrix[0] and i < len(matrix[0]):
for row in matrix:
# This row is too short, or there's data in this column.
# Ignore it.
if i >= len(row) or row[i]:
i += 1
break
else:
[x.pop(i) for x in matrix]
# Sometimes, a user might cluster a matrix with an empty column
# using Cluster 3.0. In this case, Cluster 3.0 will preserve the
# empty column, except for a "1.000000" for the EWEIGHT row
# header. Try to detect this case and remove the "1.000000".
last_col = [x[-1] for x in matrix]
#non_empty = [x for x in last_col if x.strip()]
non_empty = [x for x in last_col if x]
value = None
if len(non_empty) == 1:
value = non_empty[0]
try:
value = float(value)
except ValueError, x:
pass
if value is not None and \
type(value) is type(0.0) and abs(value - 1.00) < 1E-10:
for i in range(len(matrix)):
matrix[i][-1] = ""
# Strip whitespace.
for i in range(len(matrix)):
for j in range(len(matrix[i])):
matrix[i][j] = matrix[i][j].strip()
return matrix
def read(handle, hrows=None, hcols=None, datatype=float):
import math
from genomicode import filelib
from genomicode import Matrix
from genomicode import jmath
from genomicode import iolib
import util
import const
# Format:
# - gene x experiment
# - optional header row
# - optional rows of sample annotations (requires header row)
# - optional columns of gene annotations
filename = None
if type(handle) is type(""):
filename = handle
handle = filelib.openfh(handle)
data = filelib.read_all_cols(handle)
#data = [x for x in filelib.read_cols(handle)]
#x = handle.read()
#data = iolib.split_tdf(x, strip=True)
#handle = filelib.read_cols(handle)
#data = [handle.next() for i in range(100)]
data = _clean_tdf(data)
num_cols = len(data[0])
for i, x in enumerate(data):
nc = len(data[i])
f = ""
if filename:
f = " [%s]" % filename
error_msg = "Header%s has %d columns but line %d has %d." % (
f, num_cols, i + 1, nc)
assert nc == num_cols, error_msg
if not data:
return Matrix.InMemoryMatrix([])
# If the rows and cols not explicitly specified, then try to guess
# them from the file.
#print "HEADERS 1", hrows, hcols
if hrows is None or hcols is None:
hr, hc = util.num_headers(data)
if hrows is None:
hrows = hr
if hcols is None:
hcols = hc
#print "HEADERS 2", hrows, hcols
#num_genes, num_arrays = num_rows-hrows, num_cols-hcols
# Pull out the row names from the columns.
row_names = {} # header -> list of names (1 for each gene)
row_order = [] # in-order list of the headers
if hcols:
if hrows:
# If a header row is provided, then the names of these
# annotations are provided in the header.
row_order = data[0][:hcols]
else:
# No header row. Make default name for these annotations.
ndigits = int(math.ceil(math.log(hcols, 10)))
row_order = ["ANNOT%*d" % (ndigits, i + 1) for i in range(hcols)]
# Strip extraneous whitespace from the header names.
# Not necessary. Handled now in split_tdf.
#row_order = [x.strip() for x in row_order]
# Sometimes the format detection can go wrong and a GCT file
# will slip through to here. If this occurs, a "duplicate
# header" exception will be generated. Check for this and
# generate a more meaningful error message.
if(row_order[0] == "#1.2" and len(row_order) > 1 and
row_order[1] == "" and row_order[-1] == ""):
raise AssertionError("ERROR: It looks like a GCT file was missed.")
for i, header in enumerate(row_order):
names = [x[i] for x in data[hrows:]]
assert header not in row_names, "duplicate header: %s" % header
row_names[header] = names
# Pull out the column names.
col_names = {} # header -> list of names (1 for each array)
col_order = []
if hrows:
for i in range(1, hrows):
header = data[i][0]
names = data[i][hcols:]
assert header not in col_names, "duplicate name: %s" % header
# Strip extraneous whitespace from the header names.
# Not necessary. Handled now in split_tdf.
#header = header.strip()
col_order.append(header)
col_names[header] = names
# Now extract the expression values.
matrix = data
if hrows or hcols:
matrix = [x[hcols:] for x in matrix[hrows:]]
# Pull out the sample names.
sample_names = None
if hrows:
# If a header is provided, then use these as the column names.
sample_names = data[0][hcols:]
if sample_names:
col_names[SAMPLE_NAME] = sample_names
col_order.insert(0, SAMPLE_NAME)
if datatype is None:
convert_fn = None # no conversion
elif datatype is int:
convert_fn = jmath.safe_int
elif datatype is float:
convert_fn = jmath.safe_float
else:
# Assume that I was passed a function.
convert_fn = datatype
if convert_fn == jmath.safe_float:
# Try and convert to an integer instead.
is_int = True
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if not jmath.is_int(matrix[i][j]):
is_int = False
break
if not is_int:
break
if is_int:
convert_fn = jmath.safe_int
if convert_fn:
check_each_row = False
try:
matrix = [map(convert_fn, x) for x in matrix]
except ValueError, err1:
if str(err1) == "empty string for float()":
check_each_row = True
elif str(err1).startswith("invalid literal for float()"):
check_each_row = True
elif str(err1).startswith("could not convert string to float"):
check_each_row = True
else:
raise
if check_each_row:
# If there was an exception, then check each row carefully
# to try to pinpoint the problem.
for i, x in enumerate(matrix):
try:
map(convert_fn, x)
except ValueError, err2:
row = data[hrows + i]
raise ValueError("%s\nProblem with row %d: %s" % (
str(err2), i + 1, row))
raise AssertionError("Error converting values.")
# Set ROW_ID and COL_ID to reasonable defaults.
synonyms = {}
if SAMPLE_NAME in col_names:
synonyms[const.COL_ID] = SAMPLE_NAME
if row_order:
# Bug: This should be the first column with unique values.
synonyms[const.ROW_ID] = row_order[0]
X = Matrix.InMemoryMatrix(
matrix, row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
#X = Matrix.add_synonyms(X, synonyms)
return X
CLEAN_RE = None
CLEAN_DISALLOWED = None
def _clean(s, disallowed=None):
# Make sure there are no disallowed characters in the string s.
global CLEAN_RE
global CLEAN_DISALLOWED
import re
disallowed = disallowed or "\r\n\t"
if CLEAN_RE is None or CLEAN_DISALLOWED != disallowed:
CLEAN_RE = re.compile("[%s]" % disallowed)
CLEAN_DISALLOWED = disallowed
s, count = CLEAN_RE.subn(" ", s)
s = s.strip()
return s
def _clean_many(l, disallowed=None):
l = [_clean(x, disallowed=disallowed) for x in l]
return l
def write(X, handle):
from genomicode import iolib
assert is_matrix(X)
if type(handle) is type(""):
handle = open(handle, 'w')
row_names = X.row_names()
col_names = X.col_names()
M_out = [] # Matrix to write out.
# Print out the header row if there are row headers or sample
# names.
header = []
if SAMPLE_NAME in col_names:
header = X.col_names(SAMPLE_NAME)
if row_names:
header = row_names + header
if header:
M_out.append(header)
#if row_names:
# header = row_names
# if SAMPLE_NAME in col_names:
# header = header + X.col_names(SAMPLE_NAME)
# M_out.append(header)
# #header = _clean_many(header)
# #print >>handle, "\t".join(header)
# Print out the column annotations.
for header in col_names:
if header == SAMPLE_NAME:
continue
# If there are no row_names, then there is no room for column
# names. Skip it.
x = X.col_names(header)
if row_names:
x = [header] + [""] * (len(row_names) - 1) + x
M_out.append(x)
#x = _clean_many(map(str, x))
#print >>handle, "\t".join(x)
# Print out the row ids and data.
nrow, ncol = X.dim()
M = X.slice()
header2rownames = {}
for header in row_names:
header2rownames[header] = X.row_names(header)
for i in range(nrow):
#names = [X.row_names(header)[i] for header in row_names]
names = [header2rownames[header][i] for header in row_names]
# M[i] might be tuples.
values = list(M[i])
x = names + values
M_out.append(x)
iolib.cleanwrite(M_out, handle)
```
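Two of the first-row anomalies that `_clean_tdf` repairs, shown on toy matrices (data made up):
```python
# Case 1 (R-style): the header row has one fewer column than the data rows,
# so a "ROW_NAMES" header gets prepended to it.
r_style = [["S1", "S2"],
           ["gene1", "1.0", "2.0"],
           ["gene2", "3.0", "4.0"]]

# Case 2 (GEO-style): each data row ends with a trailing tab, leaving a blank
# last column, which gets dropped instead.
geo_style = [["ID", "S1", "S2"],
             ["gene1", "1.0", "2.0", ""],
             ["gene2", "3.0", "4.0", ""]]
```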
#### File: Betsy/attic/bie3.160122.py
```python
TYPE_ATOM = 100
TYPE_ENUM = 101
# Constraints
MUST_BE = 200
CAN_BE_ANY_OF = 201
SAME_AS = 202
# Consequences
SET_TO = 300
SET_TO_ONE_OF = 301
BASED_ON_DATA = 302
SAME_AS_CONSTRAINT = 303
# CONSTRAINT
# behavior arg1 input_index
# MUST_BE value <optional> (set to 0 by default)
# CAN_BE_ANY_OF list of values <optional> (set to 0 by default)
# SAME_AS index of datatype index of datatype
#
# input_index is the index of the DataType that this constraint
# applies to. So for SAME_AS, that means data[input_index] should get
# its value from data[arg1].
# CONSEQUENCE
# behavior arg1 arg2
# SET_TO <string> None
# SET_TO_ONE_OF <list> None
# BASED_ON_DATA <list> None
# SAME_AS_CONSTRAINT index of input data None
# o arg2 is not used? Can get rid of it.
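# Illustrative example of how these tables read (attribute and value names are made up):
#   Constraint("preprocess", CAN_BE_ANY_OF, ["unknown", "rma"], 0)
#       -> input datatype 0 may arrive with preprocess set to any of ["unknown", "rma"]
#   Constraint("preprocess", SAME_AS, 0, input_index=1)
#       -> input datatype 1 must carry the same preprocess value as input datatype 0
#   Consequence("preprocess", SAME_AS_CONSTRAINT, 0)
#       -> the output's preprocess value is copied from the constraint on input datatype 0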
CONST2STR = {
TYPE_ATOM: "TYPE_ATOM",
TYPE_ENUM: "TYPE_ENUM",
MUST_BE: "MUST_BE",
CAN_BE_ANY_OF: "CAN_BE_ANY_OF",
SAME_AS: "SAME_AS",
SET_TO: "SET_TO",
SET_TO_ONE_OF: "SET_TO_ONE_OF",
BASED_ON_DATA: "BASED_ON_DATA",
SAME_AS_CONSTRAINT: "SAME_AS_CONSTRAINT",
}
DEBUG = False
#DEBUG = True
# When backchaining, should we allow the attributes of the input data
# to be all possible values, or fix it to the default? All possible
# values is correct, but generates a combinatorial explosion that is
# difficult to manage.
DEFAULT_INPUT_ATTRIBUTE_IS_ALL_VALUES = False
#DEFAULT_INPUT_ATTRIBUTE_IS_ALL_VALUES = True
MAX_NETWORK_SIZE = 1024 * 8
class AttributeDef:
def __init__(self, name, values, default_in, default_out, help=None):
# Make sure name and values are valid.
assert type(name) is type("")
assert type(values) is type([]), "Value must be list: %s" % \
type(values)
for x in values:
assert type(x) is type("")
# Make sure no duplicated values.
seen = {}
for x in values:
assert x not in seen, "Duplicated value (%s) in %s." % (x, name)
seen[x] = 1
# Make sure default_in and default_out are valid values.
assert type(default_in) is type(""), "default_in must be ATOM"
assert type(default_out) is type(""), "default_out must be ATOM"
assert default_in in values, \
"Invalid value %r for attribute %r." % (default_in, name)
assert default_out in values, \
"Invalid value %r for attribute %r." % (default_out, name)
self.name = name
self.values = values
self.default_in = default_in
self.default_out = default_out
self.help = help
def is_valid_value(self, value):
if type(value) is type(""):
return value in self.values
elif type(value) is type([]):
return _is_subset(value, self.values)
raise AssertionError
def __cmp__(self, other):
if not isinstance(other, AttributeDef):
return cmp(id(self), id(other))
x1 = [self.name, self.values, self.default_in, self.default_out,
self.help]
x2 = [other.name, other.values, other.default_in, other.default_out,
other.help]
return cmp(x1, x2)
def __hash__(self):
x = self.name, tuple(self.values), self.default_in, self.default_out, \
self.help
return hash(x)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [repr(self.name),
repr(self.values),
repr(self.default_in),
repr(self.default_out), ]
if self.help is not None:
x.append("help=%r" % self.help)
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
@staticmethod
def __init_from_dict(args):
#inst = AttributeDef(**args)
inst = AttributeDef(
args["name"], args["values"], args["default_in"],
args["default_out"], help=args.get("help"))
return inst
class Attribute:
def __init__(self, datatype, name, value):
assert isinstance(datatype, DataType)
assert type(name) is type("")
assert type(value) is type("")
# Check if this is a valid attribute name for the datatype.
x = [x for x in datatype.attribute_defs if x == name]
#x = [x for x in datatype.attribute_defs if x.name == name]
assert len(x) == 1, "datatype %r does not have attribute %r." % (
datatype.name, name)
#attr = x[0]
assert datatype.is_valid_attribute_value(name, value), \
"Invalid value %r for attribute %r." % (value, name)
self.datatype = datatype
self.name = name
self.value = value
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [self.datatype.name, repr(self.name), repr(self.value)]
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
class OptionDef:
def __init__(self, name, default=None, help=None):
assert type(name) is type("")
self.name = name
self.default = default
self.help = help
def __cmp__(self, other):
if not isinstance(other, OptionDef):
return cmp(id(self), id(other))
x1 = [self.name, self.default, self.help]
x2 = [other.name, other.default, other.help]
return cmp(x1, x2)
def __hash__(self):
x = self.name, self.default, self.help
return hash(x)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [repr(self.name), ]
if self.default is not None:
x.append("default=%r" % self.default)
if self.help is not None:
x.append("help=%r" % self.help)
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
@staticmethod
def __init_from_dict(args):
assert 'name' in args
assert 'default' in args
assert 'help' in args
inst = OptionDef(
args['name'], default=args['default'], help=args['help'])
return inst
class Option:
def __init__(self, module, name, value):
assert isinstance(module, ModuleNode)
assert type(name) is type("")
assert type(value) is type("")
self.module = module
self.name = name
self.value = value
class Constraint(object):
def __init__(self, name, behavior, arg1=None, input_index=None):
if behavior == MUST_BE:
# name Name of attribute.
# arg1 Value of the attribute.
assert type(arg1) is type("")
elif behavior == CAN_BE_ANY_OF:
# name Name of attribute.
# arg1 List of values of the attribute.
assert type(arg1) in [type([]), type(())]
for x in arg1:
assert type(x) is type("")
elif behavior == SAME_AS:
# name Name of attribute.
# arg1 Index of the datatype that this must match.
assert type(arg1) is type(0), (
"arg1 should be the index of the datatype with the "
"same attribute")
assert input_index is not None, (
"input_index must be given for SAME_AS constraint"
)
assert type(input_index) is type(0)
if input_index is not None:
assert arg1 != input_index
else:
raise AssertionError, "Invalid behavior (%s) for constraint %s." %\
(behavior, name)
assert input_index is None or type(input_index) is type(0)
if behavior == CAN_BE_ANY_OF and len(arg1) == 1:
behavior = MUST_BE
arg1 = arg1[0]
self.name = name
self.behavior = behavior
self.arg1 = arg1
self.input_index = input_index or 0
def __cmp__(self, other):
if not isinstance(other, Constraint):
return cmp(id(self), id(other))
x1 = [self.name, self.behavior, self.arg1, self.input_index]
x2 = [other.name, other.behavior, other.arg1, other.input_index]
return cmp(x1, x2)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [repr(self.name), CONST2STR[self.behavior], ]
if self.arg1 is not None:
x.append(repr(self.arg1))
if self.input_index is not None:
x = x + ["input_index=%s" % self.input_index]
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
@staticmethod
def __init_from_dict(args):
assert 'name' in args
assert 'behavior' in args
assert 'arg1' in args
assert 'input_index' in args
inst = Constraint(
args['name'], args['behavior'], arg1=args['arg1'],
input_index=args['input_index'])
return inst
class Consequence(object):
def __init__(self, name, behavior,
arg1=None,
arg2=None,
side_effect=False):
if behavior == SET_TO:
assert type(arg1) is type("")
assert arg2 is None
elif behavior in [SET_TO_ONE_OF, BASED_ON_DATA]:
assert type(arg1) in [type([]), type(())], "arg should be list"
for x in arg1:
assert type(x) is type("")
assert arg2 is None
elif behavior == SAME_AS_CONSTRAINT:
if arg1 is None: # default to datatype 0.
arg1 = 0
assert type(arg1) is type(0), (
"Argument to SAME_AS_CONSTRAINT should be the index "
"of the input variable.")
assert arg2 is None
else:
raise AssertionError, "Invalid consequence: %s" % behavior
self.name = name
self.behavior = behavior
self.arg1 = arg1
self.arg2 = arg2
self.side_effect = side_effect
def __cmp__(self, other):
if not isinstance(other, Consequence):
return cmp(id(self), id(other))
x1 = [self.name, self.behavior, self.arg1, self.arg2, self.side_effect]
x2 = [other.name, other.behavior, other.arg1, other.arg2,
other.side_effect]
return cmp(x1, x2)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [repr(self.name), CONST2STR[self.behavior], ]
if self.arg1 is not None:
x.append(repr(self.arg1))
if self.arg2 is not None:
assert self.arg1 is not None
x.append(repr(self.arg2))
if self.side_effect:
x = x + ["side_effect=True"]
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
@staticmethod
def __init_from_dict(args):
assert 'name' in args
assert 'behavior' in args
assert 'arg1' in args
assert 'arg2' in args
assert 'side_effect' in args
inst = Consequence(
args['name'], args['behavior'],
arg1=args['arg1'],
arg2=args['arg2'],
side_effect=args['side_effect'])
return inst
class DefaultAttributesFrom(object):
def __init__(self, input_index):
assert type(input_index) is type(0)
self.input_index = input_index
def __cmp__(self, other):
if not isinstance(other, DefaultAttributesFrom):
return cmp(id(self), id(other))
x1 = [self.input_index]
x2 = [other.input_index]
return cmp(x1, x2)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [str(self.input_index), ]
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
@staticmethod
def __init_from_dict(args):
assert 'input_index' in args
inst = DefaultAttributesFrom(args['input_index'])
return inst
class DataType:
def __init__(self, name, *attribute_defs, **keywds):
for x in attribute_defs:
assert isinstance(x, AttributeDef), repr(x)
for x in keywds:
assert x in ["help"]
# Optimizations:
# 1. Save attribute_defs as a dictionary for fast lookups.
# 2. Pre-hash objects for fast comparisons.
attr_defs_dict = {} # name -> AttributeDef
for adef in attribute_defs:
assert adef.name not in attr_defs_dict, \
"Multiple attributes named %s" % adef.name
attr_defs_dict[adef.name] = adef
x = [attr_defs_dict[x] for x in sorted(attr_defs_dict)]
attr_defs_tuple = tuple(x)
self.name = name
#self.attribute_defs = attribute_defs # AttributeDef
self.attribute_defs = attr_defs_dict
self._attribute_names = sorted(attr_defs_dict) # optimize
self.help = keywds.get("help")
self.hash_ = hash((name, hash(attr_defs_tuple), self.help))
def get_attribute_def(self, name):
#x = [x for x in self.attribute_defs if x.name == name]
#assert len(x) > 0, "DataType %s has no attribute %s." % (
# repr(self.name), repr(name))
#assert len(x) == 1, "Multiple attributes with same name?"
#return x[0]
if name not in self.attribute_defs:
raise KeyError, "DataType %s has no attribute %s." % (
repr(self.name), repr(name))
return self.attribute_defs[name]
def get_attribute_names(self):
#return [x.name for x in self.attribute_defs]
#return sorted(self.attribute_defs)
return self._attribute_names
def is_valid_attribute_name(self, name):
return name in self.get_attribute_names()
def is_valid_attribute_value(self, name, value):
attr = self.get_attribute_def(name)
return attr.is_valid_value(value)
def assert_valid_attribute_dict(self, attr_dict):
# attr_dict is dictionary of name -> value. Check if
# everything in this dictionary is valid.
# This function is called frequently, so inline many of the
# function calls for speed.
x = self.get_attribute_names()
all_names = {}.fromkeys(x)
for name, value in attr_dict.iteritems():
assert name in all_names, \
"'%s' is not a known attribute for datatype %s." % (
name, self.name)
if name not in self.attribute_defs:
raise KeyError, "DataType %s has no attribute %s." % (
repr(self.name), repr(name))
#attr = self.attribute_defs[name]
# Optimization for:
#assert attr.is_valid_value(value), \
# "In a %s, '%s' is not a valid value for '%s'." % (
# self.name, value, name)
# Makes code more complicated for a minor speedup.
#attr_values_dict = attr.values_dict
#is_valid = True
#if type(value) is type(""):
# is_valid = value in attr_values_dict
#elif type(value) is type([]):
# for x in value:
# if x not in attr_values_dict:
# is_valid = False
# break
#assert is_valid, \
# "In a %s, '%s' is not a valid value for '%s'." % (
# self.name, value, name)
def __cmp__(self, other):
if not isinstance(other, DataType):
return cmp(id(self), id(other))
# Bug: should compare attributes without regard to order.
#x1 = [self.name, self.attribute_defs, self.help]
#x2 = [other.name, other.attribute_defs, other.help]
#return cmp(x1, x2)
return cmp(self.hash_, other.hash_)
def __hash__(self):
#x = self.name, tuple(self.attribute_defs), self.help
#return hash(x)
return self.hash_
def _resolve_attributes(self, attribute_dict, is_input):
# Make a dictionary of all the attributes. The values given
# by the caller take precedence. Anything else should be set
# to the default attributes.
attrdict = {}
# Priority 1: Set to the attribute dict.
for (name, value) in attribute_dict.iteritems():
attrdict[name] = value
# Priority 2: Set to default attributes.
for attr in self.attribute_defs.itervalues():
if attr.name in attrdict:
continue
value = attr.default_in
if not is_input:
value = attr.default_out
attrdict[attr.name] = value
return attrdict
def input(self, **attribute_dict):
# Create a DataNode object.
attrdict = self._resolve_attributes(attribute_dict, True)
# Don't bother checking the attributes here. The DataNode
# object will do that.
return DataNode(self, **attrdict)
def output(self, **attribute_dict):
attrdict = self._resolve_attributes(attribute_dict, False)
return DataNode(self, **attrdict)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [self.name]
x += [repr(x) for x in self.attribute_defs.itervalues()]
if self.help:
x.append("help=%r" % self.help)
return "DataType(%s)" % ", ".join(x)
@staticmethod
def __init_from_dict(args):
assert 'name' in args
assert 'attribute_defs' in args
assert 'help' in args
#inst = DataType(
# args['name'], *args['attribute_defs'], help=args['help'])
dictionary = args['attribute_defs']
attributes = []
for i in dictionary:
attributes.append(dictionary[i])
inst = DataType(args['name'], *attributes, **{"help" : args['help']})
return inst
class DataNode(object):
# Members:
# datatype Datatype object.
# attributes Dict of attribute name -> value or list of values
# Should not be called by the user. Should always be created from
# a DataType object.
def __init__(self, datatype, **keywds):
# keywds is a dictionary of attribute name ->
# value (or list of values).
# Make sure values are provided for every attribute.
attr_names = datatype.get_attribute_names()
for name in attr_names:
assert name in keywds, "No value given for %s." % name
# Make sure the values of the attributes are legal.
#for name, value in keywds.iteritems():
# assert datatype.is_valid_attribute_name(name), \
# "'%s' is not a known attribute for datatype %s." % (
# name, datatype.name)
# assert datatype.is_valid_attribute_value(name, value), \
# "In a %s, '%s' is not a valid value for '%s'." % (
# datatype.name, value, name)
datatype.assert_valid_attribute_dict(keywds)
self.datatype = datatype
self.attributes = keywds.copy()
def __cmp__(self, other):
if not isinstance(other, DataNode):
return cmp(id(self), id(other))
x1 = [self.datatype, self.attributes]
x2 = [other.datatype, other.attributes]
return cmp(x1, x2)
def __str__(self):
return self.__repr__()
def __repr__(self):
#keywds = self.attributes.copy()
x = [self.datatype.name,
#_pretty_attributes(keywds),
_pretty_attributes(self.attributes), ]
x = [x for x in x if x]
return "DataNode(%s)" % ", ".join(x)
@staticmethod
def __init_from_dict(args):
assert 'datatype' in args
assert 'attributes' in args
inst = DataNode(args['datatype'], **args['attributes'])
return inst
class ModuleNode:
def __init__(self, name, in_datatypes, out_datatype, *params, **keywds):
# params is a list of Constraint, Consequence, and OptionDef.
# objects.
assert type(name) is type("")
for k in keywds:
assert k in ["help"]
# The caller can provide either a single DataType object or a
# list of DataType objects. Make sure it is always a list of
# objects. If it is a single object, make it a list.
assert type(in_datatypes) is type([]) or \
isinstance(in_datatypes, DataType)
if type(in_datatypes) != type([]):
in_datatypes = [in_datatypes]
for x in in_datatypes:
assert isinstance(x, DataType)
assert isinstance(out_datatype, DataType)
# Separate the param objects.
constraints = []
consequences = []
option_defs = [] # OptionDef
default_attributes_from = []
for x in params:
if isinstance(x, Constraint):
constraints.append(x)
elif isinstance(x, Consequence):
consequences.append(x)
elif isinstance(x, OptionDef):
option_defs.append(x)
elif isinstance(x, DefaultAttributesFrom):
default_attributes_from.append(x)
else:
print type(x)
raise AssertionError, "invalid parameter: %s" % repr(x)
# Convenience hack: If the module doesn't convert the data
# type, and no DefaultAttributesFrom is given, then set the
# default_attributes_from.
if not default_attributes_from and \
len(in_datatypes) == 1 and in_datatypes[0] == out_datatype:
default_attributes_from = [DefaultAttributesFrom(0)]
# Another hack: If no DefaultAttributesFrom is given, and only
# one of the in_datatypes is the same as the out_datatype,
# then set the default_attributes_from.
if not default_attributes_from and len(in_datatypes) > 1:
i = [i for i in range(len(in_datatypes))
if in_datatypes[i] == out_datatype]
if len(i) == 1:
i = i[0]
default_attributes_from = [DefaultAttributesFrom(i)]
# Check default_attributes_from. Should be a list of
# DefaultAttributesFrom objects.
assert len(default_attributes_from) <= len(in_datatypes)
seen = {} # make sure no duplicates
for daf in default_attributes_from:
assert daf.input_index < len(in_datatypes)
assert out_datatype.name == in_datatypes[daf.input_index].name
assert daf.input_index not in seen
seen[daf.input_index] = 1
# default_attributes_from can be an empty list if the
# attributes of the output object should come from none of the
# input data types. Probably the most common case if the
# module converts the data type.
#assert len(default_attributes_from) <= 1
#x = None
#if default_attributes_from:
# x = default_attributes_from[0]
#default_attributes_from = x
#if default_attributes_from:
# assert len(in_datatypes) > 1
# assert default_attributes_from.input_index < len(in_datatypes)
# assert out_datatype == \
# in_datatypes[default_attributes_from.input_index]
# Any checking necessary on Option?
self.name = name
self.in_datatypes = in_datatypes
self.out_datatype = out_datatype
self.constraints = constraints
self.consequences = consequences
self.default_attributes_from = default_attributes_from
self.option_defs = option_defs
self.help = keywds.get("help")
# To optimize __cmp__.
self.hash_ = hash((
name, tuple(in_datatypes), out_datatype,
tuple(constraints), tuple(consequences),
tuple(default_attributes_from), tuple(option_defs), self.help))
for x in constraints:
self._assert_constraint(name, in_datatypes, out_datatype,
constraints, consequences, x)
for x in consequences:
self._assert_consequence(name, in_datatypes, out_datatype,
constraints, x)
def _assert_constraint(self, name, in_datatypes, out_datatype, constraints,
consequences, constraint):
# Get the input datatype that this constraint refers to.
i = constraint.input_index
assert i < len(in_datatypes), \
"Invalid constraint index %d in module %s" % (i, name)
in_datatype = in_datatypes[i]
assert constraint.behavior in [MUST_BE, CAN_BE_ANY_OF, SAME_AS]
if constraint.behavior in [MUST_BE, CAN_BE_ANY_OF]:
assert in_datatype.is_valid_attribute_value(
constraint.name,
constraint.arg1), ("%r: Invalid value %r for attribute %r." %
(name, constraint.arg1, constraint.name))
elif constraint.behavior == SAME_AS:
# Make sure the datatype has this attribute.
dt = in_datatypes[constraint.arg1]
assert dt.is_valid_attribute_name(constraint.name)
# Make sure value can be resolved.
assert len(in_datatypes) > 1, (
"%r: SAME_AS constraint requires at least two input "
"datatypes." % name)
const = _resolve_constraint(constraint, constraints)
assert const.behavior in [MUST_BE, CAN_BE_ANY_OF]
#assert constraint.arg1 < len(in_datatypes)
#assert constraint.arg1 != constraint.input_index
## Make sure there is a MUST_BE or CAN_BE_ANY_OF constraint
## on constraint.arg1.
#x = constraints
#x = [x for x in x if x.name == constraint.name]
#x = [x for x in x if x.input_index == constraint.arg1]
#assert len(x) > 0, (
# "%r: %r SAME_AS %d, but datatype %d has no constraint on %r."%
# (name, constraint.name, constraint.arg1, constraint.arg1,
# constraint.name))
#assert len(x) == 1
#x = x[0]
#assert x.behavior in [MUST_BE, CAN_BE_ANY_OF]
else:
raise NotImplementedError
# For every constraint, there must be a consequent given.
# Need to specify what the module does with the variable.
if in_datatype.name == out_datatype.name:
x = [x for x in consequences if x.name == constraint.name]
assert x, "%r: constraint but no consequence for %r." % (
name, constraint.name)
def _assert_consequence(self, name, in_datatypes, out_datatype,
constraints, consequence):
import itertools
assert consequence.name in out_datatype.get_attribute_names(), \
"ModuleNode %r refers to an unknown attribute %r." % (
self.name, consequence.name)
if consequence.behavior in [SET_TO, SET_TO_ONE_OF, BASED_ON_DATA]:
assert out_datatype.is_valid_attribute_value(
consequence.name, consequence.arg1), \
"'%s' is not a valid value for '%s' in module '%s'" % (
consequence.arg1, consequence.name, name)
elif consequence.behavior == SAME_AS_CONSTRAINT:
# Make sure index on input variable is reasonable.
index = consequence.arg1 # index of input variable
assert index < len(in_datatypes), \
"Invalid input index (%s) for module %s:%s." % (
index, name, consequence.name)
in_datatype = in_datatypes[index]
# Make sure there is a valid constraint.
x = constraints
#x = [x for x in constraints
# if x.behavior in [MUST_BE, CAN_BE_ANY_OF, SAME_AS]]
x = [x for x in x if x.input_index == index]
x = [x for x in x if x.name == consequence.name]
assert len(x) > 0, (
"%r: I could not find a constraint on %r for input "
"datatype %d." % (name, consequence.name, index))
assert len(x) == 1
cons = x[0]
# Make sure the values of this constraint are allowed in
# the input and output datatypes. The values of the
# consequent should be a subset of the values of the
# constraint.
if cons.behavior in [MUST_BE, CAN_BE_ANY_OF]:
in_attr = in_datatype.get_attribute_def(consequence.name)
out_attr = out_datatype.get_attribute_def(consequence.name)
assert in_attr.is_valid_value(cons.arg1), \
"Invalid value for %s (%s) in module %s" % (
in_attr.name, cons.arg1, self.name)
assert out_attr.is_valid_value(cons.arg1), \
"Invalid value for %s (%s) in module %s" % (
out_attr.name, cons.arg1, self.name)
else:
raise AssertionError, consequence.behavior
# Make sure a MUST_BE constraint and SET_TO constraint aren't
# the same value. If so, this will confuse the backchainer
# into thinking this is the objective of the module, even
# if it isn't.
if consequence.behavior == SET_TO:
for x in itertools.product(in_datatypes, constraints):
in_datatype, constraint = x
if in_datatype.name != out_datatype.name:
continue
if consequence.name != constraint.name:
continue
if constraint.behavior != MUST_BE:
continue
assert constraint.arg1 != consequence.arg1, \
"MUST_BE and SET_TO should not be set to the " \
"same value in module %s (%s)." % (
self.name, constraint.name)
def __hash__(self):
return self.hash_
def __cmp__(self, other):
if not isinstance(other, ModuleNode):
return cmp(id(self), id(other))
#x1 = [self.name, self.in_datatypes, self.out_datatype,
# self.constraints, self.consequences,
# self.default_attributes_from, self.option_defs, self.help]
#x2 = [other.name, other.in_datatypes, other.out_datatype,
# other.constraints, other.consequences,
# other.default_attributes_from, other.option_defs, other.help]
#return cmp(x1, x2)
return cmp(self.hash_, other.hash_)
def __str__(self):
return self.__repr__()
def __repr__(self):
x1 = repr(self.name)
if len(self.in_datatypes) == 1:
x2 = self.in_datatypes[0].name
else:
x2 = [x.name for x in self.in_datatypes]
x2 = "[%s]" % ", ".join(x2)
x3 = self.out_datatype.name
x4 = [repr(x) for x in self.constraints]
x5 = [repr(x) for x in self.consequences]
x6 = [repr(x) for x in self.default_attributes_from]
x7 = [repr(x) for x in self.option_defs]
x = [x1, x2, x3] + x4 + x5 + x6 + x7
if self.help is not None:
x.append("help=%r" % self.help)
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
@staticmethod
def __init_from_dict(args):
assert 'name' in args
assert 'in_datatypes' in args
assert 'out_datatype' in args
assert 'consequences' in args
assert 'constraints' in args
assert 'option_defs' in args
assert 'default_attributes_from' in args
assert 'help' in args
name = args['name']
in_datatypes = args['in_datatypes']
out_datatype = args['out_datatype']
help_ = args['help']
#params = args.copy()
#del params['name']
#del params['in_datatypes']
#del params['out_datatype']
#del params['help']
#params = (params['consequences']+params['constraints']+
# params['user_inputs']+params['default_attributes_from'])
params = (args['consequences'] + args['constraints'] +
args['option_defs'] + args['default_attributes_from'])
inst = ModuleNode(
name, in_datatypes, out_datatype, *params, **{"help" : help_})
return inst
class ModuleDbSummary:
# module_names List of module names (strings).
# name2module Dictionary of module name (string) to ModuleNode
# object.
# name2datatypes Dict of module name to (in Datatype, out Datatype).
# datatypes List of Datatype objects.
def __init__(self, module_names, name2module, name2datatypes, datatypes):
self.module_names = module_names[:]
self.name2module = name2module.copy()
self.name2datatypes = name2datatypes.copy()
self.datatypes = datatypes[:]
class Network:
def __init__(self, nodes, transitions):
# nodes should be a list of DataNode or ModuleNode objects.
# DataNode transition to ModuleNodes, and ModuleNodes
# transition to DataNode.
# Make sure nodes are DataNode or ModuleNode objects.
for n in nodes:
assert isinstance(n, DataNode) or isinstance(n, ModuleNode)
# Make sure transitions point to the right types of objects.
for node_id, next_ids in transitions.iteritems():
assert node_id >= 0 and node_id < len(nodes)
for nid in next_ids:
assert nid >= 0 and nid < len(nodes)
for nid in next_ids:
n1 = nodes[node_id]
n2 = nodes[nid]
if isinstance(n1, DataNode):
assert isinstance(n2, ModuleNode)
else:
assert isinstance(n2, DataNode)
self.nodes = nodes[:]
self.transitions = transitions.copy()
def iterate(self, node_class=None):
# Yield tuple of (node_id, next_node_ids). node_class is the
# class of the node of the network (either DataNode or
        # ModuleNode).  If provided, will only iterate over that kind
        # of node.
        assert node_class in [None, DataNode, ModuleNode]
for node_id, node in enumerate(self.nodes):
if node_class and not isinstance(node, node_class):
continue
next_ids = self.transitions.get(node_id, [])
yield node_id, next_ids
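    # A minimal usage sketch (assuming "network" is an already-built
    # Network object):
    #   for node_id, next_ids in network.iterate(node_class=DataNode):
    #       print node_id, next_ids
    # This is the pattern some of the optimizers below use to walk over
    # one class of node at a time.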
def delete_node(self, node_id):
"""Delete a node and return a new Network object."""
assert node_id < len(self.nodes)
nodes = self.nodes[:]
nodes.pop(node_id)
transitions = {}
for nid, next_ids in self.transitions.iteritems():
if nid == node_id:
continue
elif nid > node_id:
nid = nid - 1
next_ids = [x for x in next_ids if x != node_id]
for i in range(len(next_ids)):
if next_ids[i] > node_id:
next_ids[i] = next_ids[i] - 1
transitions[nid] = next_ids
return Network(nodes, transitions)
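    # A sketch of the renumbering (hypothetical IDs, not from a real
    # network): with nodes [0, 1, 2, 3] and transitions
    # {0: [1, 2], 1: [3], 2: [3]}, delete_node(1) should give
    # transitions {0: [1], 1: [2]}, because old node 2 becomes node 1
    # and old node 3 becomes node 2.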
def delete_nodes(self, node_ids):
# Make sure no duplicates.
for i in range(len(node_ids)):
assert node_ids[i] not in node_ids[i+1:]
network = Network(self.nodes, self.transitions)
# Delete from high to low so the node_ids don't get messed up.
node_ids = reversed(sorted(node_ids))
for nid in node_ids:
network = network.delete_node(nid)
return network
def merge_nodes(self, node_ids, nodeid2parents=None):
"""node_ids is a list of the indexes of nodes. Replace all
these nodes with just a single one. Returns a new Network
object."""
if nodeid2parents is None:
nodeid2parents = _make_parents_dict(self)
node_ids = sorted(node_ids)
# Make sure no duplicate node_ids.
for i in range(len(node_ids) - 1):
assert node_ids[i] != node_ids[i + 1], "Duplicate node IDs."
# Make sure nodes are the same type.
for i in range(1, len(node_ids)):
n1 = self.nodes[node_ids[0]]
n2 = self.nodes[node_ids[i]]
assert n1.__class__ == n2.__class__, "%s %s" % (
n1.__class__, n2.__class__)
# Keep the first node, and delete the rest.
# Make transitions to any node_ids point to the first one.
node_id = node_ids[0]
prev_ids = []
for nid in node_ids[1:]:
x = nodeid2parents.get(nid, [])
prev_ids.extend(x)
transitions = self.transitions.copy()
for prev_id in prev_ids:
x = transitions[prev_id]
if node_id not in x:
x = x + [node_id]
transitions[prev_id] = x
# Make the first node point to all the next_nodes of the other
# node_ids.
nid0 = node_ids[0]
for nidi in node_ids[1:]:
x = transitions.get(nid0, []) + transitions.get(nidi, [])
x = sorted({}.fromkeys(x))
transitions[nid0] = x
x = Network(self.nodes, transitions)
x = x.delete_nodes(node_ids[1:])
return x
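    # A sketch of the intended behavior (hypothetical IDs): merging
    # nodes [3, 7] keeps node 3, makes every parent of node 7 also
    # point to node 3, gives node 3 the union of the children of 3 and
    # 7, and then deletes node 7 (shifting higher node IDs down by one
    # via delete_nodes).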
def __cmp__(self, other):
if not isinstance(other, Network):
return cmp(id(self), id(other))
# Optimization. Do some quick comparisons first.
if id(self) == id(other):
return 0
x = cmp(len(self.nodes), len(other.nodes))
if x != 0:
return x
x = cmp(self.transitions, other.transitions)
if x != 0:
return x
x1 = [self.nodes, self.transitions]
x2 = [other.nodes, other.transitions]
return cmp(x1, x2)
@staticmethod
def __init_from_dict(args):
assert 'nodes' in args
assert 'transitions' in args
new_transition = dict()
for key, value in args['transitions'].items():
new_transition[int(key)] = value
inst = Network(args['nodes'], new_transition)
return inst
def make_network(moduledb, out_data, custom_attributes):
import copy
# Clean up this code.
network = _init_network(moduledb, out_data, custom_attributes)
# Split the data nodes so that everything is TYPE_ATOM. Fixes
# problems in the inference, and also makes the other steps easier
# to handle. Carefully build the network back up.
network = _split_network(network)
optimizers = [
# There should not be any cycles.
#_OptimizeNoCycles(),
_OptimizeNoInvalidOutputs(),
_OptimizeNoDuplicateModules(),
_OptimizeNoDuplicateData(),
_OptimizeMergeData1(),
# Don't do this merging. See below for reason.
#_OptimizeMergeData2(),
]
it = 0
old_network = None
while old_network != network:
old_network = copy.deepcopy(network)
for opt in optimizers:
#old_network2 = copy.deepcopy(network)
network = opt.optimize(network, custom_attributes)
#if old_network2 != network:
# print "Optimized with %s." % opt.__class__.__name__
it += 1
#num = 0
#plot_network_gv("test-%02d.png" % num, network, verbose=True); num+=1
# This makes the network really messy. Might have to be rewritten.
#network = _complete_network(network, custom_attributes)
return network
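# A minimal usage sketch (the rulebase and attribute values here are
# illustrative assumptions, not part of this module):
#   out_data = rulebase.SignalFile.output(preprocess="rma")
#   network = make_network(rulebase.all_modules, out_data, [])
#   plot_network_gv("network.png", network, verbose=True)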
def _init_network(moduledb, out_data, custom_attributes):
# Return a Network object.
check_moduledb(moduledb)
if isinstance(out_data, DataType):
out_data = out_data.output()
assert isinstance(out_data, DataNode)
nodes = [] # list of DataNode or ModuleNode objects.
transitions = {} # list of index -> list of indexes
nodes.append(out_data)
stack = [0]
seen = {}
while stack:
assert len(nodes) < MAX_NETWORK_SIZE, "network too large"
#_print_network(Network(nodes, transitions))
# Pop the next node off the stack.
node_id = stack.pop()
assert node_id < len(nodes)
node = nodes[node_id]
# If I've already seen this node, then don't process it again.
if node_id in seen:
continue
seen[node_id] = 1
if isinstance(node, DataNode):
# Backwards chain to the previous module.
modules = _bc_to_modules(moduledb, node)
for m in modules:
nodes.append(m)
m_id = len(nodes) - 1
stack.append(m_id)
transitions[m_id] = transitions.get(m_id, [])
transitions[m_id].append(node_id)
elif isinstance(node, ModuleNode):
x = [_bc_to_inputs(node, nodes[x], custom_attributes)
for x in transitions[node_id]]
all_inputs = _uniq(_flatten(x))
# XXX Why only one chain back from one data node?
#cons_id = transitions[node_id][0]
#all_inputs = _bc_to_inputs(
# node, nodes[cons_id], custom_attributes)
for d in all_inputs:
d_id = _find_same_data(nodes, d)
if d_id == -1:
nodes.append(d)
d_id = len(nodes) - 1
stack.append(d_id)
transitions[d_id] = transitions.get(d_id, [])
transitions[d_id].append(node_id)
else:
raise AssertionError, "Unknown node type: %s" % node
# Remove the duplicates from transitions.
for nid, next_ids in transitions.iteritems():
transitions[nid] = _uniq(next_ids)
network = Network(nodes, transitions)
return network
def _split_network(network):
# Inferencing can lead to a situation where a ModuleNode points to
# DataNode that it can't generate. E.g.
# trim_adapters -> Fastq.trimmed=["no", "yes"] (should only be "yes")
#
# Solution: split Fastq into multiple objects.
# _OptimizeNoInvalidOutputs will remove the bad links.
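    # For example (hypothetical attribute values), a DataNode with
    #   Fastq.trimmed=["no", "yes"], Fastq.compressed=["no", "yes"]
    # is replaced by four atomic DataNodes, one per combination of
    # values, each keeping the parents and children of the original
    # node.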
import itertools
nodeid2parents = _make_parents_dict(network)
to_delete = []
for node_id in range(len(network.nodes)):
node = network.nodes[node_id]
if not isinstance(node, DataNode):
continue
# Look for attributes with multiple values. Once found, replace
# with all possible individual values.
attr_names = [] # list of attribute names
attr_values = [] # list of list of attribute values
for name, value in node.attributes.iteritems():
if _get_attribute_type(value) != TYPE_ENUM:
continue
attr_names.append(name)
attr_values.append(value)
if not attr_names:
continue
# Make a new DataNode.
for values in itertools.product(*attr_values):
attrs = node.attributes.copy()
assert len(values) == len(attr_names)
for name, value in zip(attr_names, values):
attrs[name] = value
x = DataNode(node.datatype, **attrs)
network.nodes.append(x)
nid = len(network.nodes)-1
# Make sure this points to all the children of the
# previous node.
            network.transitions[nid] = network.transitions.get(node_id, [])[:]
# Make sure all the parent nodes point to this one.
for pid in nodeid2parents.get(node_id, []):
network.transitions[pid].append(nid)
# Mark the old node for deletion.
to_delete.append(node_id)
network = network.delete_nodes(to_delete)
return network
def _complete_network(network, custom_attributes):
# Sometimes, the network generated by backchaining may be missing
# some links. This function will search for missing links and add
# them back into the network. Returns a new Network object.
#
# Example:
# 1. PSF (preprocess=unknown) -> rank_genes_by_class_neighbors ->
# GeneListFile
# preprocess assigned to unknown because it is the default
# value for PSF files.
# 2. During inferencing, PSF (preprocess=illumina) is created.
# It does not point to rank_genes_by_class_neighbors--it
# points to another module.
# 3. complete_network will add link:
# PSF (preprocess=illumina) -> rank_genes_by_class_neighbors
#
# This occurs because of the optimization we made where
# backchaining created antecedents with default values. If the
    # antecedents contained all possible values, this would not be
# necessary.
import copy
import itertools
debug_print("Completing network.")
network = copy.deepcopy(network)
nodeid2parents = _make_parents_dict(network)
ancestors = _make_ancestor_dict(network)
descendents = _make_descendent_dict(network)
# For each DataNode object, check to see if it can be the
# antecedent of any ModuleNode objects.
data_ids = [x for x in range(len(network.nodes))
if isinstance(network.nodes[x], DataNode)]
module_ids = [x for x in range(len(network.nodes))
if isinstance(network.nodes[x], ModuleNode)]
for x in itertools.product(data_ids, module_ids):
input_id, module_id = x
# If data_id already points to module_id, then ignore
# this.
if module_id in network.transitions.get(input_id, []):
continue
# If this node is not a DataType that the module takes, then
# don't bother checking.
found = False
for dt in network.nodes[module_id].in_datatypes:
if network.nodes[input_id].datatype.name == dt.name:
found = True
break
if not found:
continue
# Don't add a link from data_id to module_id if it would
# create a cycle.
if module_id in ancestors[input_id]:
#debug_print("Skipping DataNode %d -> ModuleNode %d (cycle)." % (
# input_id, module_id))
continue
# Since modules can take multiple inputs, we need to combine
# input_id with all previous input IDs and try all possible
# combinations.
#x = _get_parents_of(network, module_id)
x = nodeid2parents.get(module_id, [])
combined_ids = x + [input_id]
# Find combinations of inputs that are compatible with the
# network.
combos = _bc_to_input_ids(
network, module_id, custom_attributes, all_input_ids=combined_ids,
nodeid2parents=nodeid2parents)
# Add the new transitions.
added = []
for id_ in itertools.chain.from_iterable(combos):
            # Probably don't need to search through.  All the id_'s,
            # except for input_id, are already parents of this node.
assert id_ in network.transitions
if module_id in network.transitions[id_]:
continue
# Add id_ -> module_id.
network.transitions[id_].append(module_id)
added.append(id_)
debug_print(
"Completing DataNode %s [%d] -> ModuleNode %s [%d]." % (
network.nodes[id_].datatype.name, id_,
network.nodes[module_id].name, module_id))
# id_ is now a parent of module_id.
for id_ in added:
if module_id not in nodeid2parents:
nodeid2parents[module_id] = []
if id_ not in nodeid2parents[module_id]:
nodeid2parents[module_id].append(id_)
# Module and all its descendents inherit the ancestors of all
# the added nodes (including the added nodes).
all_ids = [module_id] + descendents.get(module_id, [])
for node_id in all_ids:
anc = ancestors[node_id]
anc.extend(added)
for id_ in added:
anc.extend(ancestors.get(id_, []))
ancestors[node_id] = _uniq(anc)
# All the added nodes inherit the descendents of the Module.
for id_ in added:
desc = descendents[id_]
desc.append(module_id)
desc.extend(descendents.get(module_id, []))
descendents[id_] = _uniq(desc)
return network
class _OptimizeNoCycles:
# Methods:
# optimize
#
# _find_cycle Find a cycle in a network.
# _find_cycle_from_one_node
# _find_cycle_from_all_nodes
# _choose_breakpoint Given a cycle, choose the best breakpoint.
# _break_cycle Break a cycle
def __init__(self):
pass
def optimize(self, network, custom_attributes):
global NUM_TIMES
# Do backwards chaining. If I encounter a cycle, then break it.
# Optimization. Since many cycles are short, break the
# 5-cycles first to speed up the search.
# [131104] Is this still necessary, since the algorithm is now
# smarter about not searching non-cycle nodes?
# Length of cycles:
# 5 168,081 67%
# 7 84,056 33%
# 9-17 318 0%
while True:
cycle = self._find_cycle(network, 5)
if not cycle:
break
transition = self._choose_breakpoint(network, cycle)
if transition:
network = self._break_cycle(network, transition)
while True:
cycle = self._find_cycle(network, 0)
if not cycle:
break
transition = self._choose_breakpoint(network, cycle)
if transition:
network = self._break_cycle(network, transition)
return network
def _list_noncycle_node_ids(self, network, nodeid2parents):
# Return a list of the node_ids that are not in cycles.
# The nodes at the top of the tree (no prev nodes) are not in
# cycles. The nodes at the bottom of the tree (no next nodes)
# are not in cycles.
#
# If all of a node's next nodes are noncycle, then it is
# noncycle.
# If all of a node's prev nodes are noncycle, then it is
# noncycle.
noncycle = {}
while True:
changed = False
node_ids = [i for i in range(len(network.nodes))
if i not in noncycle]
for node_id in node_ids:
prev_ids = nodeid2parents.get(node_id, [])
next_ids = network.transitions.get(node_id, [])
x1 = [i for i in prev_ids if i not in noncycle]
x2 = [i for i in next_ids if i not in noncycle]
if x1 and x2:
continue
# Either all prev_ids are noncycle (or missing), or
                # all next_ids are noncycle (or missing).
noncycle[node_id] = 1
changed = True
if not changed:
break
return noncycle
def _find_cycle(self, network, max_path_length):
assert max_path_length >= 0
nodeid2parents = _make_parents_dict(network)
noncycle = self._list_noncycle_node_ids(network, nodeid2parents)
cycle = None
node_ids = [i for i in range(len(network.nodes)) if i not in noncycle]
for start_id in node_ids:
cycle = self._find_cycle_from_one_node(
network, start_id, max_path_length, noncycle, nodeid2parents)
if cycle:
break
return cycle
def _find_cycle_from_one_node(self, network, start_id, max_path_length,
noncycle, nodeid2parents):
# Do a depth-first search and look for cycles. Return a cycle
# (list of node_ids) or None. The cycle will start and end
# with the same node_id.
# Previously did a breadth-first search, but stack.pop(0) was
# running too slowly (see below). Depth-first search will be
# faster to find a cycle, if it exists, anyway.
if not nodeid2parents:
nodeid2parents = _make_parents_dict(network)
cycle = None
# list of (node_id, path (not including node_id))
stack = [(start_id, [])]
while stack:
# Slow.
#node_id, path = stack.pop(0)
# Really slow.
#node_id, path = stack[0]
#stack = stack[1:]
# 10x faster than pop(0).
node_id, path = stack.pop()
if node_id in noncycle:
continue
if max_path_length and len(path) > max_path_length:
continue
if node_id in path:
# If this node_id is already in the path, then this is
# a cycle.
i = path.index(node_id)
cycle = path[i:] + [node_id]
break
# Add node to the path.
path = path + [node_id]
for prev_id in nodeid2parents.get(node_id, []):
stack.append((prev_id, path))
return cycle
def _find_depth_of_nodes(self, network):
# Do a breadth-first search to assign the depth of each node.
assert network.nodes
nodeid2parents = _make_parents_dict(network)
# OPTIMIZE: can memoize this function.
stack = [(0, -1)] # node_id, next_depth
nodeid2depth = {}
while stack:
node_id, next_depth = stack.pop(0)
if node_id in nodeid2depth:
continue
depth = next_depth + 1
nodeid2depth[node_id] = depth
for prev_id in nodeid2parents.get(node_id, []):
stack.append((prev_id, depth))
return nodeid2depth
def _choose_breakpoint(self, network, cycle):
# Break the cycle at the point furthest from the root of the
# network (node 0). If I choose the wrong breakpoint, can
# leave a whole section dangling. Return tuple of (node_id,
# next_id) of the transition to break.
nodeid2depth = self._find_depth_of_nodes(network)
# See if this cycle is already broken from the main network.
x = [x for x in cycle if x not in nodeid2depth]
if x:
return None
# Furthest point is the one with the highest depth.
depths = [nodeid2depth[x] for x in cycle]
schwartz = zip(depths, cycle)
schwartz.sort()
highest_depth, highest_node_id = schwartz[-1]
# Find the transition. cycle is a list of [id_3, id_2, id_1,
# id_0], where id_0 points to id_1 (etc).
transition = None
for i in range(len(cycle) - 1):
next_id, node_id = cycle[i], cycle[i + 1]
if next_id == highest_node_id:
transition = node_id, next_id
break
assert transition is not None
return transition
def _break_cycle(self, network, bad_transition):
node_id, next_id = bad_transition
transitions = network.transitions.copy()
x = transitions.get(node_id, [])
assert next_id in x
i = x.index(next_id)
assert i >= 0
x = x[:i] + x[i + 1:]
transitions[node_id] = x
return Network(network.nodes, transitions)
class _OptimizeNoInvalidOutputs:
# Fixing overlapping data can lead to a situation where a
# ModuleNode points to DataNode that it can't generate. E.g.
# convert_signal_to_tdf -> format=["tdf", "pcl"]
# will be changed to:
# convert_signal_to_tdf -> format=["tdf"]
# convert_signal_to_tdf -> format=["pcl"]
#
# Since one of these is now incorrect, remove it.
def __init__(self):
pass
def optimize(self, network, custom_attributes):
import copy
bad_transitions = {} # (node_id, next_id) -> 1
for (node_id, next_ids) in network.iterate(node_class=ModuleNode):
module = network.nodes[node_id]
for next_id in next_ids:
node = network.nodes[next_id]
assert isinstance(node, DataNode)
if not _is_valid_output(module, node):
bad_transitions[(node_id, next_id)] = 1
network = copy.deepcopy(network)
for node_id, next_id in bad_transitions:
x = network.transitions.get(node_id, [])
assert next_id in x
i = x.index(next_id)
assert i >= 0
x.pop(i)
network.transitions[node_id] = x
return network
class _OptimizeNoDuplicateModules:
def __init__(self):
pass
def optimize(self, network, custom_attributes):
ancestors = _make_ancestor_dict(network)
nodeid2parents = _make_parents_dict(network)
while True:
duplicates = self.find_duplicate_modules(network)
# Merge each of the duplicates.
changed = False
while duplicates:
n1, n2 = duplicates.pop()
# Don't merge these if it will create a cycle. This
# will happen if one node is an ancestor of the other.
if n2 in ancestors.get(n1, []) or n1 in ancestors.get(n2, []):
continue
network = network.merge_nodes(
[n1, n2], nodeid2parents=nodeid2parents)
duplicates = _fix_node_id_pairs_after_merge(duplicates, n1, n2)
ancestors = _fix_node_id_dict_after_merge(ancestors, n1, n2)
nodeid2parents = _fix_node_id_dict_after_merge(
nodeid2parents, n1, n2)
changed = True
if not changed:
# No duplicates merged. Either no more duplicates, or
# would create cycles.
break
return network
def find_duplicate_modules(self, network):
# Return a list of (node_id1, node_id2) for modules that are
# duplicated. If no duplicates found, return an empty list.
# DEFINITION: If the same data node points to two of the same
# module nodes, then those modules are duplicated.
pairs = {}
for node_id, next_ids in network.iterate(node_class=DataNode):
if len(next_ids) < 2:
continue
next_ids = sorted(next_ids)
for (i, j) in _iter_upper_diag(len(next_ids)):
node_id1, node_id2 = next_ids[i], next_ids[j]
pairs[(node_id1, node_id2)] = 1
dups = []
for (id1, id2) in pairs:
node_1 = network.nodes[id1]
node_2 = network.nodes[id2]
if node_1.name != node_2.name:
continue
dups.append((id1, id2))
return dups
class _OptimizeNoDuplicateData:
def __init__(self):
pass
def optimize(self, network, custom_attributes):
# This could be made much more efficient with a better way of
# finding duplicates.
ancestors = _make_ancestor_dict(network)
nodeid2parents = _make_parents_dict(network)
while True:
duplicates = self.find_duplicate_data(network)
changed = False
# Merge each of the duplicates.
while duplicates:
n1, n2 = duplicates.pop()
# Don't merge these if it will create a cycle. This
# will happen if one node is an ancestor of the other.
if n2 in ancestors.get(n1, []) or n1 in ancestors.get(n2, []):
continue
network = network.merge_nodes(
[n1, n2], nodeid2parents=nodeid2parents)
duplicates = _fix_node_id_pairs_after_merge(duplicates, n1, n2)
ancestors = _fix_node_id_dict_after_merge(ancestors, n1, n2)
nodeid2parents = _fix_node_id_dict_after_merge(
nodeid2parents, n1, n2)
changed = True
if not changed:
break
return network
def find_duplicate_data(self, network):
# Return list of (node_id1, node_id2) for DataNode objects
# that are duplicated. If no duplicates found, return an
# empty list.
# Make a list of all pairs of DataNode objects.
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
duplicates = []
for (i, j) in _iter_upper_diag(len(data_node_ids)):
node_id1, node_id2 = data_node_ids[i], data_node_ids[j]
node_1, node_2, = network.nodes[node_id1], network.nodes[node_id2]
if node_1.datatype.name != node_2.datatype.name:
continue
if node_1 == node_2:
duplicates.append((node_id1, node_id2))
return duplicates
class _OptimizeMergeData1:
# Sometimes the inference can lead to two nodes that share the
# same parents and the same children, and almost the same
# attributes. For example:
# Node1 Node2
# preprocess="unknown" preprocess=<everything else>
#
# If this happens, merge them to simplify the network.
def __init__(self):
pass
def optimize(self, network, custom_attributes):
import copy
network = copy.deepcopy(network)
while True:
similar = self._find_similar_nodes(network, custom_attributes)
if not similar:
break
# Merge the similar nodes.
while similar:
n1, n2 = similar.pop()
network = self._merge_nodes(network, n1, n2)
similar = _fix_node_id_pairs_after_merge(similar, n1, n2)
return network
def _find_similar_nodes(self, network, custom_attributes):
# Return a list of (node_id1, node_id2). Can be empty.
nodeid2parents = _make_parents_dict(network)
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
# Optimization: The same calls are made to _fc_to_output_ids,
# which takes up a lot of the compute time. Cache these
# calls.
fc_cache = {}
similar = []
for i, node_id1 in enumerate(data_node_ids):
for node_id2 in data_node_ids[i+1:]:
if self._are_nodes_similar(
network, node_id1, node_id2, custom_attributes,
nodeid2parents, fc_cache):
similar.append((node_id1, node_id2))
return similar
def _are_nodes_similar(
self, network, node_id1, node_id2, custom_attributes,
nodeid2parents, fc_cache):
node_1 = network.nodes[node_id1]
node_2 = network.nodes[node_id2]
# The data type must be the same.
if node_1.datatype.name != node_2.datatype.name:
return False
# They must share the same children.
c1 = network.transitions.get(node_id1, [])
c2 = network.transitions.get(node_id2, [])
if len(c1) != len(c2):
return False
if sorted(c1) != sorted(c2):
return False
# They might not share the same parents.
# align_bowtie1 -> SamFolder.aligner (bowtie1)
# align_bowtie2 -> SamFolder.aligner (bowtie2)
# Merge to:
# align_bowtie1 -> SamFolder.aligner (bowtie1, bowtie2)
# align_bowtie2 ->
## They must share the same parents.
##p1 = nodeid2parents.get(node_id1, [])
##p2 = nodeid2parents.get(node_id2, [])
##if len(p1) != len(p2):
## return False
##if sorted(p1) != sorted(p2):
## return False
# They must share all but 1 attribute.
x, x, diff_attrs = _score_same_data(node_1, node_2)
if len(diff_attrs) != 1:
return False
# After merging, these data nodes must be able to generate all
# the (grand)children that the unmerged data could generate.
module_ids = c1
paths = [] # list of (in_data_ids, module_id, out_data_id)
for module_id in module_ids:
if module_id not in fc_cache:
x = _fc_to_output_ids(
network, module_id, custom_attributes,
nodeid2parents=nodeid2parents)
fc_cache[module_id] = x
paths.extend(fc_cache[module_id])
# Make sure the input data includes node_id1 or node_id2.
paths = [x for x in paths if node_id1 in x[0] or node_id2 in x[0]]
        # Make sure the input data does not include both node_id1 and node_id2.
paths = [x for x in paths
if not (node_id1 in x[0] and node_id2 in x[0])]
# The combined data node must be able to generate all these
# out data nodes.
merged_data = _merge_data_nodes(node_1, node_2)
for x in paths:
in_data_ids, module_id, out_data_id = x
in_datas = [network.nodes[x] for x in in_data_ids]
for i in range(len(in_data_ids)):
if in_data_ids[i] in [node_id1, node_id2]:
in_datas[i] = merged_data
break
module = network.nodes[module_id]
out_data = network.nodes[out_data_id]
if not _is_valid_inputs(
module, in_datas, out_data, custom_attributes):
return False
return True
def _merge_nodes(self, network, node_id1, node_id2):
# Delete the one with the higher node_id (node_id2).
if node_id1 > node_id2:
node_id1, node_id2 = node_id2, node_id1
# Merge the attributes of the nodes.
n1 = network.nodes[node_id1]
n2 = network.nodes[node_id2]
network.nodes[node_id1] = _merge_data_nodes(n1, n2)
# Everything that pointed to node_id2 now goes to node_id1.
for node_id, next_ids in network.transitions.iteritems():
if node_id2 in next_ids and node_id1 not in next_ids:
next_ids.append(node_id1)
# They share the same children already. No need to add.
return network.delete_node(node_id2)
class _OptimizeMergeData2:
# is_compressed -> Fastq.trimmed=no -> uncompress
# is_compressed -> Fastq.trimmed=yes -> uncompress
# Sometimes will be root nodes (no parents).
#
# Actually, don't use this. It can make the inferencing harder.
# e.g.
# Fastq.trimmed (no, yes) -> is_compressed -> Fastq.trimmed (no)
# Hard to reason whether Fastq.trimmed (no, yes) is a valid
# antecedent of Fastq.trimmed (no).
def __init__(self):
pass
def optimize(self, network, custom_attributes):
import copy
network = copy.deepcopy(network)
while True:
similar = self._find_similar_nodes(network)
if not similar:
break
# Merge the similar nodes.
while similar:
n1, n2 = similar.pop()
network = self._merge_nodes(network, n1, n2)
similar = _fix_node_id_pairs_after_merge(similar, n1, n2)
return network
def _find_similar_nodes(self, network):
# Return a list of (node_id1, node_id2). Can be empty.
nodeid2parents = _make_parents_dict(network)
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
similar = []
for i, node_id1 in enumerate(data_node_ids):
for node_id2 in data_node_ids[i+1:]:
if self._are_nodes_similar(
network, node_id1, node_id2, nodeid2parents):
similar.append((node_id1, node_id2))
return similar
def _are_nodes_similar(self, network, node_id1, node_id2,
nodeid2parents):
node_1 = network.nodes[node_id1]
node_2 = network.nodes[node_id2]
# The data type must be the same.
if node_1.datatype.name != node_2.datatype.name:
return False
# They must share the same children.
c1 = network.transitions.get(node_id1, [])
c2 = network.transitions.get(node_id2, [])
if len(c1) != len(c2):
return False
if sorted(c1) != sorted(c2):
return False
# They must share the same parents.
p1 = nodeid2parents.get(node_id1, [])
p2 = nodeid2parents.get(node_id2, [])
if len(p1) != len(p2):
return False
if sorted(p1) != sorted(p2):
return False
# They must share all but 1 attribute.
x, x, diff_attrs = _score_same_data(node_1, node_2)
if len(diff_attrs) != 1:
return False
return True
def _merge_nodes(self, network, node_id1, node_id2):
# Delete the one with the higher node_id (node_id2).
if node_id1 > node_id2:
node_id1, node_id2 = node_id2, node_id1
# Merge the attributes of the nodes.
n1 = network.nodes[node_id1]
n2 = network.nodes[node_id2]
network.nodes[node_id1] = _merge_data_nodes(n1, n2)
# They share the same parents and children, so nothing needs
# to be rewired.
return network.delete_node(node_id2)
def _find_paths_h(network, node_id, custom_attributes, nodeid2parents):
#import itertools
assert node_id < len(network.nodes)
node = network.nodes[node_id]
prev_ids = nodeid2parents.get(node_id)
if not prev_ids:
assert isinstance(node, DataNode)
yield (node_id,)
return
if isinstance(node, DataNode):
combos = []
for prev_id in prev_ids:
combos.append((prev_id,))
elif isinstance(node, ModuleNode):
combos = _bc_to_input_ids(
network, node_id, custom_attributes, nodeid2parents=nodeid2parents)
for combo in combos:
# Make a list of the possible paths for each branch.
branch2paths = []
for prev_id in combo: # prev_id is node_id for one branch
paths = []
for x in _find_paths_h(
network, prev_id, custom_attributes, nodeid2parents):
x = tuple(x)
paths.append(x)
assert paths
branch2paths.append(paths)
# Merge the paths for each branch.
for x in _product_and_chain(branch2paths, None):
x = x + (node_id,)
yield x
def find_paths(network, custom_attributes, max_paths=None):
# Iterate over all possible paths from the start nodes to the end
# nodes. Each path is a list of the node_ids.
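    # A minimal usage sketch (assuming "network" and "custom_attributes"
    # come from make_network):
    #   for path in find_paths(network, custom_attributes, max_paths=100):
    #       print path   # tuple of node_ids, ending with the root node 0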
assert network.nodes, "empty network"
nodeid2parents = _make_parents_dict(network)
for i, x in enumerate(
_find_paths_h(network, 0, custom_attributes, nodeid2parents)):
yield x
if max_paths is not None and i >= max_paths:
break
def _find_paths_by_datatypes_h(
network, node_id, custom_attributes, datatype_names,
nodeid2parents, depth):
# Yield tuples of:
# path list of node_ids in this path.
# used_ids list of node_ids for nodes from datatype_names
# missing_ids list of node_ids not in datatype_names
import itertools
assert node_id < len(network.nodes), "%s %d" % (
repr(node_id), len(network.nodes))
node = network.nodes[node_id]
prev_ids = _get_parents_of(network, node_id)
if isinstance(node, DataNode):
# If this node is one of these datatypes, then this can be an input.
if node.datatype.name in datatype_names:
yield [node_id], [node_id], []
elif not prev_ids:
# If this is a start node, then this is a missing input.
yield [node_id], [], [node_id]
combos = []
for prev_id in prev_ids:
combos.append((prev_id,))
elif isinstance(node, ModuleNode):
# Find some combination of inputs that works.
combos = _bc_to_input_ids(network, node_id, custom_attributes)
for combo in combos:
# Each branch is a generator to this recursive function.
branch2info = [
_find_paths_by_datatypes_h(
network, x, custom_attributes, datatype_names, nodeid2parents,
depth+[node_id])
for x in combo]
#branch2info = []
#for x in combo:
# x = _find_paths_by_datatypes_h(
# network, user_attributes, x, datatype_names, nodeid2parents,
# depth+[node_id])
# x = list(x)
# branch2info.append(x)
# Try different combinations of paths for each branch.
for x in itertools.product(*branch2info):
# Merge the information from each branch.
path = []
possible_ids = [] # either used or missing
for x in x:
p, uids, mids = x
path.extend(p)
possible_ids.extend(uids)
possible_ids.extend(mids)
# path may have duplicates if different branches converge
# upstream.
path = {}.fromkeys(path).keys()
possible_ids = {}.fromkeys(possible_ids).keys()
# For each of the possible_ids, sort out whether it is
# used or missing. Do not use the same datatype more than
# once.
names = datatype_names[:]
used_ids = []
missing_ids = []
for id_ in possible_ids:
n = network.nodes[id_].datatype.name
if n in names:
used_ids.append(id_)
names.pop(names.index(n))
else:
missing_ids.append(id_)
# If this is a DataNode, the transition from prev_id (a
# ModuleNode) to node_id may be invalid. e.g.
# FastqFold (trimmed=no) -> merge_reads -> FastqFold (trimmed=no)
# FastqFold (trimmed=yes) -> -> FastqFold (trimmed=yes)
# If this is an output FastqFolder where trimmed=no, then this
# is only valid if the input FastqFolder (trimmed=no) is part
# of the path. Only follow the valid paths.
# Actually, this probably isn't necessary anymore given
# the new reasoning engine.
if isinstance(network.nodes[node_id], DataNode):
if not _is_valid_output_id_path(
network, path, node_id, custom_attributes, nodeid2parents):
continue
path = path + [node_id]
yield path, used_ids, missing_ids
def find_paths_by_datatypes(network, custom_attributes, datatype_names):
# Whether this set of datatypes (by name) provides a complete set
# of inputs. Yield tuples of:
# path list of node_ids in this path.
# used_ids list of node_ids for nodes found in datatype_names
# missing_ids list of node_ids not in datatype_names
nodeid2parents = _make_parents_dict(network)
# Recursively check if this set of datatype_names can be inputs to
# this network.
x = _find_paths_by_datatypes_h(
network, 0, custom_attributes, datatype_names, nodeid2parents, [])
return x
class Pathway:
def __init__(self, node_ids, transitions, start_ids, missing_ids):
assert type(node_ids) is type([])
# Debug: Check for duplicates in node_ids.
#assert sorted(_uniq(node_ids)) == sorted(node_ids)
self.node_ids = node_ids[:]
self.transitions = transitions.copy()
self.start_ids = start_ids[:]
self.missing_ids = missing_ids[:]
def __cmp__(self, other):
if not isinstance(other, Pathway):
return cmp(id(self), id(other))
x1 = [
self.node_ids, self.transitions, self.start_ids, self.missing_ids]
x2 = [
other.node_ids, other.transitions, other.start_ids,
other.missing_ids]
return cmp(x1, x2)
def __str__(self):
return self.__repr__()
def __repr__(self):
x = [repr(self.node_ids),
repr(self.transitions),
repr(self.start_ids),
repr(self.missing_ids),
]
x = "%s(%s)" % (self.__class__.__name__, ", ".join(x))
return x
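# A Pathway is just a record of one route through the network.  For
# example (hypothetical IDs, one input slot in node2startids):
#   Pathway([5, 3, 0], {5: [3], 3: [0]}, [5], [])
# means node 5 was chosen for the single start slot, nothing is
# missing, and the data flows 5 -> 3 -> 0.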
def _find_paths_by_start_ids_hh(
network, node_id, custom_attributes, node2startids, nodeid2parents,
depth, cache):
import itertools
from genomicode import jmath
assert node_id < len(network.nodes), "%s %d" % (
repr(node_id), len(network.nodes))
node = network.nodes[node_id]
prev_ids = nodeid2parents.get(node_id, [])
paths = []
if isinstance(node, DataNode):
# If this node matches one of the node2startids, then this can
# be an input.
for i, start_ids in enumerate(node2startids):
if node_id in start_ids:
sids = [None] * len(node2startids)
sids[i] = node_id
paths.append(Pathway([node_id], {}, sids, []))
# If this doesn't match any nodes, then this branch may be
# missing.
if not paths:
sids = [None] * len(node2startids)
paths.append(Pathway([node_id], {}, sids, [node_id]))
# Search each of the parents for inputs.
combos = []
for prev_id in prev_ids:
combos.append((prev_id,))
elif isinstance(node, ModuleNode):
# Find some combination of inputs that works.
combos = _bc_to_input_ids(
network, node_id, custom_attributes, nodeid2parents=nodeid2parents)
for combo in combos:
# combo is a list of node_ids.
# Each branch is a list of Pathway objects.
branch2info = [
_find_paths_by_start_ids_h(
network, x, custom_attributes, node2startids, nodeid2parents,
depth+[node_id], cache)
for x in combo]
# Make sure there aren't too many combinations to search.
MAX_COMBINATIONS = 1E4
x = [len(x) for x in branch2info]
total = jmath.prod(x)
assert total < MAX_COMBINATIONS, "Too many paths (%d)" % total
# No branches indicate an error somewhere.
assert total > 0
# Try different combinations of paths for each branch.
for branches in itertools.product(*branch2info):
assert branches
# Merge the node_ids. It may have duplicates if different
# branches converge upstream.
x = [x.node_ids for x in branches]
node_ids = _uniq(_flatten(x))
assert node_id not in node_ids
# Merge the transitions.
transitions = {}
for p in branches:
for n1, n2 in p.transitions.iteritems():
transitions[n1] = _uniq(transitions.get(n1, []) + n2)
# Add transitions to this node.
for x in combo:
if x not in transitions:
transitions[x] = []
                if node_id not in transitions[x]:
transitions[x].append(node_id)
# Merge the missing_ids.
x = [x.missing_ids for x in branches]
missing_ids = _uniq(_flatten(x))
# See if these paths conflict. Paths may conflict if:
# 1. Different data nodes are used for the same start_id.
            # 2.  A module_node has a consequence that is
            #     BASED_ON_DATA, and the subsequent DataNode has
            #     different values for that attribute.
            #     Ignore this check if there are missing_ids; the
            #     merged pipeline is already invalid anyway.
# 3. If data node, can't transition from previous module
# node.
# If the paths conflict, then skip it.
# Case 1. See if there are conflicting start_ids.
# (As a side effect, also merge the start_ids).
conflicting_start_ids = False
            # Copy the list so we don't mutate the cached Pathway.
            start_ids = branches[0].start_ids[:]
for p in branches[1:]:
assert len(start_ids) == len(p.start_ids)
for i in range(len(start_ids)):
if p.start_ids[i] is None:
continue
if start_ids[i] is not None and \
start_ids[i] != p.start_ids[i]:
conflicting_start_ids = True
break
start_ids[i] = p.start_ids[i]
if conflicting_start_ids:
continue
# Case 2. Look for modules with a BASED_ON_DATA
# consequence. Then, make sure the attribute values of
# the data are the same.
# Make a list of the module_node_ids in any of these
# branches with BASED_ON_DATA consequences.
based_on_data_1 = [] # list of (module_node_id, cons name)
module_node_ids = [x for x in node_ids
if isinstance(network.nodes[x], ModuleNode)]
for module_id in module_node_ids:
x = network.nodes[module_id].consequences
x = [x for x in x if x.behavior == BASED_ON_DATA]
x = [(module_id, x.name) for x in x]
based_on_data_1.extend(x)
# list of (module_node_id, attribute_name, data_node_id, value)
based_on_data_2 = []
for (module_id, name) in based_on_data_1:
for data_id in transitions.get(module_id, []):
node = network.nodes[data_id]
assert name in node.attributes
value = node.attributes[name]
x = module_id, name, data_id, value
based_on_data_2.append(x)
# Make sure there are no conflicts.
based_on_data_2.sort()
conflict = False
for i in range(len(based_on_data_2)-1):
module_id1, name1, data_id1, value1 = based_on_data_2[i]
module_id2, name2, data_id2, value2 = based_on_data_2[i+1]
if module_id1 != module_id2 or name1 != name2:
continue
if value1 != value2:
conflict = True
break
if conflict and not missing_ids:
continue
# If this is a DataNode, the transition from prev_id (a
# ModuleNode) to node_id may be invalid. e.g.
# FastqFold (trimmed=no) -> merge_reads -> FastqFold (trimmed=no)
# FastqFold (trimmed=yes) -> -> FastqFold (trimmed=yes)
# If this is an output FastqFolder where trimmed=no, then this
# is only valid if the input FastqFolder (trimmed=no) is part
# of the path.
# Only follow the valid paths.
if isinstance(network.nodes[node_id], DataNode):
if not _is_valid_output_id_path(
network, node_ids, node_id, custom_attributes,
nodeid2parents):
continue
x = node_ids + [node_id]
paths.append(Pathway(x, transitions, start_ids, missing_ids))
# Return the working paths. If any of the paths have no missing
# nodes, then remove all paths with missing nodes.
no_missing = [x for x in paths if not x.missing_ids]
if no_missing:
paths = no_missing
# If there are no working paths, then merge the paths by
# start_ids.
# Be careful not to do this with working paths. Otherwise, this
# may end up merging alternate routes through the network.
# Fastq.compress (unknown) -> is_compressed -> Fastq.compress (yes)
# -> Fastq.compress (no)
schwartz = sorted([(x.start_ids, x) for x in paths])
paths = [x[-1] for x in schwartz]
i = 0
while i < len(paths)-1:
p1, p2 = paths[i], paths[i+1]
if paths[i] == paths[i+1]:
del paths[i+1]
        elif not p1.missing_ids or not p2.missing_ids:
i += 1
continue
elif p1.start_ids == p2.start_ids:
n1, n2 = p1.node_ids, p2.node_ids
m1, m2 = p1.missing_ids, p2.missing_ids
node_ids = {}.fromkeys(n1+n2).keys()
missing_ids = {}.fromkeys(m1+m2).keys()
transitions = p1.transitions.copy()
for n1, n2 in p2.transitions.iteritems():
transitions[n1] = _uniq(transitions.get(n1, []) + n2)
paths[i] = Pathway(
node_ids, transitions, p1.start_ids, missing_ids)
del paths[i+1]
else:
i += 1
return paths
def _find_paths_by_start_ids_h(
network, node_id, custom_attributes, node2startids, nodeid2parents,
depth, cache):
if node_id not in cache:
x = _find_paths_by_start_ids_hh(
network, node_id, custom_attributes, node2startids, nodeid2parents,
depth, cache)
cache[node_id] = x
return cache[node_id]
def find_paths_by_start_ids(network, custom_attributes, node2startids):
# node2startids should be a list of lists indicating the possible
# start_ids for each input node.
#
    # This function will search through the network for pipelines that
    # start from these start nodes and return a list of Pathway objects:
# node_ids list of node IDs in this path.
# transitions node_id -> list of next node IDs
# start_ids list of node IDs, parallel to node2startids
# missing_ids list of node IDs
#
# node_ids is only provided if there are no missing_ids.
nodeid2parents = _make_parents_dict(network)
x = _find_paths_by_start_ids_h(
network, 0, custom_attributes, node2startids, nodeid2parents, [], {})
# One of these may be a trivial pathway [0]. Remove this.
#x = [x for x in x if x[0] != [0]]
x = [x for x in x if x.node_ids != [0]]
return x
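# A minimal usage sketch (hypothetical node IDs; "network" and
# "custom_attributes" are assumed to come from make_network):
#   node2startids = [[4, 7], [12]]   # candidate start nodes per input
#   for p in find_paths_by_start_ids(network, custom_attributes,
#                                    node2startids):
#       if not p.missing_ids:
#           print p.start_ids, p.node_ids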
def get_input_nodes(
network, custom_attributes, skip_datatypes=None,
skip_private_datatypes=False, max_inputs=None):
# Return a list of tuples of node ids. Each tuple contains a set
# of node IDs that can serve as the inputs to this network.
#
# Example return value:
# [(1, 5), (8,)]
# This means that the set of nodes 1 and 5 would make a valid input
# to this network. Or, node 8 by itself would also be a valid
# input.
    # skip_datatypes should be a list of datatype names to ignore.
skip_datatypes = skip_datatypes or []
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
# Make list of node_ids to skip.
skip_ids = [
x for x in data_node_ids
if network.nodes[x].datatype.name in skip_datatypes]
if skip_private_datatypes:
x = [x for x in data_node_ids
if network.nodes[x].datatype.name.startswith("_")]
skip_ids.extend(x)
skip_ids = {}.fromkeys(skip_ids)
class nodeid2nodeid_fn:
def __init__(self, skip_ids):
self.skip_ids = skip_ids
def __call__(self, node_id):
if node_id in self.skip_ids:
return None
return node_id
fn = nodeid2nodeid_fn(skip_ids)
nodeid_combos = _product_network(
network, custom_attributes, max_nodes=max_inputs, nodeid2id_fn=fn)
return nodeid_combos
def get_input_datatypes(
network, custom_attributes,
skip_datatypes=None, skip_private_datatypes=False, max_inputs=None):
# Return a list of tuples of datatypes. Each tuple contains a set
# of DataType objects that can serve as the inputs to this
# network.
#
# Example return value:
# [(UnprocessedSignalFile, ClassLabelFile), (UnprocessedSignalFile,)]
    # skip_datatypes should be a list of datatype names to ignore.
skip_datatypes = skip_datatypes or []
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
# Make list of node_ids to skip.
skip_ids = [
x for x in data_node_ids
if network.nodes[x].datatype.name in skip_datatypes]
if skip_private_datatypes:
x = [x for x in data_node_ids
if network.nodes[x].datatype.name.startswith("_")]
skip_ids.extend(x)
skip_ids = {}.fromkeys(skip_ids)
class nodeid2dtid_fn:
def __init__(self, network, skip_ids):
# Make a list of all the datatypes in the network.
# Datatype ID is index into this list.
id2name = [] # id of datatype -> name of datatype
name2datatype = {} # name of datatype -> DataType
name2id = {} # name of datatype -> id of datatype
for node_id in data_node_ids:
dt = network.nodes[node_id].datatype
name2datatype[dt.name] = dt
id2name = sorted(name2datatype)
for i, dt in enumerate(id2name):
name2id[dt] = i
self.network = network
self.skip_ids = skip_ids
self.name2datatype = name2datatype
self.id2name = id2name
self.name2id = name2id
def __call__(self, node_id):
if node_id in self.skip_ids:
return None
node = self.network.nodes[node_id]
return self.name2id[node.datatype.name]
fn = nodeid2dtid_fn(network, skip_ids)
# Take the product of the data types across the network.
dtid_combos = _product_network(
network, custom_attributes, max_nodes=max_inputs, nodeid2id_fn=fn)
# Convert from ID back to datatypes.
datatype_combos = []
for dtid_combo in dtid_combos:
names = [fn.id2name[x] for x in dtid_combo]
x = tuple([fn.name2datatype[x] for x in names])
datatype_combos.append(x)
return datatype_combos
def group_nodes_by_datatype(network, inputs):
# Take a network and inputs (e.g. from get_inputs) and return a
# dictionary where the keys are tuples of DataTypes and the values
# are the inputs with that pattern of DataTypes.
#
# Example return value:
# {
# (SignalFile,) : [(2,), (4,), (8,)],
# (SignalFile, ClassLabelFile) : [(5, 10)],
# }
# The order of the nodes may not be preserved.
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
# Make a list of all the datatypes in the network. Datatype ID is
# index into this list.
#all_datatypes = [] # id of datatype -> name of datatype
name2datatype = {} # name of datatype -> DataType
datatype2id = {} # name of datatype -> id of datatype
for node_id in data_node_ids:
dt = network.nodes[node_id].datatype
name2datatype[dt.name] = dt
all_datatypes = sorted(name2datatype)
for i, dt in enumerate(all_datatypes):
datatype2id[dt] = i
# Figure out the datatype for each node.
# Really fast.
nodeid2dtid = {}
for node_id in data_node_ids:
dtid = datatype2id[network.nodes[node_id].datatype.name]
nodeid2dtid[node_id] = dtid
# Figure out the nodes for each data type. Very slow. There's
# lots of different inputs, all unique. There are many fewer
# combinations of datatypes. Thus, iterating over datatypes will
# be much faster.
data = {} # tuple of datatype ids -> tuple of node IDs -> 1
for inp in inputs:
# This line takes ~50% of the time in this function.
x = [nodeid2dtid[x] for x in inp]
x = sorted(x) # Takes ~25% of time.
dtids = tuple(x)
if dtids not in data:
data[dtids] = {}
data[dtids][inp] = 1
# Convert the datatype IDs to objects.
# Relatively fast.
clean = {} # tuple of datatype objects -> list of tuple of node IDs
for dtids, value in data.iteritems():
names = [all_datatypes[x] for x in dtids]
datatypes = tuple([name2datatype[x] for x in names])
clean[datatypes] = value.keys()
return clean
def summarize_moduledb(moduledb):
"""Take a list of ModuleNodes and return a ModuleDbSummary object."""
name2module = {} # module_name -> ModuleNode
for module in moduledb:
assert module.name not in name2module
name2module[module.name] = module
module_names = sorted(name2module)
# module_name -> (list of in Datatypes, out Datatype)
name2datatypes = {}
for name, module in name2module.iteritems():
name2datatypes[name] = module.in_datatypes, module.out_datatype
# All DataType objects found in moduledb.
datatypes = {} # name -> DataType object
for (in_datatypes, out_datatype) in name2datatypes.itervalues():
dts = in_datatypes + [out_datatype]
for dt in dts:
if dt.name in datatypes:
continue
datatypes[dt.name] = dt
datatypes = datatypes.values()
x = ModuleDbSummary(module_names, name2module, name2datatypes, datatypes)
return x
def check_moduledb(moduledb):
seen = {}
for module in moduledb:
assert isinstance(module, ModuleNode)
seen[module.name] = seen.get(module.name, 0) + 1
# Make sure no duplicate modules.
dups = ["%s (%d times)" % (x, seen[x]) for x in seen if seen[x] > 1]
msg = ""
x = dups
if len(x) > 5:
x = x[:5] + ["... plus %s more" % (len(dups) - 5)]
msg = "\n".join(x)
assert not dups, "Duplicate modules: %s" % msg
def print_modules(moduledb):
summary = summarize_moduledb(moduledb)
for name in summary.module_names:
in_datatypes, out_datatype = summary.name2datatypes[name]
in_names = [x.name for x in in_datatypes]
x = ", ".join(in_names)
x = name, x, out_datatype.name
print "\t".join(x)
print
for datatype in summary.datatypes:
print datatype.name
for attr in sorted(datatype.attributes):
values = datatype.attributes[attr]
if type(values) is type(""):
values = [values]
print " %s : %s" % (attr, ", ".join(map(str, values)))
print
def print_network(network, outhandle=None):
outhandle = outhandle or sys.stdout
if type(outhandle) is type(""):
outhandle = open(outhandle, 'w')
line_width = 72
for i in range(len(network.nodes)):
p_step = "%2d. " % i
p_space = " " * (len(p_step) + 2)
_print_line(str(network.nodes[i]), p_step, p_space, line_width,
outhandle=outhandle)
print >>outhandle
for i in sorted(network.transitions):
x = [i, "->"] + network.transitions[i]
print >>outhandle, "\t".join(map(str, x))
def _format_datanode_gv(node, node_id):
from genomicode import parselib
node_name = "%s [%d]" % (node.datatype.name, node_id)
LINE_WIDTH = 60
lines = []
w = lines.append
w("<B>%s</B>" % node_name)
if node.attributes:
w('<BR/>')
w('<BR/>')
w("<U>Data Attributes</U>:")
w('<BR ALIGN="LEFT"/>')
for x in node.attributes.iteritems():
name, value = x
x = "%s = %s" % (name, value)
for x in parselib.linesplit(x, prefix1=0, prefixn=4, width=LINE_WIDTH):
w(x)
w('<BR ALIGN="LEFT"/>')
return "<%s>" % "".join(lines)
def _format_modulenode_gv(node, node_id, options):
node_name = "%s [%d]" % (node.name, node_id)
if options is None:
options = {}
lines = []
w = lines.append
w("<B>%s</B>" % node_name)
if node.option_defs:
w('<BR/>')
w('<BR/>')
w("<U>Module Attributes</U>:")
w('<BR ALIGN="LEFT"/>')
for option in node.option_defs:
name = option.name
value = options.get(name)
if value is None:
value = option.default
if value is None:
x = ' <B><FONT COLOR="RED">MISSING</FONT></B>'
elif value:
x = " = %s" % value
else:
x = ""
w("%s%s" % (name, x))
w('<BR ALIGN="LEFT"/>')
return "<%s>" % "".join(lines)
def plot_network_gv(
filename, network, options=None, bold=[], bold_transitions=[],
highlight_green=[], highlight_orange=[], highlight_purple=[],
highlight_yellow=[],
verbose=False):
# bold List of node IDs to bold.
    # highlight_<color>    Lists of node IDs to highlight in that color.
# bold_transitions List of tuples (node_id1, node_id2)
from genomicode import graphviz
gv_nodes = []
gv_edges = []
gv_node2attr = {}
gv_edge2attr = {}
bold_width = 3
#bold_color = "#960308"
bold_color = "#CD1A1C"
#data_color = "#EEEEEE"
#module_color = "#80E0AA"
# Brewer brbg-div. Looks OK.
#data_color = "#E0C37E"
#module_color = "#80CDC2"
# Brewer bugn-seq. Looks OK.
#data_color = "#EEF9FC"
#module_color = "#99D9CA"
# Brewer blues-seq. Best.
data_color = "#F0F4FF"
module_color = "#BFD4E7"
# Brewer qualitative.
#highlight_color = "#FDAF91" # red
#highlight1_color = "#E51517" # red # set1
#highlight2_color = "#50B24E" # green
#highlight3_color = "#F97808" # orange
#highlight1_color = "#63C2A4" # green # set2
#highlight2_color = "#FB875D" # orange
#highlight3_color = "#50B24E" # purple
highlight_green_color = "#B4E2CE" # green # pastel
highlight_orange_color = "#FACEAE" # orange
highlight_purple_color = "#F5CAE4" # purple
highlight_yellow_color = "#FEF2AD" # yellow
# Brewer rdbu-div. Too much contrast.
#data_color = "#F5A582"
#module_color = "#92C6DF"
# Brewer piyg-div. Too pastel
#data_color = "#F2B7DB"
#module_color = "#B8E286"
id2name = {}
for node_id, node in enumerate(network.nodes):
node2attr = {}
node2attr["style"] = "filled"
node2attr["penwidth"] = "1"
if node.__class__.__name__ == "DataNode":
name = node.datatype.name
#node2attr["shape"] = "note"
node2attr["shape"] = "box"
node2attr["style"] = "rounded,filled"
node2attr["fillcolor"] = data_color
if verbose:
node2attr["label"] = _format_datanode_gv(node, node_id)
elif node.__class__.__name__ == "ModuleNode":
name = node.name
node2attr["shape"] = "box"
node2attr["fillcolor"] = module_color
if verbose:
node2attr["label"] = _format_modulenode_gv(
node, node_id, options)
else:
raise AssertionError
#if node_id in highlight_node1:
# node2attr["fillcolor"] = highlight_color
if node_id in bold:
node2attr["penwidth"] = bold_width
node2attr["color"] = bold_color
if node_id in highlight_yellow:
node2attr["fillcolor"] = highlight_yellow_color
elif node_id in highlight_green:
node2attr["fillcolor"] = highlight_green_color
elif node_id in highlight_orange:
node2attr["fillcolor"] = highlight_orange_color
elif node_id in highlight_purple:
node2attr["fillcolor"] = highlight_purple_color
node_name = "%s [%d]" % (name, node_id)
id2name[node_id] = node_name
gv_nodes.append(node_name)
gv_node2attr[node_name] = node2attr
for node_id, next_ids in network.transitions.iteritems():
for next_id in next_ids:
edge2attr = {}
x1 = id2name[node_id]
x2 = id2name[next_id]
#if node_id in bold and next_id in bold:
if (node_id, next_id) in bold_transitions:
edge2attr["penwidth"] = bold_width
edge2attr["color"] = bold_color
gv_edges.append((x1, x2))
if edge2attr:
gv_edge2attr[(x1, x2)] = edge2attr
G = graphviz.make_graph(
gv_nodes, gv_edges, node2attributes=gv_node2attr,
edge2attributes=gv_edge2attr, prog="dot", directed=True)
G.draw(filename)
#G.write("test.gv")
def read_network(file_or_handle):
import json
handle = file_or_handle
if type(handle) is type(""):
handle = open(file_or_handle, 'r')
text = handle.read()
network = json.loads(text, object_hook=_dict_to_object)
return network
def write_network(file_or_handle, network):
import json
handle = file_or_handle
if type(handle) is type(""):
handle = open(file_or_handle, 'w')
json.dump(network, handle, default=_object_to_dict, indent=2)
def debug_print(s):
from genomicode import parselib
if not DEBUG:
return
parselib.print_split(s)
def _bc_to_modules(moduledb, out_data):
# Return list of modules that can generate an output that is
# compatible with data.
modules = [] # list of (module, num compatible attributes)
for module in moduledb:
if _is_valid_output(module, out_data):
modules.append(module)
return modules
def _bc_to_inputs(
module, out_data, custom_attributes,
force_default_input_attribute_to_be_all_values=False):
all_attributes = []
all_attrsource = []
force = force_default_input_attribute_to_be_all_values
for in_num in range(len(module.in_datatypes)):
x = _bc_to_one_input(
module, in_num, out_data, custom_attributes,
force_default_input_attribute_to_be_all_values=force)
attributes, attrsource = x
all_attributes.append(attributes)
all_attrsource.append(attrsource)
# Handle the SAME_AS constraints here. Can't do in
# _bc_to_one_input because these constraints require the
# comparison of multiple inputs.
for constraint in module.constraints:
if constraint.behavior != SAME_AS:
continue
# BUG: Need to handle user attributes with SAME_AS.
# If this constraint is the SAME_AS another one, then use the
# value of the copied constraint.
i_src = constraint.arg1
i_dst = constraint.input_index
assert i_src < len(module.in_datatypes)
assert i_dst < len(module.in_datatypes)
#input_src = all_inputs[i_src]
#input_dst = all_inputs[i_dst]
attr_src = all_attributes[i_src]
attr_dst = all_attributes[i_dst]
name = constraint.name
assert name in attr_src, \
"Invalid attribute %s (%s:%s)" % (
name, module.name, module.in_datatypes[i_src].name)
assert name in attr_dst, \
"Invalid attribute %s (%s:%s)" % (
name, module.name, module.in_datatypes[i_dst].name)
attr_dst[name] = attr_src[name]
all_attrsource[i_dst][name] = "SAME_AS,%d" % (i_src)
# Make the input objects.
all_inputs = []
for in_num in range(len(module.in_datatypes)):
in_datatype = module.in_datatypes[in_num]
out_datatype = out_data.datatype
attributes = all_attributes[in_num]
attrsource = all_attrsource[in_num]
x = in_datatype.output(**attributes)
all_inputs.append(x)
# Optimization: Don't call debug_print and sorted.
if not DEBUG:
continue
debug_print(
"Backchaining %s (input=%d) -> %s -> %s." %
(in_datatype.name, in_num, module.name, out_datatype.name))
#debug_print("Generating a %s with attributes:" % in_datatype.name)
for name in sorted(attributes):
debug_print(" %s=%s (%s)" %
(name, attributes[name], attrsource[name]))
return all_inputs
def _bc_to_one_input(module, in_num, out_data, custom_attributes,
force_default_input_attribute_to_be_all_values=False):
# Given a module and output_data, return the input_data object
# that can generate the output. This should only be called by
# _bc_to_inputs. The SAME_AS constraint is handled
# there. If this is called by itself, the attributes might not be
# right.
#
# force_default_input_attribute_to_be_all_values can be useful
# when checking for the possibility that an input node can go into
# a module. However, it should be False when generating the
# network to prevent combinatorial explosion.
assert in_num < len(module.in_datatypes)
in_datatype = module.in_datatypes[in_num]
#out_datatype = out_data.datatype
# Can't generate debug messages here because the SAME_AS
# constraints aren't handled in this function.
# The attributes for the input object should come from (in
# decreasing priority):
# 1. Consequence (i.e. SAME_AS_CONSTRAINT).
# 2. Constraint.
# 3. user attribute
# 4. out_data (default_attributes_from)
# 5. default output value of input datatype
#
# If the user attribute conflicts with a Constraint, the
# Constraint is higher priority to make sure no objects are
# generated that the module cannot handle. However, if the user
# attribute or default value is a part of the constraint,
# (e.g. one of many options), then we can refine it with the user
# attribute (or default).
#
# The user attribute also only applies to the first time this node
# is generated. E.g.
# Bam_1.recal=no -> create_targets -> RealignTarget ->
# Bam_2.recal=no -> recal -> Bam_3.recal=yes -> call_variants
# If the user has requested a specific value for recal, then it
# only applies to Bam_3.
#
# Consequence (SAME_AS_CONSTRAINT) is higher priority than
# constraint because it indicates a more specific value than the
# constraint. e.g.:
# Constraint("quantile_norm", CAN_BE_ANY_OF, ["no", "yes"])
# Consequence("quantile_norm", SAME_AS_CONSTRAINT, 0)
# Start with empty attributes.
attributes = {}
# Keep track of the source of the attribute, for debugging.
attrsource = {} # attribute name -> source
# Set the attributes in increasing order of priority. Higher
# priority overwrites lower priority.
# Case 5. Set values from defaults.
#
# Using default attributes makes things a lot simpler and
# mitigates combinatorial explosion. However, it can close up
# some possibilities. What if the value should be something other
# than the default?
for attrdef in in_datatype.attribute_defs.itervalues():
default = attrdef.default_out
if DEFAULT_INPUT_ATTRIBUTE_IS_ALL_VALUES or \
force_default_input_attribute_to_be_all_values:
default = attrdef.values
attributes[attrdef.name] = default
attrsource[attrdef.name] = "default"
# Case 4. If default_attributes_from is the same as in_num, then
# fill with the same values as the out_data.
indexes = [x.input_index for x in module.default_attributes_from]
if in_num in indexes:
for name, value in out_data.attributes.iteritems():
attributes[name] = value
attrsource[name] = "out_data"
# Case 3. If the input data object does not proceed to the output
# data object, then use the attribute provided by the user.
    # Only applies the first time this data object is seen in the
# network (see above).
# XXX NOT IMPLEMENTED YET!
#attrs = {}
x = [x for x in module.default_attributes_from if x.input_index == in_num]
if not x:
# Look for relevant user attributes.
#for attr in custom_attributes:
# # Ignore attributes for other data types.
# if attr.datatype.name != in_datatype.name:
# continue
# attrs[attr.name] = attr.value
# Set values from user attributes.
for attr in custom_attributes:
# Ignore attributes for other data types.
if attr.datatype.name != in_datatype.name:
continue
attributes[attr.name] = attr.value
attrsource[attr.name] = "user"
# Case 2. Set the attributes based on the constraints.
for constraint in module.constraints:
if constraint.input_index != in_num:
continue
if constraint.behavior == MUST_BE:
attributes[constraint.name] = constraint.arg1
attrsource[constraint.name] = "constraint"
elif constraint.behavior == CAN_BE_ANY_OF:
value = constraint.arg1
source = "constraint"
# If the user specified an attribute, then refine by it.
# Refine by default value?
# o If YES: network may suggest that only the default for
# an attribute is acceptable, when other values would
# work.
# o If NO: may generate large network.
#if attrsource.get(constraint.name) in ["user", "default"]:
if attrsource.get(constraint.name) in ["user"]:
x = _get_attribute_type(attributes[constraint.name])
if x == TYPE_ATOM:
if attributes[constraint.name] in value:
value = attributes[constraint.name]
source = "constraint,%s" % attrsource[constraint.name]
elif x == TYPE_ENUM:
x = _intersect(attributes[constraint.name], value)
if x:
value = x
source = "constraint,%s" % attrsource[constraint.name]
else:
raise AssertionError
attributes[constraint.name] = value
attrsource[constraint.name] = source
elif constraint.behavior == SAME_AS:
# Handled in _bc_to_inputs.
pass
else:
raise AssertionError
# Case 1. Set the attributes based on the consequences. If there
# is a Consequence that is SAME_AS_CONSTRAINT, then the attribute
# should be determined by the out_data. e.g.
# Constraint("quantile_norm", CAN_BE_ANY_OF, ["no", "yes"])
# Consequence("quantile_norm", SAME_AS_CONSTRAINT)
#
# The module takes anything, produces the same value. So the
# backchainer needs to preserve the value from the out_data.
# Get SAME_AS_CONSTRAINT. Nothing to do for SET_TO,
# SET_TO_ONE_OF, BASED_ON_DATA.
x = [x for x in module.consequences if x.behavior == SAME_AS_CONSTRAINT]
# Get the consequences that are based on this datatype.
x = [x for x in x if x.arg1 == in_num]
consequences = x
for consequence in consequences:
n = consequence.name
source = "consequence"
# Copy the value from the output data.
data_value = out_data.attributes[n]
data_type = _get_attribute_type(data_value)
# Since more values may be allowed in the consequence,
# further refine based on the constraint. E.g.:
# Constraint [A, B]
# Consequence [A, B, C, D]
x = [x for x in module.constraints if x.name == n]
x = [x for x in x if x.input_index == in_num]
assert len(x) > 0
assert len(x) == 1
constraint = x[0]
if constraint.behavior == SAME_AS:
constraint = _resolve_constraint(constraint, module.constraints)
if constraint.behavior == MUST_BE:
# constraint.arg1 <value>
if data_type == TYPE_ATOM:
assert constraint.arg1 == data_value
elif data_type == TYPE_ENUM:
assert constraint.arg1 in data_value
data_value = constraint.arg1
source = "consequence+constraint"
else:
raise AssertionError
elif constraint.behavior == CAN_BE_ANY_OF:
# constraint.arg1 list of <values>
if data_type == TYPE_ATOM:
assert data_value in constraint.arg1
elif data_type == TYPE_ENUM:
common = _intersect(constraint.arg1, data_value)
assert common
data_value = common
source = "consequence+constraint"
else:
raise AssertionError
else:
raise AssertionError
attributes[n] = data_value
attrsource[n] = source
return attributes, attrsource
def _bc_to_input_ids(
network, module_id, custom_attributes,
all_input_ids=None, all_output_ids=None, nodeid2parents=None):
# Return a list of tuples of input_ids that match the input
# datatypes of the module. Checks the datatypes and makes sure
# that this combination of data can generate an output in the
# network.
# all_input_ids and all_output_ids can be used to restrict the
# search to specific input or output IDs.
import itertools
if not nodeid2parents:
nodeid2parents = _make_parents_dict(network)
assert module_id in nodeid2parents
if all_input_ids is None:
all_input_ids = nodeid2parents[module_id]
if all_output_ids is None:
all_output_ids = network.transitions.get(module_id, [])
# In about 97% of the cases, the module only has 1 datatype. In
# ~90% of the cases, there are 2 input IDs.
module = network.nodes[module_id]
# For each in_datatype, find all the data objects that match this
# type.
args = [] # list of list node IDs. Parallel to module.in_datatypes
for datatype in module.in_datatypes:
x = [x for x in all_input_ids
if network.nodes[x].datatype.name == datatype.name]
args.append(x)
# If there are many in_datatypes, then this could cause a
# combinatorial explosion of possibilities. Optimize by throwing
# out data objects that fail a constraint.
if len(args) > 1:
for i in range(len(args)):
x = args[i]
x = [x for x in x if _is_valid_input_i_net(
network, module_id, i, network.nodes[x], custom_attributes)]
args[i] = x
valid = []
# This doesn't work. The same module can have multiple inputs and
# outputs.
## Optimization: Assume existing inputs in the network are valid
## and don't check them again.
## Fastq.trimmed (no) -> is_compressed -> Fastq.trimmed (no)
## Fastq.trimmed (yes) -> is_compressed -> Fastq.trimmed (yes)
#if len(module.in_datatypes) == 1:
# ids = nodeid2parents.get(module_id, [])
# x = [(x, ) for x in ids]
# valid.extend(x)
# The multiple in_datatypes case is harder, because we don't know
# the order of the inputs.
for input_ids in itertools.product(*args):
assert len(input_ids) == len(module.in_datatypes)
# Don't check again if already done.
if input_ids in valid:
continue
# Actually, duplicated IDs are OK. Some modules may accept
# the same data type twice. Usually, this is a bug in the
# rules, but we should allow it.
# Why allow it?
# No duplicated IDs.
x = {}.fromkeys(input_ids)
if len(x) != len(input_ids):
continue
# Make sure the inputs are compatible with the module.
input_datas = [network.nodes[x] for x in input_ids]
if not _is_valid_inputs_net(
network, module_id, input_datas, custom_attributes):
continue
# Make sure the outputs are compatible with the module.
# Can't use _fc_to_output_ids--recursive.
output_datas = _fc_to_outputs(module, input_datas)
for x in itertools.product(all_output_ids, output_datas):
out_id, output_data = x
if _is_data_compatible(output_data, network.nodes[out_id]):
break
else:
continue
# Passes all the tests.
valid.append(input_ids)
return valid
def _fc_to_outputs(module, in_datas):
# Generate a list of DataNode objects that can be generated from
# module and in_datas. Multiple objects can be generated because
# the consequences can vary. E.g. center_genes can set
# gene_center to either "mean" or "median". It can be either, but
# must be one of them.
import itertools
# Check the input variables.
assert len(module.in_datatypes) == len(in_datas), module.name
for i in range(len(module.in_datatypes)):
assert in_datas[i].datatype.name == module.in_datatypes[i].name
# Assume that in_datas fulfill the constraints of the module.
datatype = module.out_datatype
attributes = {}
# Priorities (in increasing order):
# 1. Default values
# 2. Constraints
# Set the default values.
# Set the attributes based on the default values.
# Case 1: default_attributes_from is given.
# Use the attributes from this default.
# Case 2: Fill with the default input values of the output
# datatype.
# Case 1.
if module.default_attributes_from:
# If there are multiple default_attributes_from, just use the
# first one.
# BUG: Is this always the right thing to do?
input_index = module.default_attributes_from[0].input_index
assert input_index < len(in_datas)
data = in_datas[input_index]
assert data.datatype.name == datatype.name
attributes.update(data.attributes)
# Case 2.
else:
for attrdef in datatype.attribute_defs.itervalues():
attributes[attrdef.name] = attrdef.default_in
# Set the constraints.
# Set the attributes based on the consequences of the module.
possibilities = {}
for cons in module.consequences:
if cons.behavior == SET_TO:
attributes[cons.name] = cons.arg1
elif cons.behavior == SET_TO_ONE_OF:
possibilities[cons.name] = cons.arg1
elif cons.behavior == BASED_ON_DATA:
possibilities[cons.name] = cons.arg1
elif cons.behavior == SAME_AS_CONSTRAINT:
input_index = cons.arg1
data = in_datas[input_index]
attributes[cons.name] = data.attributes[cons.name]
else:
raise AssertionError
# If no possibilities, then make one output variable.
if not possibilities:
x = DataNode.__new__(DataNode)
x.datatype = datatype
x.attributes = attributes.copy()
return [x]
names = sorted(possibilities)
args = [possibilities[x] for x in names]
outputs = []
for values in itertools.product(*args):
for key, value in zip(names, values):
attributes[key] = value
# Optimization: DataNode.__init__ is very expensive
# because of all the checks. Skip the checks and
# instantiate the class directly.
#x = DataNode(datatype, **attributes)
x = DataNode.__new__(DataNode)
x.datatype = datatype
x.attributes = attributes.copy()
outputs.append(x)
return outputs
def _fc_to_output_ids(
network, module_id, custom_attributes,
all_input_ids=None, all_output_ids=None, nodeid2parents=None):
import itertools
# Return a list of (in_data_ids, module_id, out_data_id) that can be
# generated by this module.
combos = _bc_to_input_ids(
network, module_id, custom_attributes, all_input_ids=all_input_ids,
all_output_ids=all_output_ids, nodeid2parents=nodeid2parents)
paths = []
for in_data_ids in combos:
# For a given combo and module, find the children it
# generates.
in_datas = [network.nodes[x] for x in in_data_ids]
output_data_ids = network.transitions[module_id]
if all_output_ids is not None:
output_data_ids = [
x for x in output_data_ids if x in all_output_ids]
output_datas = _fc_to_outputs(network.nodes[module_id], in_datas)
for x in itertools.product(output_data_ids, output_datas):
out_data_id, output_data = x
if _is_data_compatible(network.nodes[out_data_id], output_data):
x = in_data_ids, module_id, out_data_id
paths.append(x)
return paths
def _resolve_constraint(constraint, all_constraints):
# If this should be the same as another constraint, then check the
# other constraint.
# CONSEQUENCE NAME
    # CONSTRAINT 1 NAME CAN_BE_ANY_OF
# CONSTRAINT 2 NAME SAME_AS 1
# CONSTRAINT 3 NAME SAME_AS 2
#
# Given CONSTRAINT_2 or CONSTRAINT_3, return CONSTRAINT_1 (the one
    # that has the actual value).
const = constraint
assert const.behavior == SAME_AS
assert const.arg1 != const.input_index
#assert const.arg1 < module.in_datatypes
while const.behavior == SAME_AS:
#x = [x for x in module.constraints if x.name == const.name]
x = [x for x in all_constraints if x.name == const.name]
x = [x for x in x if x.input_index == const.arg1]
assert len(x) > 0, (
"%r SAME_AS %d, but datatype %d has no constraint on %r." %
(const.name, const.arg1, const.arg1, const.name))
assert len(x) == 1
const = x[0]
return const
def _is_valid_inputs(module, in_datas, out_data, custom_attributes):
# Return True/False if a module can take in_datas (list of
# DataNode nodes) as input.
assert len(in_datas) == len(module.in_datatypes)
all_inputs = _bc_to_inputs(
module, out_data, custom_attributes,
force_default_input_attribute_to_be_all_values=True)
for i in range(len(in_datas)):
if not _is_data_compatible(in_datas[i], all_inputs[i]):
return False
return True
def _is_valid_input_i(module, input_num, in_data, out_data, custom_attributes):
assert input_num < len(module.in_datatypes)
all_inputs = _bc_to_inputs(
module, out_data, custom_attributes,
force_default_input_attribute_to_be_all_values=True)
return _is_data_compatible(in_data, all_inputs[input_num])
def _is_valid_inputs_net(network, module_id, in_datas, custom_attributes):
module = network.nodes[module_id]
# If in_datas is compatible with any of the out_datas, then return
# True.
out_data_ids = network.transitions.get(module_id, [])
out_datas = [network.nodes[x] for x in out_data_ids]
for out_data in out_datas:
if _is_valid_inputs(module, in_datas, out_data, custom_attributes):
return True
return False
def _is_valid_input_i_net(
network, module_id, input_num, in_data, custom_attributes):
module = network.nodes[module_id]
# If in_datas is compatible with any of the out_datas, then return
# True.
    # No. This doesn't happen with the new inference engine.
## Subtle corner case.
## Fastq.trimmed (yes, no) -> merge_reads -> Fastq.trimmed (no)
## Fastq.trimmed (yes, no) -> merge_reads -> Fastq.trimmed (yes)
## Will test in_data against each of the outputs. Since in_data is
## a superset of each output, will erroneously say that module
## cannot take this input.
## Solution: merge the outputs when possible.
out_data_ids = network.transitions.get(module_id, [])
out_datas = [network.nodes[x] for x in out_data_ids]
for out_data in out_datas:
if _is_valid_input_i(
module, input_num, in_data, out_data, custom_attributes):
return True
return False
def _is_valid_input_ids(
network, module_id, in_data_ids, out_id, custom_attributes,
nodeid2parents):
# Optimization: Only check for modules that can generate two
# or more outputs.
#next_ids = network.transitions.get(module_id, [])
#if len(next_ids) <= 1:
# return True
if _bc_to_input_ids(
network, module_id, custom_attributes, all_input_ids=in_data_ids,
all_output_ids=[out_id], nodeid2parents=nodeid2parents):
return True
return False
def _is_valid_output(module, data):
# Return whether this module can produce this data object.
# A module cannot produce this data if:
# - The module's output data type is not the same as the data.
# - One or more of the consequences conflict.
# - An input does not go into the output, and a user_attribute
# doesn't match a constraint. OBSOLETE. Constraint now takes
# precedence over user_attribute. So user_attribute doesn't
# matter.
# - An input does not go into the output, and the data attribute
# doesn't match the (in) defaults of the output data type.
#
# A module can produce this data if:
    # - A consequence (SET_TO, SET_TO_ONE_OF, BASED_ON_DATA) that is not
# a side effect has a value that matches the value of the data.
# - The module only converts the datatype. There are no
# consequences (SET_TO, SET_TO_ONE_OF, BASED_ON_DATA), and the
# output data type has no attributes.
# e.g. download_geo_GSEID gseid -> expression_files (no attributes)
#
# These rules need to match the policies in _bc_to_one_input.
# If this module doesn't produce the same data type, then it can't
# produce this data object.
if module.out_datatype.name != data.datatype.name:
#debug_print(
# "ModuleNode can't generate data type: %s." % data.datatype.name)
return False
debug_print("Testing if module %s can produce data %s." %
(repr(module.name), str(data)))
# If any of the consequences conflict, then the module can't produce
# this data object.
for consequence in module.consequences:
assert consequence.name in data.attributes
data_value = data.attributes[consequence.name]
data_type = _get_attribute_type(data_value)
assert data_type in [TYPE_ATOM, TYPE_ENUM]
if consequence.behavior == SET_TO:
outc_value = consequence.arg1
outc_type = TYPE_ATOM
elif consequence.behavior in [SET_TO_ONE_OF, BASED_ON_DATA]:
outc_value = consequence.arg1
outc_type = TYPE_ENUM
elif consequence.behavior == SAME_AS_CONSTRAINT:
# Get the value from the constraint.
datatype_index = consequence.arg1
assert type(datatype_index) is type(0)
assert datatype_index < len(module.in_datatypes)
x = [x for x in module.constraints if x.name == consequence.name]
x = [x for x in x if x.input_index == datatype_index]
assert len(x) == 1
constraint = x[0]
# If this should be the same as another constraint, then
# check the other constraint.
if constraint.behavior == SAME_AS:
assert constraint.arg1 < len(module.in_datatypes)
constraint = _resolve_constraint(
constraint, module.constraints)
#x = [x for x in module.constraints
# if x.name == consequence.name]
#x = [x for x in x if x.input_index == constraint.arg1]
#assert len(x) == 1
#constraint = x[0]
if constraint.behavior == MUST_BE:
outc_value = constraint.arg1
outc_type = TYPE_ATOM
elif constraint.behavior == CAN_BE_ANY_OF:
outc_value = constraint.arg1
outc_type = TYPE_ENUM
else:
raise NotImplementedError, constraint.behavior
else:
raise AssertionError
assert data_type in [TYPE_ATOM, TYPE_ENUM]
assert outc_type in [TYPE_ATOM, TYPE_ENUM]
case = _assign_case_by_type(data_type, outc_type)
if case == 1:
if data_value != outc_value:
msg = ("Consequence '%s' requires '%s', "
"but data contains '%s'." %
(consequence.name, outc_value, data_value))
debug_print(msg)
return False
elif case == 2:
# ModuleNode can produce any of a list of values. Check
# if the data's value can be produced by the module.
if data_value not in outc_value:
debug_print("Consequence %s conflicts." % consequence.name)
return False
elif case == 3:
# ModuleNode produces a specific value. DataNode could be
# one of many values.
if outc_value not in data_value:
debug_print("Consequence %s conflicts." % consequence.name)
return False
elif case == 4:
if not _intersect(data_value, outc_value):
debug_print("Consequence %s conflicts." % consequence.name)
return False
else:
raise AssertionError
# Make sure the module's constraints are aligned with the
# user_attributes. THIS IS OBSOLETE. Constraint now takes
# precedence over user_attribute. So user_attribute doesn't
# matter.
# Get a list of the in_datatypes that don't continue into the
# out_datatype.
#indexes = [x.input_index for x in module.default_attributes_from]
#for i in range(len(module.in_datatypes)):
# if i in indexes:
# # The values from this datatype should be passed through.
# # The user attributes does not apply.
# continue
#
# user_attrs = [
# x for x in user_attributes
# if x.datatype == module.in_datatypes[i]]
# for attr in user_attrs:
# x = [x for x in module.constraints if x.input_index == i]
# x = [x for x in x if x.name == attr.name]
# constraints = x
#
# for cons in constraints:
# if cons.behavior == MUST_BE:
# if attr.value != cons.arg1:
# debug_print(
# "Consequence %s conflicts with user attribute." %(
# cons.name))
# return False
# elif cons.behavior == CAN_BE_ANY_OF:
# if attr.value not in cons.arg1:
# debug_print(
# "Consequence %s conflicts with user attribute." %(
# cons.name))
# return False
# elif cons.behavior == SAME_AS:
# # No conflict with user_attribute.
# pass
# else:
# raise AssertionError
# If the module converts the datatype, and no
# DefaultAttributesFrom is specified, then the data should match
# the (in) defaults from the output data type.
if not module.default_attributes_from:
debug_print(
"ModuleNode converts datatype. Checking default attributes.")
consequence_names = [x.name for x in module.consequences]
for attrdef in module.out_datatype.attribute_defs.itervalues():
# Ignore the attributes that have consequences.
if attrdef.name in consequence_names:
debug_print(
"Attr %r: Skipping--has consequence." % attrdef.name)
continue
assert attrdef.name in data.attributes
data_value = data.attributes[attrdef.name]
data_type = _get_attribute_type(data_value)
assert data_type in [TYPE_ATOM, TYPE_ENUM]
if data_type == TYPE_ATOM:
if attrdef.default_in != data_value:
debug_print("Attr %r: Conflicts (module %r, data %r)." %
(attrdef.name, attrdef.default_in, data_value))
return False
elif data_type == TYPE_ENUM:
if attrdef.default_in not in data_value:
debug_print("Attr %r: Conflicts (module %r, data %r)." %
(attrdef.name, attrdef.default_in, data_value))
return False
else:
raise AssertionError
debug_print("Attr %r: matches defaults." % attrdef.name)
# TESTING.
# If the module converts the datatype, the consequences don't
# conflict, and the default attributes don't conflict, then this
# should match.
if not module.default_attributes_from:
debug_print("Match because of converting datatype.")
return True
# At this point, the module produces this datatype and there are
# no conflicts. Look for an consequence that is not a side effect
# that changes the value of an output attribute.
for consequence in module.consequences:
if consequence.name not in data.attributes:
continue
if consequence.side_effect:
continue
if consequence.behavior in [SET_TO, SET_TO_ONE_OF, BASED_ON_DATA]:
debug_print("Consequence '%s' matches." % consequence.name)
return True
assert consequence.behavior == SAME_AS_CONSTRAINT
# If the value of the output attribute is the same as the
# input attribute, then this does not change the data.
#
# If:
# - this consequence refers to a different object, and
# - there is a constraint that refers to the same object with
# a different value,
# then this is a match.
#
# Example:
# [GeneListFile, SignalFile] -> SignalFile
# Constraint("gene_order", CAN_BE_ANY_OF, ["pvalue", "fdr"], 0)
# Constraint("gene_order", MUST_BE, "no", 1)
# Consequence("gene_order", SAME_AS_CONSTRAINT, 0)
# Make sure it refers to a different object.
indexes = [x.input_index for x in module.default_attributes_from]
if consequence.arg1 in indexes:
continue
# Find the constraint that goes with this consequence.
x = [
x for x in module.constraints
if x.name == consequence.name and x.input_index == consequence.arg1
]
assert len(x) == 1
const1 = x[0]
# Find the constraint that refers to the same object.
x = [x for x in module.constraints
if x.name == consequence.name and x.input_index in indexes]
if not x:
continue
# If there are multiple constraints, make sure they have the
# same values.
if len(x) >= 2:
raise NotImplementedError
const2 = x[0]
# Follow SAME_AS.
if const1.behavior == SAME_AS:
const1 = _resolve_constraint(const1, module.constraints)
if const2.behavior == SAME_AS:
const2 = _resolve_constraint(const2, module.constraints)
#while const1.behavior == SAME_AS:
# x = [x for x in module.constraints
# if x.name == consequence.name and
# x.input_index == const1.arg1]
# assert len(x) == 1
# const1 = x[0]
#while const2.behavior == SAME_AS:
# x = [x for x in module.constraints
# if x.name == const2.name and x.input_index == const2.arg1]
# assert len(x) == 1
# const2 = x[0]
assert const1.behavior in [MUST_BE, CAN_BE_ANY_OF]
assert const2.behavior in [MUST_BE, CAN_BE_ANY_OF]
if (const1.behavior, const2.behavior) == (MUST_BE, MUST_BE):
if const1.arg1 != const2.arg1:
debug_print("Consequence '%s' matches." % consequence.name)
return True
elif (const1.behavior, const2.behavior) == (MUST_BE, CAN_BE_ANY_OF):
if const1.arg1 not in const2.arg1:
debug_print("Consequence '%s' matches." % consequence.name)
return True
elif (const1.behavior, const2.behavior) == (CAN_BE_ANY_OF, MUST_BE):
if const2.arg1 not in const1.arg1:
debug_print("Consequence '%s' matches." % consequence.name)
return True
elif (const1.behavior, const2.behavior) == \
(CAN_BE_ANY_OF, CAN_BE_ANY_OF):
if not _intersect(const1.arg1, const2.arg1):
debug_print("Consequence '%s' matches." % consequence.name)
return True
else:
raise AssertionError
# No conflicts, and the module has no consequences.
if not module.consequences:
debug_print("Match because there are no consequences.")
return True
# No consequences match.
debug_print("No consequences match.")
return False
def _is_valid_output_id_path(network, path, out_id, custom_attributes,
nodeid2parents):
# Can the nodes in this pathway produce out_id.
# Find the modules that can produce out_id
x = nodeid2parents.get(out_id, [])
module_ids = [x for x in x if x in path]
assert module_ids
for module_id in module_ids:
x = nodeid2parents.get(module_id, [])
prev_ids = [x for x in x if x in path]
if _is_valid_input_ids(
network, module_id, prev_ids, out_id, custom_attributes,
nodeid2parents):
return True
return False
def _find_same_data(nodes, node):
# All values need to be exactly equal. Return index into nodes.
# -1 if not found.
assert isinstance(node, DataNode)
for i, n in enumerate(nodes):
if not isinstance(n, DataNode):
continue
if n.datatype.name != node.datatype.name:
continue
attr1 = n.attributes
attr2 = node.attributes
is_equal = True
for key in attr1:
assert key in attr2
value1 = attr1[key]
value2 = attr2[key]
if not _is_attribute_same(value1, value2):
is_equal = False
break
if is_equal:
return i
return -1
def _find_compat_data(network, data_node, ids_to_score=None):
# Return a list of node_ids that match the user_data data node exactly.
scores = _score_compat_data(network, data_node, ids_to_score=ids_to_score)
x = [x for x in scores if x[0] == 0]
node_ids = [x[1] for x in x]
return node_ids
def _score_same_data(node1, node2):
# Return a tuple (score, has_same_datatype, list of attributes with
# different values). If the two nodes have a different datatype,
# then the attributes will be None. The score is the number of
# attributes that are different. It is 99 if the datatypes are
# different.
if node1.datatype.name != node2.datatype.name:
return (99, False, None)
attrs = []
for name in node1.attributes:
V1 = node1.attributes[name]
V2 = node2.attributes[name]
if not _is_attribute_same(V1, V2):
attrs.append(name)
return (len(attrs), True, attrs)
def _score_compat_data(network, data_node, ids_to_score=None):
# Return a list of (score, node_id, data_node, list of (name,
    # value in network node, value in user_data)). Sorted by
# increasing score. ids_to_score is a list of node IDs in the
# network to score. If None, will score them all.
# Look for the nodes in the network that are compatible with
# user_data.
results = []
for node_id, next_ids in network.iterate(node_class=DataNode):
if ids_to_score is not None and node_id not in ids_to_score:
continue
netw_node = network.nodes[node_id]
if netw_node.datatype.name != data_node.datatype.name:
continue
# Look for incompatible attributes.
netw_attr = netw_node.attributes
data_attr = data_node.attributes
attrs = []
for key in netw_attr:
assert key in data_attr
netw_value = netw_attr[key]
data_value = data_attr[key]
if not _is_attribute_compatible(data_value, netw_value):
attrs.append(key)
attr_values = []
for attr in attrs:
x = attr, netw_attr[attr], data_attr[attr]
attr_values.append(x)
x = len(attrs), node_id, data_node, attr_values
results.append(x)
return sorted(results)
def _is_data_same(data_1, data_2):
x = _score_same_data(data_1, data_2)
score, has_same_datatype, diff_attributes = x
return score == 0
def _is_data_compatible(data_specific, data_general):
# Return boolean indicating whether data_specific is compatible
# with data_general.
data_s, data_g = data_specific, data_general
if data_s.datatype.name != data_g.datatype.name:
return False
assert len(data_s.attributes) == len(data_g.attributes)
assert sorted(data_s.attributes) == sorted(data_g.attributes)
for name in data_s.attributes:
s_value = data_s.attributes[name]
g_value = data_g.attributes[name]
if not _is_attribute_compatible(s_value, g_value):
return False
return True
def _is_attribute_same(values1, values2):
# CASE N1_TYPE N2_TYPE RESULT
# 1 ATOM ATOM OK if ATOM equal.
# 2 ATOM ENUM No.
# 3 ENUM ATOM No.
# 4 ENUM ENUM OK if ENUM equal.
type1 = _get_attribute_type(values1)
type2 = _get_attribute_type(values2)
case = _assign_case_by_type(type1, type2)
if case == 1:
if values1 == values2:
return True
elif case == 4:
if sorted(values1) == sorted(values2):
return True
return False
def _is_attribute_compatible(value_specific, value_general):
# CASE SPECIFIC GENERAL RESULT
# 1 ATOM ATOM Check if items are equal.
# 2 ATOM ENUM Check if ATOM in ENUM.
# 3 ENUM ATOM Not compatible.
# 4 ENUM ENUM Check if SPECIFIC is subset of GENERAL
s_value = value_specific
g_value = value_general
# Optimization. Code below is too slow due to function calls.
if type(s_value) is type(""):
if type(g_value) is type(""):
if s_value != g_value:
return False
else: # assume g_value is a sequence type
if s_value not in g_value:
return False
else: # assume s_value is a sequence type
if type(g_value) is type(""):
            # If specific is ENUM and general is ATOM, they can't be
            # compatible: the specific value allows too many
            # possibilities.
return False
else:
if not _is_subset(s_value, g_value):
return False
#s_type = _get_attribute_type(s_value)
#g_type = _get_attribute_type(g_value)
#case = _assign_case_by_type(s_type, g_type)
#
#if case == 1:
# if s_value != g_value:
# return False
#elif case == 2:
# if s_value not in g_value:
# return False
#elif case == 3:
# # If specific is ENUM and general is ATOM, can't be more
# # compatible. specific item takes too many possible
# # values.
# return False
#elif case == 4:
# if not _is_subset(s_value, g_value):
# return False
#else:
# raise AssertionError
return True
def _merge_data_nodes(node1, node2):
assert isinstance(node1, DataNode)
assert isinstance(node2, DataNode)
assert node1.datatype == node2.datatype
attrs = {}
for n, v1 in node1.attributes.iteritems():
v2 = node2.attributes[n]
attrs[n] = _merge_attribute_values(v1, v2)
return DataNode(node1.datatype, **attrs)
def _merge_attribute_values(values1, values2):
t1 = _get_attribute_type(values1)
t2 = _get_attribute_type(values2)
if t1 == TYPE_ATOM and t2 == TYPE_ATOM:
if values1 == values2:
return values1
return [values1, values2]
elif t1 == TYPE_ATOM and t2 == TYPE_ENUM:
if values1 in values2:
return values2
return [values1] + values2
elif t1 == TYPE_ENUM and t2 == TYPE_ATOM:
if values2 in values1:
return values1
return values1 + [values2]
else:
x = [x for x in values1 if x not in values2]
return x + values2
raise AssertionError, "How did I get here?"
import types
def _get_attribute_type(value):
t = type(value)
if t is types.StringType:
return TYPE_ATOM
elif t is types.ListType:
return TYPE_ENUM
elif t is types.TupleType:
return TYPE_ENUM
#if type(name) is type(""):
# return TYPE_ATOM
#elif type(name) in [type([]), type(())]:
# return TYPE_ENUM
raise AssertionError, "Unknown attribute type: %s" % str(name)
def _assign_case_by_type(type1, type2):
types = [(TYPE_ATOM, TYPE_ATOM),
(TYPE_ATOM, TYPE_ENUM),
(TYPE_ENUM, TYPE_ATOM),
(TYPE_ENUM, TYPE_ENUM), ]
x = (type1, type2)
assert x in types, "Unknown types: %s %s" % (type1, type2)
i = types.index(x)
return i + 1
def _get_parents_of(network, node_id):
# Return a list of IDs that point to this node_id.
assert node_id < len(network.nodes)
nodeid2parents = _make_parents_dict(network)
return nodeid2parents.get(node_id, [])
#ids = {}
#for nid, next_ids in network.transitions.iteritems():
# if node_id in next_ids:
# ids[nid] = 1
#return ids.keys()
def _get_children_of(network, node_id):
assert node_id < len(network.nodes)
return network.transitions.get(node_id, [])
def _make_parents_dict_h(network):
# Return a dictionary of node_id -> prev_node_ids
nodeid2parents = {}
for prev_id, node_ids in network.transitions.iteritems():
for node_id in node_ids:
if node_id not in nodeid2parents:
nodeid2parents[node_id] = []
nodeid2parents[node_id].append(prev_id)
return nodeid2parents
BACKCHAIN_CACHE = None # tuple of (network, nodeid2parents)
def _make_parents_dict(network):
global BACKCHAIN_CACHE
import copy
network_cache = nodeid2parents_cache = None
if BACKCHAIN_CACHE is not None:
network_cache, nodeid2parents_cache = BACKCHAIN_CACHE
# Even though this comparison is slow, caching saves a lot of time.
if network_cache != network:
network_cache = copy.deepcopy(network)
nodeid2parents_cache = _make_parents_dict_h(network)
x1 = network_cache
x2 = nodeid2parents_cache
BACKCHAIN_CACHE = x1, x2
return nodeid2parents_cache
def _make_ancestor_dict_h(network):
# Return a dictionary of node_id -> all node_ids that are
# ancestors of node_id.
if not network.nodes:
return {}
node2parents = _make_parents_dict(network)
# Very inefficient algorithm.
ancestors = {} # node id -> list of parent node ids.
all_nodes = list(range(len(network.nodes)))
niter = 0
while all_nodes:
niter += 1
assert niter < 1E6, "cycle in network"
node_id = all_nodes.pop(0)
parents = node2parents.get(node_id, [])
# If there are no parents, then it has no ancestors.
if not parents:
ancestors[node_id] = []
continue
# If I haven't found the ancestors of all my parents, try
# again later.
# BUG: This will generate an infinite loop if there are
# cycles. If there is a cycle, there will never be a node
# with no parents.
all_found = True
for parent_id in parents:
if parent_id not in ancestors:
all_found = False
break
if not all_found:
all_nodes.append(node_id)
continue
# If all the parents are in ancestors already, then the
# ancestors of this node are the parents and all their
# ancestors.
nodes = parents[:]
for parent_id in parents:
nodes.extend(ancestors[parent_id])
nodes = _uniq(nodes)
ancestors[node_id] = nodes
return ancestors
ANCESTOR_CACHE = None # tuple of (network, ancestor_dict)
def _make_ancestor_dict(network):
global ANCESTOR_CACHE
import copy
network_cache = ancestor_cache = None
if ANCESTOR_CACHE is not None:
network_cache, ancestor_cache = ANCESTOR_CACHE
if network_cache != network:
ancestor_cache = _make_ancestor_dict_h(network)
ANCESTOR_CACHE = copy.deepcopy(network), ancestor_cache
return ancestor_cache
def _make_descendent_dict(network):
# Return a dictionary of node_id -> all node_ids that are
# descendents of node_id.
if not network.nodes:
return {}
# Very inefficient algorithm.
descendents = {} # node id -> list of descendent node ids.
stack = list(range(len(network.nodes)))
niter = 0
while stack:
niter += 1
assert niter < 1E6, "cycle in network"
node_id = stack.pop(0)
children = network.transitions.get(node_id, [])
# If there are no children, then it has no descendents.
if not children:
descendents[node_id] = []
continue
# If I haven't found the descendents of all my children, try
# again later. This will generate an infinite loop if there
# are cycles.
all_found = True
for child_id in children:
if child_id not in descendents:
all_found = False
break
if not all_found:
stack.append(node_id)
continue
# If all the children are in descendents already, then the
# descendents of this node are the children and all their
# descendents.
nodes = children[:]
for child_id in children:
nodes.extend(descendents[child_id])
nodes = _uniq(nodes)
descendents[node_id] = nodes
return descendents
def _can_reach_by_bc(network, node_id, good_ids):
# Return a dictionary of all the node IDs that can be reached by
# backwards chaining.
reachable_ids = {}
stack = [node_id]
while stack:
nid = stack.pop(0)
if nid in reachable_ids:
continue
if nid not in good_ids:
continue
reachable_ids[nid] = 1
ids = _get_parents_of(network, nid)
stack.extend(ids)
return reachable_ids
def _can_reach_by_fc(network, node_id, good_ids=None):
# Return a dictionary of all the node IDs that can be reached by
# forward chaining.
reachable_ids = {}
stack = [node_id]
while stack:
nid = stack.pop(0)
if nid in reachable_ids:
continue
if good_ids is not None and nid not in good_ids:
continue
reachable_ids[nid] = 1
ids = network.transitions.get(nid, [])
stack.extend(ids)
return reachable_ids
# THIS FUNCTION IS REALLY SLOW. DO NOT USE.
ITER_UPPER_DIAG_CACHE = {}
def _iter_upper_diag(n):
global ITER_UPPER_DIAG_CACHE
if n >= 1024:
return _iter_upper_diag_h(n)
if n not in ITER_UPPER_DIAG_CACHE:
x = list(_iter_upper_diag_h(n))
ITER_UPPER_DIAG_CACHE[n] = x
return ITER_UPPER_DIAG_CACHE[n]
def _iter_upper_diag_h(n):
for i in range(n - 1):
for j in range(i + 1, n):
yield i, j
def _intersect(x, y):
return list(set(x).intersection(y))
def _is_subset(x, y):
# Return whether x is a subset of y.
for i in range(len(x)):
if x[i] not in y:
return False
return True
def _flatten(l, ltypes=(list, tuple)):
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
#l[i:i+1] = l[i] # causes problem in pychecker
l = l[:i] + l[i] + l[i+1:]
i += 1
return ltype(l)
def _uniq(seq):
return {}.fromkeys(seq).keys()
def _print_nothing(s):
pass
def _print_string(s):
print s
def _print_line(line, prefix1, prefixn, width, outhandle=None):
from genomicode import parselib
outhandle = outhandle or sys.stdout
lines = parselib.linesplit(
line, prefix1=prefix1, prefixn=prefixn, width=width)
for x in lines:
print >>outhandle, x
def _pretty_attributes(attributes):
import re
if not attributes:
return ""
all_attributes = sorted(attributes)
# Separate the proper python variables from other attributes.
proper = []
improper = []
for key in all_attributes:
if re.match(r"^[a-z_][a-z0-9_]*$", key, re.I):
proper.append(key)
else:
improper.append(key)
# For proper python variables, format as <key>=<value>.
fmt_proper = []
for key in proper:
x = "%s=%r" % (key, attributes[key])
fmt_proper.append(x)
fmt_proper = ", ".join(fmt_proper)
# For improper python variables, format as a dict.
x = {}
for key in improper:
x[key] = attributes[key]
fmt_improper = ""
if x:
fmt_improper = repr(x)
if not fmt_improper:
return fmt_proper
return "%s, %s" % (fmt_proper, fmt_improper)
def _fix_node_id_pairs_after_merge(node_id_pairs, merge_id1, merge_id2):
# n2 was merged into n1.
# n2 doesn't exist anymore.
assert merge_id1 != merge_id2
n1, n2 = merge_id1, merge_id2
if n1 > n2: # make sure n1 < n2, for convenience.
n1, n2 = n2, n1
# d < n1 < n2 Don't change d.
# n1 < d < n2 Don't change d.
# n1 < n2 < d Subtract d by 1.
# n1 < n2 = d d is now n1.
# Watch out for weird situations, e.g. 3-way merges.
# A-B, A-C, B-C
# After we merge A and B, we are left with A-C, A-C.
# Then, after we merge the second A-C, we are left with
# A-A. Ignore duplicates.
pairs = []
for i in range(len(node_id_pairs)):
d1, d2 = node_id_pairs[i]
if d1 == n2:
d1 = n1
elif d1 > n2:
d1 -= 1
if d2 == n2:
d2 = n1
elif d2 > n2:
d2 -= 1
if d1 != d2:
pairs.append((d1, d2))
return pairs
def _fix_node_id_dict_after_merge(node_id_dict, merge_id1, merge_id2):
# node_id_dict is node_id -> list of node_ids
# n2 was merged into n1.
# n2 doesn't exist anymore.
assert merge_id1 != merge_id2
n1, n2 = merge_id1, merge_id2
if n1 > n2: # make sure n1 < n2, for convenience.
n1, n2 = n2, n1
# d < n1 < n2 Don't change d.
# n1 < d < n2 Don't change d.
# n1 < n2 < d Subtract d by 1.
# n1 < n2 = d d is now n1.
fixed_dict = {}
for (key_id, value_ids) in node_id_dict.iteritems():
if key_id == n2:
key_id = n1
elif key_id > n2:
key_id -= 1
value_ids = value_ids[:]
for i in range(len(value_ids)):
if value_ids[i] == n2:
value_ids[i] = n1
elif value_ids[i] > n2:
value_ids[i] -= 1
if key_id in fixed_dict:
# happens when merging n2 into n1
value_ids = value_ids + fixed_dict[key_id]
# no duplicates in value_ids
value_ids = {}.fromkeys(value_ids).keys()
# key_id should not be in value_ids
assert key_id not in value_ids
#if key_id in value_ids:
# del value_ids[key]
fixed_dict[key_id] = value_ids
return fixed_dict
def _product_network(network, custom_attributes, max_nodes=None,
nodeid2id_fn=None):
# Perform a product operation (find all combinations of inputs to
# the node) over every node in the network. max_nodes is the
# maximum number of nodes that I should perform a product over.
# Return a list of tuples.
if max_nodes is None:
max_nodes = len(network.nodes)
nodeid2parents = _make_parents_dict(network)
cache = {}
x = _product_network_h(
network, 0, custom_attributes, nodeid2parents, max_nodes,
nodeid2id_fn, cache)
return x.keys()
def _product_network_h(
network, node_id, custom_attributes, nodeid2parents, max_nodes,
nodeid2id_fn, cache):
if node_id not in cache:
x = _product_network_hh(
network, node_id, custom_attributes, nodeid2parents, max_nodes,
nodeid2id_fn, cache)
cache[node_id] = x
return cache[node_id]
def _product_network_hh(
network, node_id, custom_attributes, nodeid2parents, max_nodes,
nodeid2id_fn, cache):
# Gets called exactly once per node in the network (due to caching
# in _product_network_h).
node = network.nodes[node_id]
inputs = {}
if isinstance(node, DataNode):
#if node_id not in skip_ids:
# inputs[(node_id, )] = 1
x = node_id
if nodeid2id_fn is not None:
x = nodeid2id_fn(node_id)
if x is not None:
inputs[(x, )] = 1
for previd in nodeid2parents.get(node_id, []):
x = _product_network_h(
network, previd, custom_attributes, nodeid2parents, max_nodes,
nodeid2id_fn, cache)
inputs.update(x)
elif isinstance(node, ModuleNode):
        # Find all potential sets of DataNode nodes that can feed into me.
assert node_id in nodeid2parents
# list of tuples
combos = _bc_to_input_ids(
network, node_id, custom_attributes, nodeid2parents=nodeid2parents)
# Get the inputs from each of the combinations.
for combo in combos:
# No. Combo is the number of input nodes. However, if
# multiple of those input nodes can be created by the same
# upstream node, then the total number of nodes might be
# fewer. So it is inappropriate to check for max_nodes
# here.
#if max_nodes is not None and len(combo) > max_nodes:
# continue
# Get the inputs for each branch of this combination.
# list (for each branch) of list of tuples (possible
# inputs from this branch).
branch2inputs = [
_product_network_h(
network, x, custom_attributes, nodeid2parents, max_nodes,
nodeid2id_fn, cache)
for x in combo
]
branch2inputs = [x.keys() for x in branch2inputs]
# Debug. See what tuples can make each branch.
#for i in range(len(combo)):
# for x in branch2inputs[i]:
# print node_id, combo[i], x
# Optimization: if only one branch, then no need to find
# combinations. Just handle it here. Actually, this
# optimization doesn't really save much time.
if len(branch2inputs) == 1:
for x in branch2inputs[0]:
inputs[x] = 1
continue
# If any branches are empty (e.g. the branch consists
# entirely of skip_ids), then skip this set of
# combinations.
empty_branches = False
for x in branch2inputs:
if not x:
empty_branches = True
break
if empty_branches:
continue
# Doing the product and chaining takes ~50% of the time in
# this function.
#x = itertools.product(*branch2inputs)
#x = [itertools.chain(*x) for x in x]
x = _product_and_chain(branch2inputs, max_nodes)
inputs.update(x)
else:
raise AssertionError
return inputs
def _product_and_chain(lists_of_tuples, max_length):
# list of list of tuples of integers
# Find all permutations for the inputs for each branch.
# [ [(49, 40)] Each branch has a set of inputs.
# [(38,), (35,)] ]
# -> itertools.product
# [ ((49, 40), (38,)),
# ((49, 40), (35,)) ]
# -> flatten or itertools.chain
# [ (49, 40, 38), (49, 40, 35) ]
# Is shallow list, so don't need to flatten arbitrarily
# deeply.
# Flatten the tuples.
#x = [_flatten(x) for x in x]
assert lists_of_tuples
if max_length is None:
        max_length = 1E6  # shouldn't be a network this big
product = {}
for x in lists_of_tuples[0]:
if len(x) <= max_length:
product[x] = 1
for i in range(1, len(lists_of_tuples)):
list_of_tuples = lists_of_tuples[i]
# Do a product of everything in product with everything in
# list_of_tuples.
# Nearly all the time in this function is spent in this
# function.
product = _product_and_chain_h(
product.keys(), list_of_tuples, max_length)
return product
def _product_and_chain_h(tup_list1, tup_list2, max_length):
results = {}
for x1 in tup_list1:
for x2 in tup_list2:
x = x1 + x2
if len(x) > max_length:
continue
# No duplicates.
x = {}.fromkeys(x)
x = tuple(sorted(x))
if len(x) > max_length:
continue
results[x] = 1
return results
def _object_to_dict(obj):
# Convert objects to a dictionary of their representation
d = {'__class__': obj.__class__.__name__, '__module__': obj.__module__, }
d.update(obj.__dict__)
return d
def _dict_to_object(d):
assert isinstance(d, dict)
args = dict((key.encode('ascii'), value) for key, value in d.items())
for key, value in args.iteritems():
if isinstance(value, dict):
args[key] = _dict_to_object(value)
elif isinstance(value, list):
if value:
if isinstance(value[0], unicode):
args[key] = [i.encode('ascii') for i in value]
elif isinstance(value, unicode):
args[key] = value.encode('ascii')
else:
            # Other JSON scalars (e.g. int, float, bool, None) need no
            # conversion.
            pass
inst = args
if '__class__' in args:
class_name = args.pop('__class__')
module_name = args.pop('__module__')
if '.' in module_name:
module = __import__(module_name, globals(), locals(),
[module_name.split('.')[-1]], -1)
else:
module = __import__(module_name)
class_ = getattr(module, class_name)
fn = getattr(class_, '_' + class_name + '__init_from_dict')
inst = fn(args)
return inst
import sys
try:
import cbie3
except ImportError:
pass
else:
this_module = sys.modules[__name__]
for name in cbie3.__dict__.keys():
if name.startswith("__"):
continue
this_module.__dict__[name] = cbie3.__dict__[name]
```
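The attribute helpers near the end of cbie3 treat every attribute value as either an atom (a single string) or an enum (a list or tuple of strings). Below is a minimal standalone sketch of the compatibility rule implemented by `_is_attribute_compatible`; the function name and values here are illustrative only, not part of the Betsy API.
```python
# Sketch of the ATOM/ENUM compatibility rule: an atom matches an equal
# atom or membership in an enum; an enum is compatible only with an
# enum that is a superset of it.
def compatible(specific, general):
    if isinstance(specific, str):
        if isinstance(general, str):
            return specific == general          # ATOM vs ATOM
        return specific in general              # ATOM vs ENUM
    if isinstance(general, str):
        return False                            # ENUM vs ATOM
    return set(specific).issubset(general)      # ENUM vs ENUM

assert compatible("yes", "yes")
assert compatible("yes", ["yes", "no"])
assert not compatible(["yes", "no"], "yes")
assert compatible(["yes"], ["yes", "no"])
```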
#### File: Betsy/Betsy/bhashlib.py
```python
CHUNK_SIZE = 1024*1024
def checksum_file(filename, fast=False):
import os
import stat
from hashlib import md5
assert os.path.exists(filename)
# Make sure I don't hash a symlink.
filename = os.path.realpath(filename)
hasher = md5()
# If fast, just checksum based on size and creation date.
# Otherwise, checksum the entire contents.
if fast:
x = os.stat(filename)
x = "%d %d" % (x[stat.ST_SIZE], x[stat.ST_MTIME])
hasher.update(x)
return hasher.hexdigest()
    handle = open(filename, 'rb')
while True:
x = handle.read(CHUNK_SIZE)
if not x:
break
hasher.update(x)
return hasher.hexdigest()
def checksum_path(path, fast=False):
from hashlib import md5
from genomicode import filelib
hasher = md5()
# Checksum each file.
filenames = filelib.list_files_in_path(path)
# Need to checksum files in a standard order, or the checksums may
# change.
filenames = sorted(filenames)
for filename in filenames:
x = checksum_file(filename, fast=fast)
hasher.update(x)
return hasher.hexdigest()
def checksum_file_or_path(file_or_path, fast=False):
import os
#size = get_file_or_path_size(file_or_path)
if os.path.isdir(file_or_path):
return checksum_path(file_or_path, fast=fast)
return checksum_file(file_or_path, fast=fast)
def checksum_file_or_path_smart(file_or_path):
# Returns a checksum. If the files are too big, does a fast
# checksum.
from genomicode import filelib
MB = 1024 * 1024
size = filelib.get_file_or_path_size(file_or_path)
# Do a fast checksum if files are over 128 Mb.
fast = size > 128*MB
return checksum_file_or_path(file_or_path, fast=fast)
```
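A brief, hypothetical usage sketch for the checksum helpers above. It assumes bhashlib is importable as `Betsy.bhashlib` (its location in this repository); the temporary file exists only for the demonstration.
```python
# Hypothetical usage of the checksum helpers (demonstration only).
import os
import tempfile
from Betsy import bhashlib

fd, filename = tempfile.mkstemp()
os.write(fd, b"example contents")
os.close(fd)

full_sum = bhashlib.checksum_file(filename)                 # hashes the file contents
fast_sum = bhashlib.checksum_file(filename, fast=True)      # hashes "<size> <mtime>" only
smart_sum = bhashlib.checksum_file_or_path_smart(filename)  # switches to fast mode above 128 Mb
os.remove(filename)
```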
#### File: Betsy/modules/get_illumina_control.py
```python
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
import os
import shutil
from genomicode import filelib
in_data = antecedents
result_files = os.listdir(in_data.identifier)
for result_file in result_files:
if '-controls' in result_file:
goal_file = os.path.join(in_data.identifier, result_file)
shutil.copyfile(goal_file, outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for illu_control fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'control_illumina_' + original_file + '.gct'
return filename
```
#### File: Betsy/modules/infer_read_strandedness.py
```python
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
self.cache = {}
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import filelib
from genomicode import alignlib
from Betsy import module_utils as mlib
bam_node, gene_node = antecedents
bam_filenames = mlib.find_bam_files(bam_node.identifier)
gtf_file = gene_node.identifier
filelib.assert_exists_nz(gtf_file)
assert bam_filenames, "No bam files found."
metadata = {}
# Make output filenames.
p, r, e = mlib.splitpath(gtf_file)
bed_file = "%s.bed" % r
# Make bed file.
alignlib.gtf_to_bed(gtf_file, bed_file)
#bed_file = "/data/jchang/biocore/gtf02.txt"
# Figure out the orientation.
x = get_paired_stranded_rseqc(bed_file, bam_filenames[0])
single_or_paired, stranded, frac_failed, frac_first, frac_second = x
x = mlib.Stranded(
single_or_paired, stranded, frac_failed, frac_first, frac_second)
mlib.write_stranded(x, outfile)
return metadata
def name_outfile(self, antecedents, user_options):
return "stranded.json"
def parse_rseqc_infer_experiment(output):
# Return tuple of (
# "single" or "paired",
# "unstranded", "firststrand", "secondstrand"
# <fraction failed>,
# <fraction firststrand>,
# <fraction secondstrand>
# )
# Reading reference gene model gencode.v19.annotation.bed ... Done
# Loading SAM/BAM file ... Finished
# Total 184900 usable reads were sampled
#
#
# This is PairEnd Data
# Fraction of reads failed to determine: 0.0889
# Fraction of reads explained by "1++,1--,2+-,2-+": 0.0229
# Fraction of reads explained by "1+-,1-+,2++,2--": 0.8882
# This is SingleEnd Data
# Fraction of reads failed to determine: 0.0170
# Fraction of reads explained by "++,--": 0.9669
# Fraction of reads explained by "+-,-+": 0.0161
x = output.split("\n")
x = [x.strip() for x in x]
x = [x for x in x if x]
lines = x
# MISSING FILE ERROR:
# /data/jchang/biocore/gtf02.txt does NOT exists.
# Look for missing file error.
for x in lines:
if x.find("does NOT exists") >= 0:
raise AssertionError, "RSeQC Error: %s" % x.strip()
# ANOTHER ERROR:
# Reading reference gene model gencode.vM8.annotation.bed ... Done
# Loading SAM/BAM file ... Finished
# Total 0 usable reads were sampled
# Unknown Data type
#
# This can be caused by:
# 1. Mixing single and paired end reads (according to internet).
# 2. Aligning RNA-Seq data with a DNA-Seq aligner.
if output.find("Total 0 usable reads were sampled") >= 0:
err = [
"RSeQC Error:",
output.strip(),
#"This may be due to poor alignment. Try more reads."
]
err = "\n".join(err)
raise AssertionError, err
# Might be correct. Do more checks.
assert len(lines) >= 4
# Look for the "This is" line.
for i in range(len(lines)):
if lines[i].startswith("This is"):
break
else:
raise AssertionError, 'Cannot find "This is ..." line:\n%s' % output
single_or_paired = None
if lines[i].find("PairEnd") >= 0:
single_or_paired = "paired"
elif lines[i].find("SingleEnd") >= 0:
single_or_paired = "single"
else:
raise AssertionError, "Unknown output: %s" % lines[i]
    frac_failed = frac_firststrand = frac_secondstrand = None
    for i in range(i+1, len(lines)):
if lines[i].find("failed to determine") >= 0:
x = lines[i].split()
frac_failed = float(x[-1])
elif lines[i].find("1++,1--,2+-,2-+") >= 0:
assert single_or_paired == "paired"
x = lines[i].split()
frac_secondstrand = float(x[-1])
elif lines[i].find("1+-,1-+,2++,2--") >= 0:
assert single_or_paired == "paired"
x = lines[i].split()
frac_firststrand = float(x[-1])
elif lines[i].find("++,--") >= 0:
assert single_or_paired == "single"
x = lines[i].split()
frac_secondstrand = float(x[-1])
elif lines[i].find("+-,-+") >= 0:
assert single_or_paired == "single"
x = lines[i].split()
frac_firststrand = float(x[-1])
else:
raise AssertionError, "Unknown line: %s" % lines[i]
assert frac_failed is not None
assert frac_firststrand is not None
assert frac_secondstrand is not None
stranded = "unstranded"
if frac_firststrand >= 0.7:
stranded = "firststrand"
elif frac_secondstrand >= 0.7:
stranded = "secondstrand"
x = single_or_paired, stranded, frac_failed, \
frac_firststrand, frac_secondstrand
return x
def get_paired_stranded_rseqc(reference_bed, bam_filename):
from genomicode import alignlib
from genomicode import filelib
from genomicode import parallel
from Betsy import module_utils as mlib
script = alignlib.find_rseqc_script("infer_experiment.py")
filelib.assert_exists_nz(reference_bed)
filelib.assert_exists_nz(bam_filename)
# RSeQC scripts use #!/usr/bin/python, which may not be the right
# one. Use the python on the path.
cmd = [
"python",
mlib.sq(script),
"-r", mlib.sq(reference_bed),
"-i", mlib.sq(bam_filename),
]
cmd = " ".join(cmd)
x = parallel.sshell(cmd)
x = parse_rseqc_infer_experiment(x)
#single_or_paired, stranded, frac_failed, frac_first, frac_second = x
return x
```
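The strandedness decision above comes down to a simple threshold on the fractions reported by RSeQC's `infer_experiment.py`. Below is a minimal standalone sketch (not part of Betsy) that applies the same >= 0.7 cutoff used in `parse_rseqc_infer_experiment`; the numbers are the paired-end example values quoted in that function's comment.
```python
# Minimal sketch: turn RSeQC infer_experiment.py fractions into a strandedness
# call, mirroring the >= 0.7 threshold in parse_rseqc_infer_experiment above.
# The values are the paired-end example numbers from that function's comment.
frac_failed = 0.0889        # "Fraction of reads failed to determine"
frac_secondstrand = 0.0229  # explained by "1++,1--,2+-,2-+"
frac_firststrand = 0.8882   # explained by "1+-,1-+,2++,2--"

stranded = "unstranded"
if frac_firststrand >= 0.7:
    stranded = "firststrand"
elif frac_secondstrand >= 0.7:
    stranded = "secondstrand"

print(("paired", stranded, frac_failed, frac_firststrand, frac_secondstrand))
# ('paired', 'firststrand', 0.0889, 0.8882, 0.0229)
```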
#### File: Betsy/modules/make_homer_tag_directory.py
```python
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
out_path):
import os
from genomicode import config
from genomicode import filelib
from genomicode import parallel
in_path = in_data.identifier
filelib.safe_mkdir(out_path)
in_filenames = filelib.list_files_in_path(
in_path, endswith=".bam", case_insensitive=True)
assert in_filenames, "No .bam files."
jobs = [] # list of (in_filename, tag_dir, log_file)
seen = {}
for in_filename in in_filenames:
p, f = os.path.split(in_filename)
x = f
assert x.endswith(".bam")
x = x[:-4]
sample = x
assert sample not in seen
seen[sample] = 1
tag_dir = sample
log_file = "%s.log" % sample
x = in_filename, tag_dir, log_file
jobs.append(x)
# Get the command.
homer_path = filelib.which_assert(config.homer_path)
x = os.path.join(homer_path, "bin", "makeTagDirectory")
assert filelib.exists_nz(x)
make_tag_directory = x
sq = parallel.quote
commands = []
for x in jobs:
in_filename, tag_dir, log_file = x
# makeTagDirectory <tag_dir> <bam_file> >& log_file
x = [
sq(make_tag_directory),
sq(tag_dir),
sq(in_filename),
]
x = " ".join(x)
x = "%s >& %s" % (x, log_file)
commands.append(x)
parallel.pshell(commands, max_procs=num_cores, path=out_path)
# Make sure the analysis completed successfully.
x = [x[-1] for x in jobs]
x = [os.path.join(out_path, x) for x in x]
log_filenames = x
x = [x[1] for x in jobs]
x = [os.path.join(out_path, x, "tagInfo.txt") for x in x]
info_filenames = x
x = log_filenames + info_filenames
filelib.assert_exists_nz_many(x)
def name_outfile(self, antecedents, user_options):
return "tag_directories"
```
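For reference, here is a sketch of the shell command the module above assembles for each BAM file. The HOMER install path and BAM path are hypothetical, and shell quoting is omitted; the real module quotes each piece with parallel.quote and runs the commands inside out_path via parallel.pshell.
```python
# Hypothetical example of one command built by make_homer_tag_directory.
# Paths are invented; quoting (parallel.quote) is omitted for brevity.
make_tag_directory = "/opt/homer/bin/makeTagDirectory"   # assumed install path
in_filename = "/data/bam/sample1.bam"                    # assumed input
sample = "sample1"
tag_dir = sample
log_file = "%s.log" % sample

cmd = " ".join([make_tag_directory, tag_dir, in_filename])
cmd = "%s >& %s" % (cmd, log_file)
print(cmd)
# /opt/homer/bin/makeTagDirectory sample1 /data/bam/sample1.bam >& sample1.log
```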
#### File: Betsy/Betsy/reportlib.py
```python
def extract_filenames(antecedents, out_path, rename_outfile_fn=None):
# Return list of (in_file, out_file, in_filename, out_filename)
# where the filenames are the full paths, while the files are just
# the file part.
import os
filenames = []
for i, data_node in enumerate(antecedents):
in_filename = data_node.identifier
in_path, in_file = os.path.split(in_filename)
out_file = in_file
if rename_outfile_fn:
out_file = rename_outfile_fn(i, in_file)
out_filename = os.path.join(out_path, out_file)
x = in_file, out_file, in_filename, out_filename
filenames.append(x)
return filenames
def copy_file_or_path(in_filename, out_filename):
import os
import shutil
if os.path.exists(out_filename):
# Need to clean up the path so that old files aren't left
# over.
if os.path.isdir(out_filename):
shutil.rmtree(out_filename)
else:
os.unlink(out_filename)
if os.path.isdir(in_filename):
shutil.copytree(in_filename, out_filename)
else:
shutil.copyfile(in_filename, out_filename)
```
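A usage sketch for extract_filenames. The node class and paths below are hypothetical stand-ins for Betsy's IdentifiedDataNode objects; only the .identifier attribute is used. The rename function also shows why rename_outfile_fn exists: both inputs share the file name signal.txt and would otherwise collide in out_path.
```python
# Hypothetical usage of reportlib.extract_filenames with a rename function.
from Betsy import reportlib

class FakeNode:
    # Stand-in for an IdentifiedDataNode; only .identifier is needed here.
    def __init__(self, identifier):
        self.identifier = identifier

antecedents = [
    FakeNode("/data/run1/signal.txt"),
    FakeNode("/data/run2/signal.txt"),
    ]

def rename(i, in_file):
    # Prefix each copied file with its 1-based index to avoid collisions.
    return "%02d_%s" % (i + 1, in_file)

x = reportlib.extract_filenames(
    antecedents, "/results/report", rename_outfile_fn=rename)
for in_file, out_file, in_filename, out_filename in x:
    print(in_filename + " -> " + out_filename)
# /data/run1/signal.txt -> /results/report/01_signal.txt
# /data/run2/signal.txt -> /results/report/02_signal.txt
```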
#### File: Betsy/Betsy/rule_engine.py
```python
VERSION = 7
FINISHED_FILE = "finished.txt"
LAST_ACCESSED_FILE = "last_accessed.txt"
IN_PROGRESS_FILE = "in_progress.txt"
BETSY_PARAMETER_FILE = "BETSY_parameters.txt"
# When running a module, whether to make sure it starts running in an
# empty directory. Can turn off for debugging.
CLEAN_UP_PATH_FOR_NEW_MODULE = True
#CLEAN_UP_PATH_FOR_NEW_MODULE = False # DEBUGGING ONLY!
DEBUG_RUN_PIPELINE = False
TIME_FMT = "%a %b %d %H:%M:%S %Y"
DEBUG_POOL = {}
def run_pipeline(
network, in_datas, custom_attributes, user_options, paths, user=None,
job_name='', clean_up=True, num_cores=8, verbosity=0):
# Run the pipeline that is indicated by the network. Returns a
# tuple of:
# - dictionary of node_id -> IdentifiedDataNode
    # - dictionary of transitions taken, (node_id, next_node_id) -> 1
# Returns None if not successful.
#
# Can raise an exception if there was an error in one of the
# modules, or if there is no path through the network (which
# probably indicates an inferencing error).
#
# in_datas List of IdentifiedDataNodes.
    # custom_attributes From --dattr. AttributeDef
# user_options From --mattr. OptionDef
# paths List of (node_ids, start_ids).
global DEBUG_POOL
import os
import getpass
import logging
import time
from genomicode import parselib
from Betsy import bie3
from Betsy import config
user = user or getpass.getuser()
output_path = config.CACHE_PATH
if not os.path.exists(output_path):
os.mkdir(output_path)
# Is this thread-safe?
LOG_FILENAME = os.path.join(output_path, 'traceback.txt')
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
# Make a list of the valid node_ids and transitions in the pipeline.
x = bie3._merge_paths(paths)
path_ids, path_transitions, x = x
# Make a list of all the nodes provided by the user.
start_nodes = [] # list of (node_id, IdentifiedDataNode).
for p in paths:
for index, id_ in enumerate(p.start_ids):
# id_ may be None if it's just not used in this pipeline.
# Ignore it.
if id_ is None:
continue
node = in_datas[index]
x = id_, node
if x not in start_nodes:
start_nodes.append(x)
# Create a stack with all the start nodes. Each member of the
# stack can be a tuple of:
# 1. (IdentifiedDataNode, node_id, None, transitions)
# 2. (ModuleNode, node_id, None, transitions)
# 3. (ModuleNode, node_id, antecedent_ids, transitions)
# Keep track of which set of antecedents to run.
# transitions is a dictionary of (node_id, next_node_id) -> 1
# indicating which transitions were taken to get to this node.
stack = []
for (node_id, node) in start_nodes:
x = node, node_id, None, {}
stack.append(x)
# Keep track of nodes that have already been generated.
# BUG: The values should technically be a list. Since the nodes
# in the network may not be atomic, it is possible that multiple
# different atomic DataNodes can be assigned to the same node_id.
# But usually, we expect just 1.
pool = {} # dict of node_id -> IdentifiedDataNode
# Keep track of the transitions.
transition_cache = {} # dict of node_id -> dict of transitions
# Cache the module node_ids that aren't ready to be run. If all
# modules on the stack are not ready, then something is wrong and
# quit. Otherwise, we would be stuck in an infinite loop.
not_ready = {}
# Track the total analysis time.
total_time = 0
#MAX_ITER = 10000
MAX_ITER = len(path_ids) * 5
it = 0
while stack:
DEBUG_POOL = pool
it += 1
if DEBUG_RUN_PIPELINE and it >= MAX_ITER:
debug_file = "broken.png"
print "Saving network: %s" % debug_file
start_ids = [x[0] for x in start_nodes]
done_ids = [x for x in pool if x not in start_ids]
all_ids = [x for x in path_ids
if x not in done_ids and x not in start_ids]
bie3.plot_network_gv(
debug_file, network, options=user_options, bold=path_ids,
bold_transitions=path_transitions, highlight_yellow=all_ids,
highlight_green=start_ids, highlight_orange=done_ids,
verbose=True)
# Make an error message to try to diagnose problem.
if it >= MAX_ITER:
# Look for all the modules, and print out why each of them
# can't be run.
msg = []
p = msg.append
for x in stack:
node, node_id = x[:2]
if not isinstance(node, bie3.ModuleNode):
continue
x = _get_available_input_combinations(
network, node_id, custom_attributes, pool,
path_transitions)
all_antecedent_ids, not_available = x
assert not all_antecedent_ids, (
"Too many iterations, but modules left to be run. "
"Increase MAX_ITER")
assert not_available
x = [x for x in not_available if x[0] in path_ids]
p("Can't run [%d] %s. Missing:" % (node_id, node.name))
for i, (input_id, reason) in enumerate(x):
n = network.nodes[input_id]
p("%d. [%d] %s" % (i+1, input_id, n.datatype.name))
for name in sorted(n.attributes):
p(" %s=%s" % (name, n.attributes[name]))
msg = "\n".join(msg)
assert it < MAX_ITER, "Too many iterations (%d)\n%s" % (it, msg)
# Make sure we're not stuck in an infinite loop.
# 1. Only modules on the stack. AND
# 2. They are all not_ready.
x = [x for x in stack if isinstance(x[0], bie3.ModuleNode)]
if len(x) == len(stack): # only modules.
# Make sure there are modules ready to be checked.
x = [x for x in x if x[1] not in not_ready]
assert x, "Inference error: No more nodes to run."
if DEBUG_RUN_PIPELINE:
print "[%d] Stack:" % it
for x in stack:
name = bie3.get_node_name(x[0])
print " %s [%d]" % (name, x[1])
#print " %s" % x[3]
node, node_id, more_info, transitions = stack.pop()
if DEBUG_RUN_PIPELINE:
print "Processing: %s [%d]." % (bie3.get_node_name(node), node_id)
if node_id not in path_ids: # ignore if not in pipeline
if DEBUG_RUN_PIPELINE:
print "Skipping. Not in path."
continue
# If this is the last node, then we're done.
if node_id == 0:
pool[node_id] = node
if DEBUG_RUN_PIPELINE:
print "Root node. Done."
break
# If this node has already been run, ignore.
if node_id in pool:
if DEBUG_RUN_PIPELINE:
print "Already run. Skip."
continue
if isinstance(node, bie3.IdentifiedDataNode):
# Add to the pool.
pool[node_id] = node
# Add the next modules into the stack, if not already there.
on_stack = [x[1] for x in stack]
add_to_stack = []
for next_id in network.transitions[node_id]:
next_node = network.nodes[next_id]
assert isinstance(next_node, bie3.ModuleNode)
if next_id in on_stack:
# This can happen if:
# GEOSignalFile -> convert_geo_to_signal
# GEOPlatformAnnotationFile ->
#
# After the first one is processed,
# convert_geo_to_signal is added onto the stack,
# but cannot be processed yet and gets reordered
# onto the bottom of the stack. After the second
# one is processed, make sure it goes back onto
# the top of the stack.
# Remove it from the stack, so we can add this
# node back onto the top.
stack = [x for x in stack if x[1] != next_id]
# Module updates the transitions based on which set of
# antecedent IDs are used.
add_to_stack.append((next_node, next_id, None, transitions))
if DEBUG_RUN_PIPELINE:
print "Adding to stack: %s [%d]." % (
bie3.get_node_name(next_node), next_id)
# Execute the modules in alphabetical order. So push them
# onto the stack in reverse alphabetical order.
schwartz = [(bie3.get_node_name(x[0]), x) for x in add_to_stack]
schwartz = list(schwartz) # to shut up pychecker (no attrib sort)
schwartz.sort()
schwartz.reverse()
add_to_stack = [x[-1] for x in schwartz]
stack.extend(add_to_stack)
elif isinstance(node, bie3.ModuleNode) and more_info is None:
# If the input data for this module doesn't exist, then
# just try it again later.
x = _get_available_input_combinations(
network, node_id, custom_attributes, pool, path_transitions)
all_antecedent_ids, not_available = x
if not all_antecedent_ids:
# No sets of inputs are ready to run. Put back to the
# bottom of the stack and try again later.
stack.insert(0, (node, node_id, None, transitions))
if DEBUG_RUN_PIPELINE:
print "Not ready to run yet. Will try again later."
for x in not_available:
input_id, reason = x
if input_id not in path_ids:
continue
n = bie3.get_node_name(network.nodes[input_id])
print " %s %s [%d]." % (reason, n, input_id)
else:
for antecedent_ids in all_antecedent_ids:
assert len(node.in_datatypes) == len(antecedent_ids)
for x in find_out_nodes(
network, path_ids, antecedent_ids, node_id, pool):
out_id, out_data_node = x
more_info = antecedent_ids, out_id, out_data_node
x = node, node_id, more_info, transitions
stack.append(x)
if DEBUG_RUN_PIPELINE:
print (
"%s ready to run. "
"Adding with antecedent IDs %s and "
"out_id %s." % (
node_id, antecedent_ids, out_id))
elif isinstance(node, bie3.ModuleNode):
# Run this module.
antecedent_ids, out_id, out_data_node = more_info
assert len(node.in_datatypes) == len(antecedent_ids)
if DEBUG_RUN_PIPELINE:
print "Running %s." % node_id
x = run_module(
network, antecedent_ids, node_id, out_data_node,
user_options, pool, transitions, user, job_name,
clean_up=clean_up, num_cores=num_cores, verbosity=verbosity)
if x is None:
# Can happen if this module has already been run. It
# might've gotten added to the stack because there are
# many input nodes that can go into this.
if DEBUG_RUN_PIPELINE:
print "Got None result. Already run or not compatible."
continue
if DEBUG_RUN_PIPELINE:
print "Successfully complete."
# Successfully completed this module.
out_path, next_node, run_time = x
# HACK: Add the out_path to the next_node object so that
# we can generate read receipts.
next_node.out_path = out_path
# Do not add data node to the pool here. It should be
# added to the pool when encountered.
# Many paths might have led to this module. Should merge
# the transitions from each of the antecedents.
#trans = transitions.copy()
trans = transition_cache.get(out_id, {})
for nid in antecedent_ids:
x = transition_cache.get(nid, {})
trans.update(x)
for x in antecedent_ids:
trans[(x, node_id)] = 1
trans[(node_id, out_id)] = 1
transition_cache[out_id] = trans
stack.append((next_node, out_id, None, trans))
if DEBUG_RUN_PIPELINE:
print "Adding to stack: %s [%d]." % (
bie3.get_node_name(next_node), out_id)
total_time += run_time
# Since new nodes are added to the stack, more modules may
# be ready now.
not_ready = {}
else:
raise AssertionError
if 0 not in pool:
print "This pipeline has completed unsuccessfully."
return None
x = parselib.pretty_time_delta(total_time)
print "[%s] Completed (total %s)" % (time.strftime('%a %I:%M %p'), x)
return pool, transitions
#if flag and next_node and module_utils.exists_nz(next_node.identifier):
#if next_node and module_utils.exists_nz(next_node.identifier):
# msg = "Completed successfully and generated a file:"
# print "[%s] %s" % (time.strftime('%I:%M %p'), msg)
# print next_node.identifier
# sys.stdout.flush()
# return next_node.identifier
#else:
# print 'This pipeline has completed unsuccessfully'
# raise ValueError('there is no output for this pipeline')
#return None
def _import_module(network, module_id):
import importlib
module_node = network.nodes[module_id]
# If module is missing, should raise ImportError with decent error
# message.
#x = __import__(
# 'modules.'+module_name, globals(), locals(), [module_name], -1)
x = importlib.import_module("Betsy.modules.%s" % module_node.name)
module = x.Module()
return module_node, module
def find_out_nodes(network, path_ids, input_ids, module_id, pool):
# Return list of (node_id, node) that can be outputs of this
# module.
from Betsy import bie3
# If there are no possible nodes to generate, return an empty list.
assert network.transitions[module_id]
x = network.transitions[module_id]
x = [x for x in x if x not in pool]
if not x:
return []
# Get the antecedents from the pool.
antecedents = [pool[x] for x in input_ids]
if len(antecedents) == 1:
antecedents = antecedents[0]
# Import module.
module_node, module = _import_module(network, module_id)
assert len(module_node.in_datatypes) == len(input_ids)
# ModuleNodes can point to multiple DataNodes. They should always
# be the same type, but their attributes may be different (e.g. is
# logged or not). Figure out which one is generated by this
# module.
# Create a list of all the output nodes that can be generated by
# this set of inputs.
if isinstance(antecedents, bie3.IdentifiedDataNode):
x = [antecedents.data]
else:
x = [x.data for x in antecedents]
out_data_nodes = bie3._fc_to_outputs(module_node, x)
# Make sure the DataTypes are the same.
assert out_data_nodes
# Can have multiple out_data_nodes:
# 1. If attribute is BASED_ON_DATA e.g.
# is_indexed=yes,no OR
# compressed=no,gz,bz2,xz
# 2. Module should be run twice, e.g.
# extract_rsem_signal -> genes
# extract_rsem_signal -> isoforms
x = sorted([x.datatype for x in out_data_nodes])
assert x[0] == x[-1], "ModuleNode points to different DataTypes."
# Optimization: set_out_attributes can be computationally
# expensive, so assume that the module will set the output
# attributes for all the out_nodes identically. For example, the
# out_nodes may differ by logged value. However, the module will
# set all the out_nodes to the same logged value, so they'll be
# identical.
i = 0
while i < len(out_data_nodes):
out_data_node = out_data_nodes[i]
attr = module.set_out_attributes(antecedents, out_data_node.attributes)
out_data_node.attributes = attr
# This might generate duplicate out_data_nodes. Get rid of them.
j = i+1
while j < len(out_data_nodes):
if out_data_node == out_data_nodes[j]:
del out_data_nodes[j]
else:
j += 1
i += 1
# This might generate duplicate out_data_nodes. Get rid of them.
i = 0
while i < len(out_data_nodes)-1:
j = i+1
while j < len(out_data_nodes):
if out_data_nodes[i] == out_data_nodes[j]:
del out_data_nodes[j]
else:
j += 1
i += 1
# Figure out which node in the network is compatible with out_node.
compatible = [] # list of (next_id, out_data_node)
for next_id in network.transitions[module_id]:
# If this ID has already been run, then ignore.
if next_id in pool:
continue
# If this ID is not on the path, then ignore.
if next_id not in path_ids:
continue
next_node = network.nodes[next_id]
for out_data_node in out_data_nodes:
# out_attributes should be subset of next_data.attributes.
if not bie3._is_data_compatible(out_data_node, next_node):
continue
compatible.append((next_id, out_data_node))
# There may be multiple compatible nodes, e.g.
# RSEMResults -> extract_rsem_signal -> genes
# -> isoforms
# Both of them should be run.
return compatible
def run_module(
network, input_ids, module_id, out_data_node, all_user_options,
pool, transitions, user, job_name='', clean_up=True, num_cores=8,
verbosity=0):
    # Return a tuple of (output_path, IdentifiedDataNode, elapsed time)
    # for the node that was created. Returns None if this module fails
    # (no compatible output nodes, or all output nodes already
    # generated).
import os
import sys
import time
import logging
from genomicode import filelib
from Betsy import config
from Betsy import bie3
assert user
output_path = config.CACHE_PATH
filelib.assert_exists(output_path)
# Import module.
module_node, module = _import_module(network, module_id)
assert len(module_node.in_datatypes) == len(input_ids)
module_name = module_node.name
# Get the antecedents from the pool.
antecedents = [pool[x] for x in input_ids]
if len(antecedents) == 1:
antecedents = antecedents[0]
# Get the user_options for this module. all_user_options contains
# all options provided by the user. Pull out the ones relevant
# for this module. Use the defaults when necessary.
user_options = {}
for option in module_node.option_defs:
value = all_user_options.get(option.name)
if value is None:
value = option.default
assert value is not None, "Missing input: %s" % option.name
user_options[option.name] = value
# Set up the directories and outfile.
# Unfortunately, can't use timestamp in pathname, or else this
# will never re-use prior analyses. Have to be more clever about
# this.
h = _hash_module(
module_name, antecedents, out_data_node.attributes, user_options)
## Get milliseconds.
#x = time.time()
#ms = int((x-int(x))*100)
#ts = time.strftime("%y%m%d.%H%M%S", time.localtime())
#x = "%s.%02d__%s__B%03d__%s" % (ts, ms, module_name, VERSION, h)
x = "%s__B%03d__%s" % (module_name, VERSION, h)
result_dir = os.path.join(output_path, x)
outfile = module.name_outfile(antecedents, user_options)
# Create the IdentifiedDataNode that will be the output once the
# module has been run.
full_outfile = os.path.join(result_dir, outfile)
#x = bie3.DataNode(network.nodes[next_id].datatype, **out_data.attributes)
out_identified_data_node = bie3.IdentifiedDataNode(
out_data_node, full_outfile)
#time_str = "[%s]" % time.strftime('%I:%M %p')
time_str = "[%s]" % time.strftime('%a %I:%M %p')
# Check if this has already been run.
if _is_module_output_complete(result_dir):
# Update timestamp on LAST_ACCESSED_FILE.
open(os.path.join(result_dir, LAST_ACCESSED_FILE), 'w')
# Read parameter file.
filename = os.path.join(result_dir, BETSY_PARAMETER_FILE)
assert os.path.exists(filename)
params = _read_parameter_file(filename)
elapsed = params["elapsed"]
run_time = params["elapsed_pretty"]
if run_time == "instant":
x = "ran instantly"
else:
x = "took %s" % run_time
x = "%s %s (CACHED, previously %s)" % (time_str, module_name, x)
#parselib.print_split(x, prefixn=2)
print x
if verbosity >= 1:
# Print out the output directory.
indent = len("[Thu 10:06 PM] ")
x = os.path.split(result_dir)[1]
print "%s%s" % (" "*indent, x)
sys.stdout.flush()
return result_dir, out_identified_data_node, elapsed
#_debug_is_module_output_complete(
# module_name, antecedents, out_data_node.attributes, user_options,
# VERSION, h, result_dir)
# Running this module now.
x = "%s %s" % (time_str, module_name)
#parselib.print_split(x, prefixn=2)
print x
if verbosity >= 1:
# Print out the output directory.
indent = len("[Thu 10:06 PM] ")
x = os.path.split(result_dir)[1]
print "%s%s" % (" "*indent, x)
sys.stdout.flush()
# Run the analysis. If someone else is currently running the same
# analysis, then wait for them to finish. However, if they have
# somehow failed, then delete the incomplete results and start
# over.
#
# 1. Create directory.
# 2. Write in_progress.txt.
# 3. Run the analysis. Refresh in_progress.txt every 5 sec.
# 4. Write finished.txt.
# 5. Stop refreshing in_progress.txt.
#
# IN_PROGRESS FINISHED INTERPRETATION
# missing missing Starting analysis? Wait 5 sec, check again.
# If everything still missing, then overwrite.
# missing present Complete analysis.
# <5 sec old missing Still copying. Wait.
# <5 sec old present Finishing up. Consider complete.
# >5 sec old missing Abandoned. Overwrite.
# >5 sec old present Should not happen. rm copying.txt, check
# after 5 sec. If missing, consider
# complete. If back, consider error.
REFRESH = 5 # number of seconds to refresh copying.txt file.
success_file = os.path.join(result_dir, FINISHED_FILE)
last_accessed_file = os.path.join(result_dir, LAST_ACCESSED_FILE)
copying_file = os.path.join(result_dir, IN_PROGRESS_FILE)
exists = os.path.exists
i_run_analysis = None
while i_run_analysis is None:
# Try to make the result_dir. If I make it, then I should run the
# analysis. Otherwise, someone else has priority. Let them run
# the analysis.
if not os.path.exists(result_dir):
try:
os.mkdir(result_dir)
i_run_analysis = True
break
except OSError, x:
pass
# For debugging. If I want to re-run the module over
# the old one, then just keep this directory.
if not CLEAN_UP_PATH_FOR_NEW_MODULE:
i_run_analysis = True
break
last_refresh = None
if exists(copying_file):
last_refresh = time.time() - os.path.getctime(copying_file)
if not exists(copying_file) and not exists(success_file):
# BUG: This doesn't work. What if this was abandoned, but
# somebody else just happens to create the directory again
# while I'm checking? Will have result_dir, but nothing
# inside it.
# SOLUTION: Give them a cycle to create something.
# DEBUG: Skip this.
#i_run_analysis = True; break
time.sleep(REFRESH+1)
if not exists(copying_file) and not exists(success_file):
# Abandoned. Delete the result dir and try again.
if CLEAN_UP_PATH_FOR_NEW_MODULE:
_rmtree_multi(result_dir)
elif not exists(copying_file) and exists(success_file):
# Previous run is now finished.
i_run_analysis = False
# From here on down, copying_file should exist.
elif last_refresh < REFRESH and not exists(success_file):
# Still copying. Wait.
time.sleep(REFRESH)
elif last_refresh < REFRESH and exists(success_file):
# Finishing up. Consider complete.
i_run_analysis = False
elif last_refresh >= REFRESH*2 and not exists(success_file):
# Steal the file. This can cause a lot of problems, so
# don't do this unless we're sure the other process is
# really dead.
# Abandoned. Delete the result dir and try again.
if CLEAN_UP_PATH_FOR_NEW_MODULE:
_rmtree_multi(result_dir)
elif last_refresh >= REFRESH*2 and exists(success_file):
os.unlink(copying_file)
time.sleep(REFRESH)
# Should not be coming back if analysis has already
# completed successfully.
assert not exists(copying_file), "Zombie in progress file"
# At this point, no copying_file, but there is a
# success_file. Consider this analysis complete.
i_run_analysis = False
else:
# Does not fit one of these.
time.sleep(REFRESH)
assert i_run_analysis is not None
if not i_run_analysis:
        # Analysis already exists and is completed.
# Update timestamp on LAST_ACCESSED_FILE.
open(last_accessed_file, 'w')
# Get the elapsed time.
filename = os.path.join(result_dir, BETSY_PARAMETER_FILE)
assert os.path.exists(filename)
params = _read_parameter_file(filename)
elapsed = params["elapsed"]
return result_dir, out_identified_data_node, elapsed
# Make sure nobody deleted this after I created it.
assert os.path.exists(result_dir)
# Run the module.
completed_successfully = False
cwd = os.getcwd()
refresher = None
try:
# Start refreshing the copying file.
refresher = _make_file_refresher(copying_file, interval=REFRESH)
os.chdir(result_dir)
# Clear out any old files in the directory.
if CLEAN_UP_PATH_FOR_NEW_MODULE:
_clear_analysis_path(result_dir, copying_file)
else:
print "DEBUG: Not cleaning up path."
start_time = time.localtime()
try:
metadata = module.run(
network, antecedents, out_data_node.attributes, user_options,
num_cores, full_outfile)
except (SystemError, KeyboardInterrupt, MemoryError), x:
raise
except Exception, x:
logging.exception("Exception in module: %s" % module_name)
raise
end_time = time.localtime()
elapsed = time.mktime(end_time) - time.mktime(start_time)
# Make sure the module generated the requested file.
assert os.path.exists(full_outfile)
# Don't do this check. The file may be empty, e.g. if the
# variant caller found no variants.
#assert filelib.fp_exists_nz(full_outfile), \
# "Module %s did not generate results: %s" % (
# module_name, full_outfile)
# Write parameters.
x = os.path.join(result_dir, BETSY_PARAMETER_FILE)
_write_parameter_file(
x, network, module_name, antecedents, out_data_node.attributes,
user_options, transitions, outfile, start_time, end_time, metadata,
user, job_name)
completed_successfully = True
handle = open(success_file, 'w')
handle.close()
# Update the last accessed time.
open(last_accessed_file, 'w')
time.sleep(1) # make sure file exists before stopping the refresher
finally:
if not completed_successfully and clean_up:
_clear_analysis_path(result_dir, copying_file)
if refresher is not None:
refresher.stop()
if not completed_successfully and clean_up:
_rmtree_multi(result_dir)
os.chdir(cwd)
return result_dir, out_identified_data_node, elapsed
class FileRefresher:
def __init__(self, stop_filename):
import os
self.stop_filename = stop_filename
self.stop_path = os.path.split(stop_filename)[0]
self.stopped = False
def stop(self):
import os
if not self.stopped:
if os.path.exists(self.stop_path):
open(self.stop_filename, 'w')
self.stopped = True
def __del__(self):
self.stop()
def _make_file_refresher(filename, interval=None):
    # Will refresh <filename> every <interval> seconds. Returns an
# object with one method: stop. Calling stop will stop refreshing
# the file and delete it.
import os
import time
# interval is the number of seconds before refreshing the file.
MAX_INTERVAL = 60*60*24 # 24 hours
if interval is None:
interval = 5
assert interval > 0 and interval <= MAX_INTERVAL
# Make sure I can write to this file.
filename = os.path.realpath(filename)
path = os.path.split(filename)[0]
assert os.path.exists(path)
# Make sure the stop file doesn't already exist.
stop_filename = "%s.stop" % filename
if os.path.exists(stop_filename):
os.unlink(stop_filename)
pid = os.fork()
if pid == 0: # child
        # Repeatedly refresh filename until I see <filename>.stop, or
# until the directory is deleted.
try:
while not os.path.exists(stop_filename):
# Someone might have deleted this path (and exited the
# program) while I was sleeping.
if not os.path.exists(path):
break
open(filename, 'w')
time.sleep(interval)
finally:
if os.path.exists(stop_filename):
_unlink_multi(stop_filename)
if os.path.exists(filename):
_unlink_multi(filename)
os._exit(0)
return FileRefresher(stop_filename)
def _clear_analysis_path(path, copying_file):
# Delete everything inside path, except for the copying file.
import os
x = os.listdir(path)
x = [os.path.join(path, x) for x in x]
x = [os.path.realpath(x) for x in x]
x = [x for x in x if x != os.path.realpath(copying_file)]
for x in x:
if os.path.isdir(x):
_rmtree_multi(x)
else:
_unlink_multi(x)
def _unlink_multi(filename):
import os
try:
# This can fail if files are deleted in the middle
# of the operation.
os.unlink(filename)
except OSError, x:
        if str(x).find("No such file or directory") >= 0:
pass
else:
raise
def _rmtree_multi(path):
# Delete path, when multiple other processes may be deleting it at
# the same time.
import os
import shutil
while os.path.exists(path):
try:
# This can fail if files are deleted in the middle
# of the operation.
shutil.rmtree(path)
except OSError, x:
            if str(x).find("No such file or directory") >= 0:
pass
else:
raise
def _get_available_input_combinations(
network, module_id, custom_attributes, pool, valid_transitions):
# Return two lists:
# 1. Valid sets of input data. Tuples of node_ids. Same order
# as module.in_datatypes.
# 2. Available in the pool. Tuple of (node_id, reason).
# node_ids are in the same order as the module.datatypes.
    from Betsy import bie3
module = network.nodes[module_id]
# Make a list of every possible combination of inputs that goes
# into this module.
x = bie3._bc_to_input_ids(network, module_id, custom_attributes)
all_input_ids = x
# For debugging, keep track of each of the input IDs that aren't
# available.
not_available = [] # list of (input_id, reason)
# See if we have the data to run this module.
available_input_ids = []
for input_ids in all_input_ids:
assert len(module.in_datatypes) == len(input_ids)
# Make sure all these input_ids are available.
available = True
for input_id in input_ids:
if input_id in pool:
continue
available = False
x = input_id, "Not available."
not_available.append(x)
if not available:
continue
available_input_ids.append(input_ids)
# Make sure all transitions are valid.
valid_input_ids = []
for input_ids in available_input_ids:
valid = True
        for input_id in input_ids:
            if module_id in valid_transitions.get(input_id, []):
                continue
            valid = False
            x = input_id, "Not in path."
not_available.append(x)
if not valid:
continue
valid_input_ids.append(input_ids)
x = not_available
x = {}.fromkeys(x).keys()
x = sorted(x)
not_available = x
# Only want the input IDs that are both available and valid.
x = [x for x in available_input_ids if x in valid_input_ids]
return x, not_available
def _hash_module(module_name, antecedents, out_attributes, user_options):
# Return a hash that uniquely describes the input to this
# module. This is used so that the module won't be re-run on
# the same data.
#
# out_attributes has already been updated with
# set_out_attributes.
# user_options is a dictionary of the options for this module.
import hashlib
x = _make_hash_units(
module_name, antecedents, out_attributes, user_options)
tohash = [x[1] for x in x]
hasher = hashlib.md5()
for x in tohash:
hasher.update(x)
return hasher.hexdigest()
def _make_hash_units(module_name, antecedents, out_attributes, user_options):
# Return list of tuples (name, value). The values are used to
# uniquely hash the results of this module.
import operator
from Betsy import bhashlib
if not operator.isSequenceType(antecedents):
antecedents = [antecedents]
hash_units = []
hash_units.append(("module name", module_name))
# Hash the checksum of the inputs.
for data_node in antecedents:
if data_node.data.datatype.no_file:
continue
x = bhashlib.checksum_file_or_path_smart(data_node.identifier)
x = "file checksum", x
hash_units.append(x)
# Hash the outputs.
for key in sorted(out_attributes):
value = out_attributes[key]
if type(value) is not type("") and operator.isSequenceType(value):
value = ",".join(value)
x = "%s=%s" % (key, value)
x = "out_attributes", x
hash_units.append(x)
#attrs = out_attributes.copy()
#attrs.update(user_options)
for key in sorted(user_options):
value = user_options[key]
if type(value) is not type("") and operator.isSequenceType(value):
value = ",".join(value)
x = "%s=%s" % (key, value)
x = "user_options", x
hash_units.append(x)
return hash_units
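# Illustrative example of the hash units (all values hypothetical): for a
# module "align_with_bowtie2" run on one BAM folder with
# out_attributes={"sorted": "yes"} and the --mattr option num_mismatches=2,
# _make_hash_units returns something like
#     [("module name", "align_with_bowtie2"),
#      ("file checksum", "617b92ee4d313bcd0148b1ab6a91b12f"),
#      ("out_attributes", "sorted=yes"),
#      ("user_options", "num_mismatches=2")]
# _hash_module then feeds the second element of each tuple into MD5, so
# re-running the same module on identical inputs, attributes, and options
# hashes to the same result directory, which is what makes caching possible.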
def _is_module_output_complete(path):
# Return a True or False indicating whether path contains the
# complete results of a previously run module.
import os
if not os.path.exists(path):
return False
# Make sure completed file exists.
x = os.path.join(path, FINISHED_FILE)
if not os.path.exists(x):
return False
# Make sure BETSY_PARAMETER_FILE exists.
x = os.path.join(path, BETSY_PARAMETER_FILE)
if not os.path.exists(x):
return False
# Make sure no broken symlinks.
if _has_broken_symlinks(path):
return False
return True
def _has_broken_symlinks(path):
import os
for x in os.walk(path, followlinks=True):
dirpath, dirnames, filenames = x
for x in dirnames + filenames:
x = os.path.join(dirpath, x)
# Returns False for broken links.
if not os.path.exists(x):
return True
return False
def _debug_is_module_output_complete(
module_name, antecedents, out_attributes, user_options,
version, hash_, path):
import os
import sys
from Betsy import config
path_exists = os.path.exists(path)
finish_file = os.path.join(path, FINISHED_FILE)
finish_file_exists = os.path.exists(finish_file)
success_file = os.path.join(path, BETSY_PARAMETER_FILE)
success_file_exists = os.path.exists(success_file)
if path_exists:
print "%s exists" % path
if finish_file_exists:
print "%s exists" % finish_file
else:
print "%s does not exist" % finish_file
if success_file_exists:
print "%s exists" % success_file
else:
print "%s does not exist" % success_file
else:
print "%s does not exist" % path
if path_exists and success_file_exists:
if _has_broken_symlinks(path):
print "%s has broken symlinks" % path
else:
print "%s has no broken symlinks" % path
# Look for related paths.
output_path = config.CACHE_PATH
assert os.path.exists(output_path)
for prev_path in os.listdir(output_path):
x = os.path.join(output_path, prev_path)
if not os.path.isdir(x):
continue
prev_path_full = x
# index_bam_folder__B006__617b92ee4d313bcd0148b1ab6a91b12f
x = prev_path.split("__")
assert len(x) == 3, prev_path
prev_module_name, prev_version, prev_hash_ = x
assert prev_version.startswith("B") # B007
prev_version = int(prev_version[1:])
if module_name != prev_module_name:
continue
if version != prev_version:
continue
if hash_ == prev_hash_:
continue
# If not successfully completed, then skip.
filename = os.path.join(prev_path_full, BETSY_PARAMETER_FILE)
if not os.path.exists(filename):
print "%s not completed successfully yet." % prev_path
continue
# See why this doesn't match.
prev_params = _read_parameter_file(filename)
prev_hash_units = prev_params["hash"]
# Convert from unicode to strings.
for i in range(len(prev_hash_units)):
n, v = prev_hash_units[i]
prev_hash_units[i] = str(n), str(v)
hash_units = _make_hash_units(
module_name, antecedents, out_attributes, user_options)
# BUG: file checksum not aligned.
print "Checking %s" % prev_path
header = "NAME", "VALUE", "CACHED VALUE"
print "\t".join(header)
for (name, value) in hash_units:
# Find the closest match in the previous hash units.
v1 = v2 = None
if name == "out_attributes":
v1, v2 = value.split("=")
for prev_name, prev_value in prev_hash_units:
if prev_name != name:
continue
pv1 = pv2 = None
if prev_name == "out_attributes":
pv1, pv2 = prev_value.split("=")
if pv1 != v1:
continue
# Found match.
break
else:
prev_value = ""
x = name, value, prev_value
print "\t".join(x)
sys.exit(0)
def _format_pipeline(network, transitions):
# List of transitions (in_node_id, in_name, out_node_id, out_name).
# Make a list of all node IDs.
node2next = {} # node_id -> list of next_ids
for (node_id, next_id) in transitions:
if node_id not in node2next:
node2next[node_id] = []
node2next[node_id].append(next_id)
all_next_ids = {}
for node_id, next_id in transitions:
all_next_ids[next_id] = 1
# Start with all node IDs without a parent, and do a depth-first
# search across all nodes.
node_ids = [x for x in node2next if x not in all_next_ids]
pipeline = []
while node_ids:
node_id = node_ids.pop()
next_ids = node2next.get(node_id, [])
if not next_ids:
continue
# Add next_id to the pipeline.
next_id = next_ids.pop()
node2next[node_id] = next_ids
name1 = _get_node_name(network, node_id)
name2 = _get_node_name(network, next_id)
x = node_id, name1, next_id, name2
pipeline.append(x)
if next_ids:
node_ids.append(node_id)
node_ids.append(next_id)
return pipeline
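# Illustrative example (hypothetical node IDs and names): for
# transitions == {(5, 3): 1, (3, 0): 1}, i.e.
# DataNode 5 (FastqFolder) -> ModuleNode 3 (trim_adapters) -> DataNode 0,
# _format_pipeline returns
#     [(5, "FastqFolder", 3, "trim_adapters"),
#      (3, "trim_adapters", 0, "FastqFolder")]
# which _write_parameter_file stores under the "pipeline" key.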
def _get_node_name(network, node_id):
from Betsy import bie3
assert node_id >= 0 and node_id < len(network.nodes)
node = network.nodes[node_id]
if isinstance(node, bie3.DataNode):
return node.datatype.name
elif isinstance(node, bie3.ModuleNode):
return node.name
raise AssertionError
def _write_parameter_file(
filename, network, module_name, antecedents, out_attributes, user_options,
transitions, outfile, start_time, end_time, metadata, user, job_name):
# metadata is a dictionary containing whatever the module wants to
# save to the parameter files. Typically, it saves things like
# the version number of the software used, for reproducibility.
# Can be None if no metadata or not implemented.
import json
import time
import operator
from genomicode import parselib
params = {}
params["module_name"] = module_name
if not operator.isSequenceType(antecedents):
antecedents = [antecedents]
# TODO:
# Should write out the attributes for each antecedent.
ante = [(x.data.datatype.name, x.identifier) for x in antecedents]
params["antecedents"] = ante
params["out_attributes"] = out_attributes
params["user_options"] = user_options
params["pipeline"] = _format_pipeline(network, transitions)
params["outfile"] = outfile
params["start_time"] = time.strftime(TIME_FMT, start_time)
params["end_time"] = time.strftime(TIME_FMT, end_time)
elapsed = time.mktime(end_time) - time.mktime(start_time)
params["elapsed"] = elapsed
params["elapsed_pretty"] = parselib.pretty_time_delta(elapsed)
if metadata is None:
metadata = {}
params["metadata"] = metadata
params["user"] = user
params["job_name"] = job_name
hu = _make_hash_units(
module_name, antecedents, out_attributes, user_options)
params["hash"] = hu
x = json.dumps(params, sort_keys=True, indent=4)
x = x + "\n"
open(filename, 'w').write(x)
def _read_parameter_file(filename):
import os
import json
assert os.path.exists(filename)
return json.loads(open(filename).read())
```
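The BETSY_parameters.txt file written by _write_parameter_file is plain JSON. The sketch below shows what one parameter file might contain; the keys come from the code above, but every value is invented for illustration (including the pretty-printed elapsed time produced by genomicode.parselib).
```python
# Hypothetical contents of a BETSY_parameters.txt file. Keys mirror
# _write_parameter_file in rule_engine.py; all values are invented.
example_params = {
    "module_name": "align_with_bowtie2",
    "antecedents": [["FastqFolder", "/home/user/betsy/FastqFolder__B007__ab12"]],
    "out_attributes": {"sorted": "yes"},
    "user_options": {"num_mismatches": "2"},
    "pipeline": [[5, "FastqFolder", 3, "align_with_bowtie2"],
                 [3, "align_with_bowtie2", 0, "BamFolder"]],
    "outfile": "alignments",
    "start_time": "Thu Mar 02 10:06:12 2017",
    "end_time": "Thu Mar 02 10:21:47 2017",
    "elapsed": 935.0,
    "elapsed_pretty": "15 minutes",
    "metadata": {"bowtie2_version": "2.2.9"},
    "user": "jchang",
    "job_name": "",
    "hash": [["module name", "align_with_bowtie2"],
             ["out_attributes", "sorted=yes"],
             ["user_options", "num_mismatches=2"]],
    }
```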
#### File: Betsy/scripts/betsy_run.py
```python
def check_output_provided(rulebase, output_file):
if output_file:
return True
# If there is no output, just print the whole rulebase.
#print "Selecting --output DataType."
_print_rulebase(rulebase)
return False
def _print_rulebase(rulebase):
from Betsy import bie3
# Make a list of the DataType objects.
x = [getattr(rulebase, x) for x in dir(rulebase)]
x = [x for x in x if isinstance(x, bie3.DataType)]
datatypes = x
# Count the attributes.
num_attributes = 0
for dt in datatypes:
num_attributes += len(dt.attribute_defs)
# Make a list of the modules
modules = rulebase.all_modules
print "Rulebase contains %d data types, %d attributes, and %d modules." % (
len(datatypes), num_attributes, len(modules))
# Print each DataType object.
for dt in datatypes:
# Skip the private datatypes.
if dt.name.startswith("_"):
continue
_pretty_print_datatype(dt)
print
print
# Print the options from each module.
for module in modules:
if not module.option_defs:
continue
_pretty_print_module(module)
print
print
def _pretty_print_datatype(datatype, handle=None):
import sys
from genomicode import parselib
handle = handle or sys.stdout
LW = 72
print >>handle, "="*LW
print >>handle, "DataType: %s" % datatype.name
if datatype.help:
for x in parselib.linesplit(datatype.help, prefix1=10, prefixn=10):
print >>handle, x
print >>handle, "-"*LW
for attr in datatype.attribute_defs.itervalues():
x1 = "%-20s" % attr.name
x2 = []
for val in attr.values:
if val == attr.default_in:
val = val + " (in)"
if val == attr.default_out:
val = val + " (out)"
x2.append(val)
x2 = ", ".join(x2)
x = x1 + x2
lines = parselib.linesplit(x, prefixn=20)
for line in lines:
print >>handle, line
def _pretty_print_module(module, handle=None):
import sys
from genomicode import parselib
handle = handle or sys.stdout
LW = 72
print >>handle, "="*LW
print >>handle, "Module: %s" % module.name
if module.help:
for x in parselib.linesplit(module.help, prefix1=8, prefixn=8):
print >>handle, x
print >>handle, "-"*LW
for option in module.option_defs:
x1 = "%-20s" % option.name
x2 = ""
if option.help:
x2 = "%s " % option.help
default = ""
if option.default:
default = "(default %s)" % option.default
x3 = str(default)
x = x1 + x2 + x3
x = x.strip()
lines = parselib.linesplit(x)
for line in lines:
print >>handle, line
def generate_network(rulebase, outtype,
custom_attributes, out_custom_attribute):
import sys
from Betsy import bie3
assert outtype
# Get the out_datatype.
# BUG: Should separate the custom attributes for the out_datatype.
assert hasattr(rulebase, outtype), "Unknown datatype: %s" % outtype
attrs = {}
if out_custom_attribute:
assert out_custom_attribute.datatype.name == outtype
for x in out_custom_attribute.attributes:
assert x.name not in attrs
attrs[x.name] = x.value
out_datatype = getattr(rulebase, outtype)
out_data = out_datatype.output(**attrs)
#for cattr in custom_attributes:
# if cattr.datatype.name != out_datatype.name:
# continue
# for x in cattr.attributes:
# assert x.name not in attrs
# attrs[x.name] = x.value
a_or_an = "a"
if outtype.lower()[0] in "aeiou":
a_or_an = "an"
print "Generating a network that produces %s:\n%s" % (
a_or_an, out_data.datatype.name)
for name in sorted(out_data.attributes):
value = out_data.attributes[name]
x = " %s=%s" % (name, value)
print x
sys.stdout.flush()
# There may be a bug in here somehow where impossible networks can
# be created. e.g. FastqFolder:orientation="unknown" ->
# merge_reads -> FastqFolder:orientation="single", even though
# constraint forces them to be the same. Happens during
# complete_network or optimize_network step. Don't see anymore,
# because got rid of orientation attribute in FastqFolder.
network = bie3.make_network(
rulebase.all_modules, out_data, custom_attributes)
assert network, "Unexpected error: No network generated."
## # If the user specified some data types that should be excluded,
## # remove them from the network.
## if exclude_input:
## if len(exclude_input) <= 3:
## x = ", ".join(exclude_input)
## print "Remove as inputs: %s." % x
## else:
## print "Remove %d data types as inputs." % len(exclude_input)
## network = _remove_unwanted_input_nodes(
## network, exclude_input, user_attributes)
x = "nodes"
if len(network.nodes) == 1:
x = "node"
print "Made a network with %d %s." % (len(network.nodes), x)
return network
def check_more_than_one_node_network(network, rulebase):
#from genomicode import parselib
from Betsy import bie3
if len(network.nodes) > 1:
return True
# Print out the node.
node = network.nodes[0]
print "I cannot generate this network."
#print node.datatype.name
#for x in node.attributes.iteritems():
# name, value = x
# x = " %s=%s" % (name, value)
# print x
print "This can be caused by:"
print "1. An incompatibility in the attributes (most likely)."
print " Please verify the attributes in the output data object."
print "2. The knowledge base is incomplete and missing a rule."
# Look for rules that can almost generate this node, with at most
# 1 mismatch.
attr2values = {} # attr -> list of values
for module in rulebase.all_modules:
assert not bie3._is_valid_output(module, node, save_conflicts=True)
# Print out conflicts
conflicts = bie3.DEBUG_IS_VALID_OUTPUT_CONFLICTS
if not conflicts: # Can happen if data type doesn't match.
continue
if len(conflicts) > 1:
continue
attr_name, desired_value, data_value = conflicts[0]
#print module.name, attr_name, desired_value, data_value
v1 = attr2values.get(attr_name, [])
v2 = desired_value
if type(v2) is type(""):
v2 = [v2]
attr2values[attr_name] = v1+v2
if not attr2values:
return False
print "Is it possible that you meant:"
i = 1
for attr_name in sorted(attr2values):
x = attr2values[attr_name]
x = {}.fromkeys(x)
values = sorted(x)
#values_str = parselib.pretty_list(values, conjunction="or")
dtype = node.datatype.name
for value in values:
print "%d. %s.%s=%s" % (i, dtype, attr_name, value)
i += 1
return False
def check_inputs_provided(
network, in_data_nodes, custom_attributes, user_options,
max_inputs, network_png, verbose):
if in_data_nodes:
return True
# If there's an output and no inputs, then show the possible
# data types. The network is probably too big to show
# everything.
x = [x.data.datatype.name for x in in_data_nodes]
_print_input_datatypes(
network, x, custom_attributes, max_inputs=max_inputs)
plot_network(
network_png, network, user_options=user_options, verbose=verbose)
return False
def _print_input_datatypes(
network, required_datatypes, custom_attributes,
max_inputs=None, outhandle=None):
# required_datatypes is a list of the names of datatypes that must
# be in this combination.
import sys
from genomicode import parselib
from Betsy import bie3
outhandle = outhandle or sys.stdout
ps = parselib.print_split
print >>outhandle, \
"Looking for --input DataTypes that can generate this output."
outhandle.flush()
excluded_datatypes = None
datatype_combos = bie3.get_input_datatypes(
network, custom_attributes, skip_datatypes=excluded_datatypes,
skip_private_datatypes=True, max_inputs=max_inputs)
# Filter out the datatype_combos that do not contain all the
# datatypes in required_datatypes.
i = 0
while i < len(datatype_combos):
combo = datatype_combos[i]
names = [x.name for x in combo]
missing = False
for n in required_datatypes:
if n not in names:
missing = True
break
if missing:
del datatype_combos[i]
else:
i += 1
# Don't do this, because sometimes you want to convert between the
# same data type (e.g. to convert counts to cpm in an
# UnprocessedSignalFile).
## If there are other data types, then exclude the same datatype as
## the output. e.g. if output is a FastQCFolder, don't show an
## input that just consists of a FastQCFolder. That's just lame.
#out_dname = network.nodes[0].datatype.name
#i = 0
#while i < len(datatype_combos):
# combo = datatype_combos[i]
# if len(combo) == 1 and combo[0].name == out_dname:
# del datatype_combos[i]
# else:
# i += 1
# Sort by number of datatypes, then names.
schwartz = []
for combo in datatype_combos:
x1 = len(combo)
x2 = [x.name for x in combo]
schwartz.append((x1, x2, combo))
schwartz.sort()
datatype_combos = [x[-1] for x in schwartz]
if not datatype_combos:
print >>outhandle, \
"No combination of DataTypes can generate this network."
return
x1 = "These are"
x2 = ""
x3 = "combinations"
if len(datatype_combos) == 1:
x1 = "This is"
x2 = "only "
x3 = "combination"
x4 = ""
if max_inputs is not None:
x4 += " (up to %d)" % max_inputs
x5 = "%s the %sacceptable %s of --input DataTypes%s. " % (x1, x2, x3, x4)
x6 = "The exact number of each DataType needed is not shown."
x = x5 + x6
ps(x, prefixn=0, outhandle=outhandle)
for i, combo in enumerate(datatype_combos):
names = [x.name for x in combo]
for j in range(len(names)):
if names[j] not in required_datatypes:
names[j] = "%s*" % names[j]
ps("%3d. %s" % (
i+1, ", ".join(names)), prefixn=6, outhandle=outhandle)
def check_inputs_in_network(
network, user_options, in_data_nodes, network_png, verbose):
# Return a tuple of:
# all_inputs_found (boolean)
# list of (list of node IDs that match in_data_nodes).
import sys
import itertools
from Betsy import bie3
from genomicode import jmath
print "Assigning --input's to nodes in the network."
sys.stdout.flush()
data_nodes = [x.data for x in in_data_nodes]
# index of data_nodes -> list of compatible node_ids
node2ids = [bie3._find_compat_data(network, x) for x in data_nodes]
complete = [] # list of indexes into in_data_nodes
incomplete = []
for i, node_ids in enumerate(node2ids):
if node_ids:
complete.append(i)
else:
incomplete.append(i)
# Every node can be assigned to the network.
if len(complete) == len(in_data_nodes):
# Make sure there is a unique assignment of in_data_nodes to
# nodes in the network, e.g. in_data_nodes are not assigned to
# the same node. Test every possible combination of
# assignment.
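        # Illustrative example (hypothetical IDs): with two --inputs and
        # node2ids == [[4], [4, 7]], itertools.product yields (4, 4) and
        # (4, 7). (4, 4) collapses to a single node, but (4, 7) assigns
        # each --input to a distinct node, so the assignment is accepted.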
x = [len(x) for x in node2ids]
num_combos = jmath.prod(x)
assert num_combos < 100000, "Too many possibilities"
found = False
for x in itertools.product(*node2ids):
num_nodes = len({}.fromkeys(x))
if num_nodes == len(in_data_nodes):
found = True
break
if not found:
print "Ambiguous or duplicated --inputs."
return False, node2ids
# Everything looks OK.
print "All --input data found in network."
return True, node2ids
# For each in_data_node that cannot be found, see if we can figure
# out the closest match.
scores = []
for data_index in incomplete:
x = bie3._score_compat_data(network, data_nodes[data_index])
# If x is empty, then there are no matching data types in the
# network.
print "--input %s is not in the network." % \
data_nodes[data_index].datatype.name
scores.extend(x)
if scores:
_print_node_score_table(network, scores)
# Make a list of all possible start_ids.
start_ids = bie3._uniq(bie3._flatten(node2ids))
#for i, node_ids in enumerate(node2ids):
# start_ids.extend(node_ids)
#start_ids = _uniq(start_ids)
plot_network(
network_png, network, user_options=user_options,
highlight_green=start_ids, verbose=verbose)
return False, node2ids
def _print_node_score_table(network, scores):
# scores from bie3._score_compat_data
# Figure out the score cutoff for each data type.
dt2scores = {}
for x in scores:
score, node_id, user_data, attr_values = x
dtname = network.nodes[node_id].datatype.name
if dtname not in dt2scores:
dt2scores[dtname] = []
dt2scores[dtname].append(score)
# Score cutoff is minimum score + 1
score_cutoffs = {} # dtname -> max score to print
for dtname, score in dt2scores.iteritems():
score_cutoffs[dtname] = min(score) + 1
# Make an output table.
table = []
header = ["Node", "D", "Datatype", "Attribute", "Yours", "Network"]
table.append(header)
for x in scores:
score, node_id, user_data, attr_values = x
dt_name = network.nodes[node_id].datatype.name
if not attr_values:
x = node_id, score, dt_name, "", "", ""
assert len(x) == len(header)
table.append(x)
for name, netw_value, user_value in attr_values:
x = node_id, score, dt_name, name, user_value, netw_value
assert len(x) == len(header)
if score > score_cutoffs[dt_name]:
continue
table.append(x)
# Figure out the maximum lengths of each column.
num_cols = len(header)
col_lengths = []
for i in range(num_cols):
x = [x[i] for x in table] # Get values in column.
x = [len(str(x)) for x in x] # calculate lengths.
x = max(x)
col_lengths.append(x)
# Set a maximum limit for Datatype and Attribute columns.
# Should be max 79 columns long, including 5 for spaces.
# Column MIN MAX Notes
# Node 4 4 As short as possible.
# Delta 1 5 As short as possible.
# Datatype 8 18 Datatype name.
# Attribute 9 19 Attribute name.
# User 4 10 Can be long. But usually short.
# Network 7 22 Might be long. Lots of values.
max_lengths = [4, 1, 18, 19, 10, 22]
assert len(col_lengths) == len(max_lengths)
# Just use the maximum lengths.
col_lengths = max_lengths
#for i in range(len(col_lengths)):
# col_lengths[i] = min(col_lengths[i], max_lengths[i])
# Make sure the values aren't too long.
for i in range(len(table)):
row = list(table[i])
for j in range(len(row)):
x = row[j]
x = str(x)
x = x.rstrip()
if len(x) > col_lengths[j]:
x = x[:col_lengths[j]-3] + "..."
row[j] = x
table[i] = row
fmt = "{!s:^%ds} {!s:^%ds} {:<%ds} {:<%ds} {:<%ds} {:<%ds}" % \
tuple(col_lengths)
for x in table:
print fmt.format(*x)
def build_pipelines(
network, user_options, in_data_nodes, data_node_ids, custom_attributes,
max_inputs, network_png, verbose):
# Return list of (path, start_ids). start_ids is parallel to
# in_data_nodes. If no paths found, will return an empty list.
import sys
from genomicode import parselib
from Betsy import bie3
print "Constructing pipelines that use --input data types."
sys.stdout.flush()
# data_node_ids is parallel to in_data_nodes. Each element is a
# list of the node_ids that that data node can map onto.
try:
paths = bie3.find_paths_by_start_ids(
network, custom_attributes, data_node_ids)
except AssertionError, x:
if str(x).startswith("Too many paths"):
# Helpful for debugging.
print "ERROR: Too many pipelines. Could not generate network."
plot_network(
network_png, network, user_options=user_options,
verbose=verbose)
raise
# Make sure all the --inputs are needed. Any unnecessary ones may
# indicate a problem.
inputs_used = {} # list of indexes of --inputs that are used
for p in paths:
used = [i for (i, x) in enumerate(p.start_ids) if x is not None]
inputs_used.update({}.fromkeys(used))
has_unused_inputs = len(inputs_used) != len(in_data_nodes)
good_paths = [x for x in paths if not x.missing_ids]
if good_paths and not has_unused_inputs:
x = "pipelines"
if len(good_paths) == 1:
x = "pipeline"
print "Found %d possible %s." % (len(good_paths), x)
return good_paths
# Print out --inputs that aren't used.
if has_unused_inputs:
for i in range(len(in_data_nodes)):
if i in inputs_used:
continue
name = in_data_nodes[i].data.datatype.name
x = (
"%s is not used in any pipelines. "
"Please make sure the proposed pipelines are acceptable and "
"remove this input." % name)
parselib.print_split(x, prefixn=2)
if not good_paths:
print "No pipelines found. Examine network to diagnose."
print "Make sure that no --input is missing."
x = [x.data.datatype.name for x in in_data_nodes]
_print_input_datatypes(
network, x, custom_attributes, max_inputs=max_inputs)
print
# DEBUG
#for i, path in enumerate(paths):
# print "%2d. %s %s" % (i, path.start_ids, path.missing_ids)
# For each in_data_node, see if it might be a better match to
# another node.
scores = []
for i in range(len(in_data_nodes)):
x = bie3._score_compat_data(network, in_data_nodes[i].data)
scores.extend(x)
if scores:
print ("Make sure that the attributes for the --input's are "
"correct.")
_print_node_score_table(network, scores)
print
# Plot out the network.
plot_network_show_pipelines(
network_png, network, paths, user_options=user_options,
verbose=verbose)
return []
def check_attributes_complete(
network, user_options, paths, network_png, prune_network, verbose):
# user_options is dict of name to value
import sys
from genomicode import parselib
from Betsy import bie3
# Make sure all required mattr are provided. This can only be run
# after the final network is generated.
print "Making sure all required attributes (--mattr) are provided."
sys.stdout.flush()
assert paths
all_missing = {}
all_extra = []
good_paths = []
for i, p in enumerate(paths):
module_ids = [
x for x in p.node_ids if
isinstance(network.nodes[x], bie3.ModuleNode)]
modules = [network.nodes[x] for x in module_ids]
missing = {}
all_opt2mods = get_module_options(modules)
required_opt2mods = get_module_options(modules, required_only=True)
x = [x for x in required_opt2mods if x not in user_options]
for on in x:
for mn in all_opt2mods[on]:
missing[(mn, on)] = 1
extra = [x for x in user_options if x not in all_opt2mods]
if not missing:
good_paths.append(p)
all_missing.update(missing)
all_extra.extend(extra)
all_extra = {}.fromkeys(all_extra)
if all_extra:
names = sorted(all_extra)
x = parselib.pretty_list(names)
x = ("The following --mattr options were provided, but may not "
"be needed: %s" % x)
parselib.print_split(x, prefixn=2)
if good_paths:
if len(good_paths) < len(paths):
num_removed = len(paths)-len(good_paths)
assert all_missing
if len(all_missing) == 1:
mn, on = all_missing.keys()[0]
x = ("Removed %d pipelines because --mattr option %r was "
"not provided." % (num_removed, on))
else:
names = sorted([x[1] for x in all_missing.keys()])
x = parselib.pretty_list(names)
x = ("Removed %d pipelines because the following --mattr "
"options were not provided: %s" % (num_removed, x))
parselib.print_split(x, prefixn=2)
return good_paths
for (mn, on) in all_missing:
print 'Missing --mattr: %s requires attribute "%s".' % (mn, on)
plot_network_show_pipelines(
network_png, network, paths, user_options=user_options,
prune=prune_network, verbose=verbose)
return []
def prune_pipelines(
network, user_options, custom_attributes, paths,
network_png, prune_network, verbose):
    # If any pipelines look weird, then remove them.
import sys
from Betsy import bie3
print "Pruning redundant pipelines."
sys.stdout.flush()
paths_orig = paths[:]
## Optimizations:
## 1. Convert node_ids to dictionaries.
## 2. Convert the transitions to sets for easier comparison.
#for i, p in enumerate(paths):
# # Not much more efficient.
# #p.node_ids_set = set(p.node_ids)
# p.node_ids = {}.fromkeys(p.node_ids)
# for key, value in p.transitions.iteritems():
# p.transitions[key] = set(value)
paths = bie3.prune_paths(paths, network, custom_attributes)
## Convert node_ids back to lists.
#for i, p in enumerate(paths):
# p.node_ids = p.node_ids.keys()
# for key, value in p.transitions.iteritems():
# p.transitions[key] = list(value)
if not paths:
print "All pipelines pruned. This can happen if:"
print " 1. There is a conflicting data attribute in the pipeline."
print " Maybe an attribute is set for the wrong DataType?"
print " 2. A --mattr option is missing."
print " 3. There is a bug in the network generation."
print " 4. There is a bug in the pipeline pruning."
print "Please review the network, attributes, and --mattr options."
plot_network_show_pipelines(
network_png, network, paths_orig, user_options=user_options,
prune=prune_network, verbose=verbose)
return paths
num_pruned = len(paths_orig) - len(paths)
if not num_pruned:
x = "s"
if len(paths) == 1:
x = ""
print "No redundant pipelines found. %d final pipeline%s." % (
len(paths), x)
else:
x1 = "s"
if num_pruned == 1:
x1 = ""
x2 = "s"
if len(paths) == 1:
x2 = ""
print "Pruned %d pipeline%s. %d final pipeline%s." % (
num_pruned, x1, len(paths), x2)
return paths
def _print_attributes_pruned(pruned_attributes):
# Print out a message about what was deleted.
from Betsy import bie3
from genomicode import parselib
attr2values = {}
attr2deleted = {}
attr2user = {} # whether this was deleted based on user request
for x in pruned_attributes:
datatype_name, attr_name, kept_value, del_value, user = x
name = "%s.%s" % (datatype_name, attr_name)
if name not in attr2values:
attr2values[name] = []
if name not in attr2deleted:
attr2deleted[name] = []
attr2values[name].append(kept_value)
attr2values[name].append(del_value)
attr2deleted[name].append(del_value)
        # Multiple values may be deleted for each attribute
        # (e.g. multiple aligners may be deleted).  If any one is
        # deleted based on a user attribute, then this should be True.
if user or not name in attr2user:
attr2user[name] = user
for name in sorted(attr2values):
all_values = sorted(bie3._uniq(attr2values[name]))
kept_values = [x for x in all_values if x not in attr2deleted[name]]
assert len(kept_values) == 1
kept_value = kept_values[0]
x1 = "%s can be %s." % (name, repr(all_values))
x2 = "Arbitrarily using %s." % repr(kept_value)
if attr2user[name]:
x2 = "Using %s because it was specified by the user." % \
repr(kept_value)
x = "%s %s" % (x1, x2)
parselib.print_split(x, prefixn=2)
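# Hypothetical usage sketch (not part of the original code).  The datatype,
# attribute, and values below are made up for illustration; each tuple is
# (datatype_name, attr_name, kept_value, deleted_value, set_by_user).
def _example_print_attributes_pruned():
    pruned = [
        ("FastqFolder", "adapters_trimmed", "no", "yes", False),
        ]
    # Prints something like:
    #   FastqFolder.adapters_trimmed can be ['no', 'yes'].
    #   Arbitrarily using 'no'.
    _print_attributes_pruned(pruned)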
def _dict_diff(dict1, dict2, _as_dict=False):
# Only elements that are in dict1 and not in dict2.
# Using set comparisons does not make this any faster than dicts.
if _as_dict:
dict3 = {}
for x in dict1:
if x not in dict2:
dict3[x] = 1
else:
dict3 = [x for x in dict1 if x not in dict2]
return dict3
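# Hypothetical usage sketch (not part of the original code), showing the two
# return styles of _dict_diff.
def _example_dict_diff():
    d1 = {"a": 1, "b": 2, "c": 3}
    d2 = {"b": 0}
    # As a list of the keys from d1 that are missing from d2.
    assert sorted(_dict_diff(d1, d2)) == ["a", "c"]
    # As a dictionary whose keys are the missing elements.
    assert _dict_diff(d1, d2, _as_dict=True) == {"a": 1, "c": 1}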
def check_input_files(network, in_data_nodes, user_options, paths,
network_png, prune_network, verbose):
import os
import sys
print "Making sure all input files provided."
sys.stdout.flush()
missing = False
for x in in_data_nodes:
if x.data.datatype.no_file:
# This DataType does not require a file.
continue
elif not x.identifier:
print "No file given: %s." % (x.data.datatype.name)
missing = True
elif not os.path.exists(x.identifier):
print "File not found: %s." % x.identifier
missing = True
if not missing:
#print "All input files found."
#sys.stdout.flush()
return True
plot_network_show_pipelines(
network_png, network, paths, user_options=user_options,
prune=prune_network, verbose=verbose)
return []
def check_output_file(filename):
if not filename:
print "No --output_file specified. Will not save results."
return True
def manually_verify_network(
network, user_options, paths, run, network_png, prune_network, verbose):
import sys
if run:
return True
print "Please review the network to make sure the analysis is correct."
print "Add --run when ready to run the analysis."
sys.stdout.flush()
plot_network_show_pipelines(
network_png, network, paths, user_options=user_options,
prune=prune_network, verbose=verbose)
return False
def plot_network_show_pipelines(filename, network, paths, **keywds):
# Pass keywds through to plot_network.
from Betsy import bie3
x1 = [x.node_ids for x in paths]
# Hack: For optimization, sometimes node_ids is dict.
if x1 and type(x1[0]) is type({}):
x1 = [x.keys() for x in x1]
# Hack: For optimization, sometimes node_ids is frozenset.
if x1 and type(x1[0]) is frozenset:
x1 = [list(x) for x in x1]
x2 = [x.start_ids for x in paths]
x1 = bie3._uniq(bie3._flatten(x1))
x2 = bie3._uniq(bie3._flatten(x2))
x2 = [x for x in x2 if x is not None]
all_node_ids = x1
all_start_ids = x2
# Pull out the missing IDs from the pathways.
x = [x.missing_ids for x in paths]
if x and type(x[0]) is type({}):
x = [x.keys() for x in x]
elif x and type(x[0]) is frozenset:
x = [list(x) for x in x]
x = bie3._uniq(bie3._flatten(x))
missing_ids = x
if not missing_ids:
# Find nodes with no parents in the network that aren't start_ids.
nodeid2parents = bie3._make_parents_dict(network)
no_parents = {}
for node_id in all_node_ids:
x = nodeid2parents.get(node_id, [])
x = [x for x in x if x in all_node_ids]
if not x:
no_parents[node_id] = 1
x = all_node_ids
x = [x for x in x if x in no_parents] # no parents
x = [x for x in x if x not in all_start_ids] # not start id
missing_ids = x
transitions = {}
for path in paths:
for node_id, next_ids in path.transitions.iteritems():
for next_id in next_ids:
transitions[(node_id, next_id)] = 1
highlight_yellow = None
if not keywds.get("prune"):
x = all_node_ids
x = [x for x in x if x not in all_start_ids] # not start_ids
x = [x for x in x if x not in missing_ids]
highlight_yellow = x
plot_network(
filename, network, bold=all_node_ids,
bold_transitions=transitions, highlight_green=all_start_ids,
highlight_orange=missing_ids, highlight_yellow=highlight_yellow,
**keywds)
def plot_network(
filename, network, user_options=None,
bold=[], bold_transitions=[],
highlight_green=[], highlight_orange=[], highlight_purple=[],
highlight_yellow=[], prune=False, verbose=False):
    # If show_node_ids is not None, it should be a list or dict of the
    # node_ids to include.
import sys
from Betsy import bie3
if filename is None:
return
print "Plotting (%d node) network to %s." % (len(network.nodes), filename)
sys.stdout.flush()
show_node_ids = None
if prune:
x = set()
if bold:
x.update(bold)
if highlight_green:
x.update(highlight_green)
if highlight_orange:
x.update(highlight_orange)
if highlight_purple:
x.update(highlight_purple)
if highlight_yellow:
x.update(highlight_yellow)
for (y, z) in bold_transitions:
x.update([y, z])
show_node_ids = x
bie3.plot_network_gv(
filename, network, options=user_options, bold=bold,
bold_transitions=bold_transitions,
highlight_green=highlight_green, highlight_orange=highlight_orange,
highlight_purple=highlight_purple, highlight_yellow=highlight_yellow,
show_node_ids=show_node_ids, verbose=verbose)
def write_network(filename, network):
from Betsy import bie3
if not filename:
return
print "Writing network in json format: %s." % filename
bie3.write_network(filename, network)
def plot_pipelines(filestem, network, paths, user_options, max_pipelines=None,
prune=False, verbose=False):
if max_pipelines is not None:
paths = paths[:max_pipelines]
for i, path in enumerate(paths):
filename = "%s-%02d.png" % (filestem, i)
plot_network_show_pipelines(
filename, network, [path], user_options=user_options,
prune=prune, verbose=verbose)
def write_receipt(outfilename, network, start_ids, transitions, node_dict):
import os
import sys
from genomicode import parselib
from Betsy import bie3
from Betsy import rule_engine
from Betsy import module_utils as mlib
print "Writing receipt to %s." % outfilename
# Figure out the order to write the nodes.
node_order = []
    stack = start_ids[:]  # copy; the search below consumes this list
# Do a breadth first search.
while stack:
nid = stack.pop(0)
if nid in node_order:
continue
node_order.append(nid)
for (n1, n2) in transitions:
if nid == n1:
stack.append(n2)
# No module nodes.
node_order = [
x for x in node_order
if not isinstance(network.nodes[x], bie3.ModuleNode)]
handle = open(outfilename, 'w')
# Write the command line.
print >>handle, "COMMAND"
print >>handle, "-------"
cmd = " ".join(sys.argv)
parselib.print_split(cmd, outhandle=handle)
print >>handle
# Write out each module.
for nid in node_order:
inode = node_dict[nid]
#node = inode.data
# Input data type.
if not hasattr(inode, "out_path"):
#print >>handle, "Input: %s" % node.datatype.name
#if inode.identifier:
# print >>handle, inode.identifier
#print >>handle
continue
# Module.
# module name.
x = os.path.join(inode.out_path, rule_engine.BETSY_PARAMETER_FILE)
params = rule_engine._read_parameter_file(x)
metadata = params.get("metadata", {})
module_name = params.get("module_name")
x = "%s [Node %d]" % (module_name, nid)
print >>handle, x
print >>handle, "-"*len(x)
# run time.
assert "start_time" in params, "Missing: start_time"
start_time = params["start_time"]
run_time = params.get("elapsed_pretty")
if run_time == "instant":
x = "ran instantly"
else:
x = "took %s" % run_time
print >>handle, "RUN: %s (%s)." % (start_time, x)
#time_ = time.strptime(start_time, rule_engine.TIME_FMT)
# input files.
antecedents = params.get("antecedents")
if antecedents:
print >>handle, "INPUTS:"
for i, (name, filename) in enumerate(antecedents):
x = "%d. %s" % (i+1, name)
parselib.print_split(
x, prefix1=4, prefixn=8, outhandle=handle)
if filename:
parselib.print_split(
filename, prefix1=8, prefixn=8, outhandle=handle)
# output files.
outfile = params["outfile"]
outfilename = os.path.join(inode.out_path, outfile)
size = parselib.pretty_filesize(mlib.get_dirsize(outfilename))
x = "OUTPUT: %s (%s)" % (outfile, size)
parselib.print_split(x, prefix1=0, prefixn=4, outhandle=handle)
# working path.
x = "WORKING PATH: %s" % inode.out_path
parselib.print_split(x, outhandle=handle)
# user options (--mattr)
user_options = params.get("user_options")
if user_options:
print >>handle, "MODULE ATTRIBUTES:"
for name in sorted(user_options):
value = user_options[name]
print >>handle, " %s=%s" % (name, value)
# commands
commands = metadata.get("commands")
if commands:
if type(commands) is type(""):
commands = [commands]
for x in metadata["commands"]:
print >>handle, x
# miscellaneous metadata
meta_lines = []
for key, value in metadata.iteritems():
if key == "commands":
continue
x = "%s=%s" % (key.upper(), value)
meta_lines.append(x)
if meta_lines:
print >>handle, "METADATA:"
for x in meta_lines:
parselib.print_split(x, prefix1=4, prefixn=8, outhandle=handle)
print >>handle
def get_all_option_names():
# Return a list of the names for all OptionDef objects in the
# rulebase.
from Betsy import rulebase
names = {}
for module in rulebase.all_modules:
for option in module.option_defs:
names[option.name] = 1
return sorted(names)
def get_module_options(modules, required_only=False):
# From a list of modules, return a dictionary where the keys are
# option name and the values are a list of the modules that
# use it. If required_only is True, then will only return the
# options that are required.
option2modules = {}
for module in modules:
for option in module.option_defs:
is_required = option.default is None
if required_only and not is_required:
continue
on, mn = option.name, module.name
if on not in option2modules:
option2modules[on] = []
option2modules[on].append(mn)
return option2modules
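# Hypothetical usage sketch (not part of the original code).  The stand-in
# classes below only mimic the .option_defs/.name/.default interface that
# get_module_options relies on; real module objects come from the rulebase.
def _example_get_module_options():
    class _FakeOption:
        def __init__(self, name, default=None):
            self.name = name
            self.default = default
    class _FakeModule:
        def __init__(self, name, option_defs):
            self.name = name
            self.option_defs = option_defs
    m1 = _FakeModule("trim_adapters", [_FakeOption("adapter_file")])
    m2 = _FakeModule("align_reads", [_FakeOption("num_mismatches", "2")])
    # "adapter_file" has no default, so it is the only required option.
    assert get_module_options([m1, m2]) == {
        "adapter_file" : ["trim_adapters"],
        "num_mismatches" : ["align_reads"],
        }
    assert get_module_options([m1, m2], required_only=True) == {
        "adapter_file" : ["trim_adapters"]}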
def _list_differences_in_nodelists(nodes1, nodes2):
# nodes1 and nodes2 are lists of nodes. Should have same
# datatypes. Return a list of tuples (node index, attribute name)
# that are different.
assert len(nodes1) == len(nodes2)
diffs = []
for i, (n1, n2) in enumerate(zip(nodes1, nodes2)):
assert n1.datatype.name == n2.datatype.name
for name in n1.attributes:
v1, v2 = n1.attributes[name], n2.attributes[name]
x = i, name
if type(v1) is type("") and type(v2) is type(""):
if v1 != v2:
diffs.append(x)
elif type(v1) is type("") and type(v2) is not type(""):
if v1 not in v2:
diffs.append(x)
elif type(v1) is not type("") and type(v2) is type(""):
if v2 not in v1:
diffs.append(x)
elif type(v1) is not type("") and type(v2) is not type(""):
if v1 != v2:
diffs.append(x)
else:
raise AssertionError
return diffs
def _merge_nodelists(nodes1, nodes2):
from Betsy import bie3
assert len(nodes1) == len(nodes2)
merged = []
for (n1, n2) in zip(nodes1, nodes2):
assert n1.datatype.name == n2.datatype.name
attr = {}
for name in n1.attributes:
v1, v2 = n1.attributes[name], n2.attributes[name]
if type(v1) is type("") and type(v2) is type(""):
value = v1
if v1 != v2:
value = [v1, v2]
elif type(v1) is type("") and type(v2) is not type(""):
value = v1
if v1 not in v2:
value = v2 + [v1]
elif type(v1) is not type("") and type(v2) is type(""):
value = v1
if v2 not in v1:
value = v1 + [v2]
elif type(v1) is not type("") and type(v2) is not type(""):
value = v1
if v1 != v2:
value = sorted({}.fromkeys(v1 + v2))
attr[name] = value
n = bie3.DataNode(n1.datatype, **attr)
merged.append(n)
return merged
def _find_lowest_datatype(network, datatype_name, allowed_node_ids):
# Return a node_id or None if not found.
from Betsy import bie3
x = allowed_node_ids
x = [x for x in x if isinstance(network.nodes[x], bie3.DataNode)]
x = [x for x in x if network.nodes[x].datatype.name == datatype_name]
node_ids = x
if not node_ids:
return None
if len(node_ids) == 1:
return node_ids[0]
descendents = bie3._make_descendent_dict(network)
good_node_ids = []
for nid in node_ids:
x = descendents.get(nid, [])
x = [x for x in x if isinstance(network.nodes[x], bie3.DataNode)]
x = [x for x in x if network.nodes[x].datatype.name == datatype_name]
x = [x for x in x if x in allowed_node_ids]
if not x:
good_node_ids.append(nid)
if len(good_node_ids) == 1:
return good_node_ids[0]
return None
def _find_highest_datatype(network, datatype_name, allowed_node_ids):
# Return a node_id or None if not found.
from Betsy import bie3
x = allowed_node_ids
x = [x for x in x if isinstance(network.nodes[x], bie3.DataNode)]
x = [x for x in x if network.nodes[x].datatype.name == datatype_name]
node_ids = x
if not node_ids:
return None
if len(node_ids) == 1:
return node_ids[0]
ancestors = bie3._make_ancestor_dict(network)
good_node_ids = []
for nid in node_ids:
x = ancestors.get(nid, [])
x = [x for x in x if isinstance(network.nodes[x], bie3.DataNode)]
x = [x for x in x if network.nodes[x].datatype.name == datatype_name]
x = [x for x in x if x in allowed_node_ids]
if not x:
good_node_ids.append(nid)
if len(good_node_ids) == 1:
return good_node_ids[0]
return None
def _parse_dattr(dattr_str):
# Format: <datatype>[*].<key>=<value>
# Return <datatype>, <key>, <value>, <all_nodes>.
#err_msg = "--dattr should be <datatype>.<key>=<value>[.<next_id>]"
err_msg = "--dattr should be <datatype>[*].<key>=<value>"
x = dattr_str.split(".", 1)
assert len(x) == 2, err_msg
#assert len(x) in [2, 3], err_msg
datatype, kv = x[:2]
#next_id = None
#if len(x) == 3:
# next_id = int(x[2])
x = kv.split("=", 1)
assert len(x) == 2, err_msg
key, value = x
all_nodes = False
if datatype.endswith("*"):
datatype = datatype[:-1]
all_nodes = True
return datatype, key, value, all_nodes
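# Hypothetical usage sketch (not part of the original code); the datatype and
# attribute names below are made up for illustration.
def _example_parse_dattr():
    x = _parse_dattr("FastqFolder.adapters_trimmed=yes")
    assert x == ("FastqFolder", "adapters_trimmed", "yes", False)
    # A trailing "*" on the datatype sets the all_nodes flag.
    x = _parse_dattr("FastqFolder*.adapters_trimmed=yes")
    assert x == ("FastqFolder", "adapters_trimmed", "yes", True)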
def _parse_args(args):
inputs = [] # list of names of datatypes
in_identifiers = {} # index (into inputs) -> identifier (e.g. filename)
in_parameters = {} # index (into inputs) -> list of (key, value)
output = None # name of datatype
out_identifier = None # identifier
out_parameters = [] # list of (datatype, key, value, all_nodes)
# out_parameters can refer to output, or other internal nodes in
# the network.
input_or_output = None
i = 0
while i < len(args):
arg = args[i]
if arg == "--input":
assert len(args) >= i+1
inputs.append(args[i + 1])
input_or_output = arg
i += 2
elif arg == "--output":
assert len(args) >= i+1
assert not output, "multiple --output not allowed"
output = args[i+1]
input_or_output = arg
i += 2
elif arg == "--dattr" and input_or_output == "--input":
assert inputs
assert len(args) >= i+1
dattr = args[i+1]
i += 2
datatype, key, value, all_nodes = _parse_dattr(dattr)
#assert next_id is None
assert not all_nodes
index = len(inputs) - 1
assert datatype == inputs[index], (
"Datatype mismatch: --dattr %s and --input %s. Wrong "
"order?" % (datatype, inputs[index]))
if index not in in_parameters:
in_parameters[index] = []
# Make sure key not in these parameters already.
x = [x[1] for x in in_parameters[index] if x[0] == key]
x = {}.fromkeys(x) # sorted list of unique values.
assert not x, "Duplicate --dattr: %s" % key
in_parameters[index].append((key, value))
elif arg == "--dattr" and input_or_output == "--output":
assert len(args) >= i+1
dattr = args[i+1]
i += 2
datatype, key, value, all_nodes = _parse_dattr(dattr)
assert output
# This check won't work. out_parameters also includes
# attributes for internal nodes in the network that might
# have different data types.
#assert datatype == output, \
# "Datatype mismatch: --dattr %s and --output %s." % (
# datatype, output)
out_parameters.append((datatype, key, value, all_nodes))
elif arg == "--dattr":
raise AssertionError, "--dattr before --input or --output"
elif arg.startswith("--input_file"):
# Possible formats:
# 1. --input_file fastq01
# 2. --input_file=fastq01
assert input_or_output == "--input", \
"--input_file must be after --input and before --output"
if arg == "--input_file":
assert len(args) >= i+1
filename = args[i+1]
i += 2
else:
x = arg.split("=")
assert len(x) == 2, "Invalid arg: %s" % arg
assert x[0] == "--input_file"
filename = x[1]
i += 1
index = len(inputs) - 1
assert index >= 0
assert index not in in_identifiers, \
"Multiple --input_file provided for %s" % inputs[-1]
in_identifiers[index] = filename
elif arg == '--output_file':
assert input_or_output == "--output", \
"--output must precede --output_file"
assert len(args) >= i+1
filename = args[i+1]
i += 2
assert not out_identifier
out_identifier = filename
elif arg.startswith("--output_file"):
x = arg.split("=")
assert len(x) == 2, "Invalid arg: %s" % arg
assert x[0] == "--output_file"
filename = x[1]
i += 1
assert not out_identifier
out_identifier = filename
else:
i += 1
in_results = []
for i, input_ in enumerate(inputs):
x = input_, in_identifiers.get(i, ""), in_parameters.get(i, [])
in_results.append(x)
out_result = output, out_identifier, out_parameters
return in_results, out_result
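# Hypothetical usage sketch (not part of the original code); the datatype
# names, attribute, and file names below are made up for illustration.
def _example_parse_args():
    argv = [
        "betsy_run.py",
        "--input", "FastqFolder", "--input_file", "reads",
        "--output", "BamFolder", "--dattr", "BamFolder.sorted=yes",
        "--output_file", "out.bam",
        ]
    in_results, out_result = _parse_args(argv)
    assert in_results == [("FastqFolder", "reads", [])]
    assert out_result == (
        "BamFolder", "out.bam", [("BamFolder", "sorted", "yes", False)])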
def _make_custom_attr(rulebase, datatype, attributes, all_nodes):
from Betsy import bie3
assert attributes
assert hasattr(rulebase, datatype), "Unknown datatype: %s" % datatype
fn = getattr(rulebase, datatype)
x = [bie3.Attribute(fn, name, value) for (name, value) in attributes]
x = bie3.CustomAttributes(x, all_nodes)
return x
def main():
import os
import sys
import argparse
import getpass
#import itertools
from Betsy import config
from Betsy import rule_engine
from Betsy import userfile
from Betsy import reportlib
from Betsy import rulebase
from Betsy import bie3
WORKFLOW = (
" 1. Look through the rulebase for the DataType of interest.\n"
" Run betsy_run.py (by itself)\n"
" 2. Specify the output DataType and its attributes.\n"
" Add argument: --output\n"
" Add argument(s): --dattr\n"
" 3. Browse through the list of possible combinations of input\n"
" DataTypes.\n"
" 4. Select one or more input DataTypes.\n"
" Now shows just the combinations that includes the input\n"
" DataTypes requested.\n"
" Add argument(s): --input\n"
" 5. Configure the attributes of the inputs.\n"
" Shows the detailed information about each node.\n"
" Add argument(s): --dattr\n"
" 6. System makes sure each node can be found in the network.\n"
" If not, try to diagnose differences.\n"
" 7. System makes sure this is a complete set of input nodes.\n"
" 8. System makes sure all required module attributes are given.\n"
" 9. System makes sure all input files are provided.\n"
"10. Visualize the pipeline to make sure this is what you want.\n"
" Plot the network: --network_png\n"
"11. Configure the attributes to alter the pipeline, if necessary.\n"
" Add argument(s): --dattr\n"
" Add argument(s): --mattr\n"
"12. Actually run the analysis.\n"
" Add argument: --run\n"
)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Hi! I'm BETSY, and I like to do bioinformatics.\n\n"
"Workflow:\n%s\n\n" % WORKFLOW)
#parser.add_argument(
# "--inputs_complete", action="store_const", const=True,
# help="Have finished specifying --input DataTypes.")
#parser.add_argument(
# "--attrs_complete", action="store_const", const=True,
# help="Have finished configuring --dattr attributes.")
parser.add_argument(
"--run", action="store_true",
help="Perform the analysis.")
parser.add_argument(
'--user', default=getpass.getuser(),
        help='the username who runs the command')
parser.add_argument(
'--job_name', default='', help='the name of this job')
parser.add_argument(
'--num_cores', default=4, type=int,
help='number of cores used in the processing')
DEFAULT_MAX_INPUTS = 6
parser.add_argument(
'--max_inputs', default=DEFAULT_MAX_INPUTS, type=int,
help="Maximum number of inputs to be shown (default %d). "
"(For optimization)" % DEFAULT_MAX_INPUTS)
#parser.add_argument(
# "--save_failed_data", action="store_true",
# help="If a module failed, do not clean up its working files.")
parser.add_argument(
"--cache_input_files", action="store_true",
help="Save a copy of the input files. "
"Helpful for very big files.")
parser.add_argument(
"-v", "--verbose", default=0, action="count",
help="Give more output.")
group = parser.add_argument_group(title="Input/Output Nodes")
group.add_argument(
'--input', action='append', help='DataType of the input')
group.add_argument(
'--input_file', action='append',
help='File corresponding to the previous --input.')
group.add_argument(
'--mattr', default=[], action='append',
help='Set the option for a module. Format should be: '
'<key>=<value>.')
#group.add_argument(
# "--exclude_input", action="append", help="Experimental.")
group.add_argument(
"--dont_find_mattr_files", action="store_true",
help="By default, if an --mattr option looks like a filename, "
"will convert to full path so that modules can find them. "
"This will turn off that feature.")
group.add_argument(
'--output', help='Desired DataType for the output.')
group.add_argument(
'--dattr', default=[], action='append',
help="Specify a Datatype's attribute. Since there may be multiple "
"inputs or outputs with the same Datatype, the position of "
"this argument is important. --dattr should be given "
"immediately following the Datatype that it refers to. "
"No --dattr should be given before the first --input. "
"--dattr that refer to internal nodes in the network should be "
"given after the --output. Format: <datatype>.<key>=<value>.")
group.add_argument(
        '--output_file', help='File or folder for the output result.')
group.add_argument(
'--also_save_lowest', default=[], action="append",
help="Will save the contents of other nodes. "
"Format: <datatype>,#. Will save the bottom-most "
"node with this datatype.")
group.add_argument(
'--also_save_highest', default=[], action="append",
help="Will save the contents of other nodes. "
"Format: <datatype>,#. Will save the top-most "
"node with this datatype.")
group = parser.add_argument_group(title="Output")
group.add_argument(
'--network_png',
help='Generate a PNG that shows the data flow graph.')
group.add_argument(
'--sparse_network_png', action="store_true",
help="Leave out details in network plot.")
group.add_argument(
'--prune_network', action="store_true",
help="Prune nodes that are not included in any pipeline. "
"Mostly for debugging.")
#group.add_argument(
# '--network_text', help='generate the output network text file')
group.add_argument(
'--network_json', help='generate the output network json file')
group.add_argument(
'--restart_from_network', action="store_true",
help="If the --network_json file already exists, "
"then will use this network rather than recreating a new one.")
group.add_argument(
"--receipt",
help="Name of file to write a receipt (as a text file) "
"describing the analysis. ")
#parser.add_argument(
# '--clobber', action='store_const', const=True, default=False,
# help='overwrite the output_data if it already exists')
print "Starting rule engine."
sys.stdout.flush()
# Parse the arguments.
args = parser.parse_args()
input_list, x = _parse_args(sys.argv)
outtype, out_identifier, out_attributes = x
verbose_network = (not args.sparse_network_png)
print "Checking parameters."
sys.stdout.flush()
args.clobber = True
assert args.num_cores > 0, "num_cores should be greater than 0"
assert args.max_inputs > 0 and args.max_inputs <= 20
args.save_failed_data = True # Always save now.
#if args.inputs_complete or args.attrs_complete or args.run:
# if args.run:
# x = "--run"
# elif args.attrs_complete:
# x = "--attrs_complete"
# else:
# x = "--inputs_complete"
# assert input_list, "%s given, but no --input." % x
#if args.run:
# x = "--run"
# assert input_list, "%s given, but no --input." % x
## TODO: Make sure args.exclude_input is valid.
## args.exclude_input
if args.restart_from_network:
assert args.network_json, \
"Cannot restart_from_network: no --network_json specified."
# Make sure configuration directory exists.
assert hasattr(config, "CACHE_PATH"), \
"Not defined in .genomicoderc: CACHE_PATH"
if not os.path.exists(config.CACHE_PATH):
print "Making BETSY working path: %s." % config.CACHE_PATH
os.mkdir(config.CACHE_PATH)
# Make sure files exist.
for x in input_list:
intype, identifier, attributes = x
if identifier:
# Can be empty if no file given by user.
assert os.path.exists(identifier), \
"File not found: %s" % identifier
if args.output_file:
args.output_file = os.path.realpath(args.output_file)
if os.path.exists(args.output_file):
assert args.clobber, "Output already exists: %s" % args.output_file
# Making the IdentifiedDataNode objects.
# List of identified data nodes.
in_data_nodes = []
for x in input_list:
intype, identifier, attributes = x
assert hasattr(rulebase, intype), "Unknown datatype: %s" % intype
fn = getattr(rulebase, intype)
params = {}
for key, value in attributes:
params[key] = value
# Make sure the identifier is a full path since we'll be
# changing directories.
if identifier:
identifier = os.path.realpath(identifier)
in_data = fn.input(**params)
x = bie3.IdentifiedDataNode(in_data, identifier)
in_data_nodes.append(x)
# test outtype and build the list of custom_attributes.
custom_attributes = [] # List of bie3.CustomAttributes objects.
out_custom_attribute = None # bie3.CustomAttributes
if outtype:
# Pull out the custom_attributes. These attributes can refer
# to:
# 1. The input data (to guide the inferencing engine).
# e.g. Input has indel_realigned=yes, so downstream nodes
# should also have indel_realigned=yes.
# 2. The output data (out_attributes).
# 3. Internal nodes (out_attributes, different data type).
# Case 1. Get the custom attributes for the input nodes.
attrs = [] # list of (datatype, list of (key, value), all_nodes)
for x in input_list:
intype, identifier, attributes = x
if not attributes:
continue
x = intype, attributes, False
attrs.append(x)
for x in attrs:
datatype, attributes, all_nodes = x
x = _make_custom_attr(rulebase, datatype, attributes, all_nodes)
custom_attributes.append(x)
# Case 2 and 3. out_attributes contains attributes for both
# output and internal nodes. Group them by datatype.
# out_attributes is a list of (datatype, key, value, all_nodes)
x = [x[0] for x in out_attributes]
x = sorted({}.fromkeys(x))
datatypes = x
# datatypes can be empty if no attributes specified.
for dt in datatypes:
# Get all_nodes for this data type. Should all be the same.
x = [x[3] for x in out_attributes if x[0] == dt] # all_nodes
x = sorted({}.fromkeys(x))
assert len(x) == 1, "Inconsistent all_nodes: %s" % dt
all_nodes = x[0]
out_attrs = [x for x in out_attributes
if x[0] == dt and x[3] == all_nodes]
assert out_attrs
attributes = [(x[1], x[2]) for x in out_attrs]
# Since there's only one output object, all attributes for
# that data type should be put into the same
# CustomAttributes object.
if dt == outtype:
x = _make_custom_attr(rulebase, dt, attributes, all_nodes)
out_custom_attribute = x
# Otherwise, make separate CustomAttribute objects.
else:
for x in attributes:
x = _make_custom_attr(rulebase, dt, [x], all_nodes)
custom_attributes.append(x)
#x = _make_custom_attr(
# rulebase, dt, attributes, all_nodes)
#custom_attributes.append(x)
## # out_attributes contains attributes for the output node, as
## # well as other nodes in the network. Just group them by
## # datatype.
## # Since there's only one output, it should be put into the
## # same CustomAttributes object.
## # out_attributes is a list of (datatype, key, value, all_nodes)
## if out_attributes:
## x1 = [x[0] for x in out_attributes] # datatype
## x2 = [x[3] for x in out_attributes] # all_nodes
## x1 = sorted({}.fromkeys(x1))
## x2 = sorted({}.fromkeys(x2))
## for x in itertools.product(x1, x2):
## datatype, all_nodes = x
## out_attrs = [x for x in out_attributes
## if x[0] == datatype and x[3] == all_nodes]
## if not out_attrs:
## continue
## attributes = [(x[1], x[2]) for x in out_attrs]
## x = datatype, attributes, all_nodes
## attrs.append(x)
# Cache the files in the user's directory. Don't depend on the
# user keeping the file in the same place. Needed for the first
# module.
if args.cache_input_files:
print "Making a local copy of the input files."
sys.stdout.flush()
for i_data_node in in_data_nodes:
filename = i_data_node.identifier
if not filename or not os.path.exists(filename):
continue
x = userfile.set(getpass.getuser(), filename)
i_data_node.identifier = x
# Parse out the module attributes (AKA user_options).
user_options = {}
if args.mattr:
#print "Parsing module attributes."
all_mattrs = get_all_option_names()
for x in args.mattr:
assert '=' in x, "--mattr should be in format: <option>=<value>\n%s" %x
key, value = x.split('=', 1)
assert key in all_mattrs, "Unknown module attribute: %s" % key
user_options[key] = value
# Since the modules will be run from their own directory, any
# module attributes that point to files (as relative paths) will
# be messed up. See if any of these look like files and convert
# them into full path names.
if not args.dont_find_mattr_files:
for key, value in user_options.iteritems():
if not os.path.exists(value):
continue
x = os.path.realpath(value)
user_options[key] = x
# Step 1: Make sure there's an output provided.
# If not, show the rulebase.
# Step 2: Generate network that produces this output.
# Step 3: Make sure there are inputs provided.
# If not, show the input datatypes.
# Step 4: Make sure the inputs can be found in the network.
# Step 5: Create the pipelines.
# Step 6: Make sure required attributes are given.
# Step 7: Prune redundant pipelines.
# Step 8: Make sure input files exist.
# Step 9: Manual verification of the network by the user.
# Step 1: Make sure an output is provided.
if not check_output_provided(rulebase, args.output):
return
# Step 2: Generate network.
if args.network_json and os.path.exists(args.network_json) and \
args.restart_from_network:
network = bie3.read_network(args.network_json)
else:
network = generate_network(
rulebase, outtype, custom_attributes, out_custom_attribute)
if args.network_json:
write_network(args.network_json, network)
#plot_network(
# "network.png", network, user_options=user_options,
# verbose=verbose_network)
# Step 2.5: Make sure network has more than one node.
if not check_more_than_one_node_network(network, rulebase):
return
# Step 3: Make sure some inputs are provided.
if not check_inputs_provided(
network, in_data_nodes, custom_attributes, user_options,
args.max_inputs, args.network_png, verbose_network):
return
# Step 4: Make sure each of the input nodes match a node in the
# network.
x = check_inputs_in_network(
network, user_options, in_data_nodes,
args.network_png, verbose_network)
inputs_ok, data_node_ids = x
if not inputs_ok:
return
# Step 5: Search for pipelines that can be run given the INPUT
# nodes.
paths = build_pipelines(
network, user_options, in_data_nodes, data_node_ids, custom_attributes,
args.max_inputs, args.network_png, verbose_network)
#plot_pipelines(
# "pipeline", network, paths, user_options, max_pipelines=16,
# prune=True, verbose=True)
if not paths:
return
# Step 6: Make sure required attributes are given.
paths = check_attributes_complete(
network, user_options, paths,
args.network_png, args.prune_network, verbose_network)
if not paths:
return
# DEBUG: To debug the pruning, save the network and paths so we
# don't have to re-generate them each time we run.
#import pickle
#open("network.txt", 'w').write(pickle.dumps(network))
#open("paths.txt", 'w').write(pickle.dumps(paths))
#import sys; sys.exit(0)
#network = pickle.loads(open("network.txt").read())
#paths = pickle.loads(open("paths.txt").read())
# DEBUG: Print out each of the pipelines.
#plot_pipelines(
# "pipeline", network, paths, user_options, max_pipelines=16,
# verbose=True)
#plot_network_show_pipelines(
# args.network_png, network, paths, user_options=user_options,
# verbose=verbose_network)
# Step 7: Prune undesired pipelines.
paths = prune_pipelines(
network, user_options, custom_attributes, paths,
args.network_png, args.prune_network, verbose_network)
if not paths:
return
# DEBUG: Print out each of the pipelines.
#plot_pipelines(
# "pipeline", network, paths, user_options, max_pipelines=16,
# verbose=True)
# Step 8: Look for input files.
if not check_input_files(
network, in_data_nodes, user_options, paths,
args.network_png, args.prune_network, verbose_network):
return
# Step 9: Look for output file.
if not check_output_file(args.output_file):
return
#print "The network (%d nodes) is complete." % len(network.nodes)
#print "There are %d possible pipelines." % len(paths)
x = "core"
if args.num_cores > 1:
x = "cores"
print "Ready to go! Will run the analysis using a maximum of %d %s." % (
args.num_cores, x)
# Step 10: Manual verification of the network.
if not manually_verify_network(
network, user_options, paths, args.run,
args.network_png, args.prune_network, verbose_network):
return
#plot_network_show_pipelines(
# args.network_png, network, paths, user_options=user_options,
# verbose=args.verbose)
print "Running the analysis."
sys.stdout.flush()
clean_up = not args.save_failed_data
node_dict = transitions = None
try:
x = rule_engine.run_pipeline(
network, in_data_nodes, custom_attributes, user_options, paths,
user=args.user, job_name=args.job_name, clean_up=clean_up,
num_cores=args.num_cores, verbosity=args.verbose)
if x:
node_dict, transitions = x
except AssertionError, x:
if str(x).startswith("Inference error"):
node_ids = rule_engine.DEBUG_POOL.keys()
plot_network(
args.network_png, network, user_options=user_options,
highlight_green=node_ids, verbose=verbose_network)
#write_network(args.network_json, network)
raise
if args.output_file and node_dict and 0 in node_dict:
print "Saving output %s to %s." % (outtype, args.output_file)
sys.stdout.flush()
output_file = node_dict[0].identifier
reportlib.copy_file_or_path(output_file, args.output_file)
# See what else to save.
also_save = [] # list of (arg, "lowest" or "highest")
for arg in args.also_save_lowest:
also_save.append((arg, "lowest"))
for arg in args.also_save_highest:
also_save.append((arg, "highest"))
for arg, which_one in also_save:
# Format: <datatype>,<file_or_path>
x = arg.split(",", 1)
assert len(x) == 2, "Invalid also_save: %s" % arg
dname, out_filename = x
if which_one == "lowest":
node_id = _find_lowest_datatype(network, dname, node_dict.keys())
else:
node_id = _find_highest_datatype(network, dname, node_dict.keys())
assert node_id, "Unable to find: %s" % dname
in_filename = node_dict[node_id].identifier
print "Saving %s to %s." % (dname, out_filename)
reportlib.copy_file_or_path(in_filename, out_filename)
# Draw the final network.
node_dict = node_dict or {}
transitions = transitions or {}
start_ids = []
for x in data_node_ids:
x = [x for x in x if x in node_dict]
start_ids.extend(x)
node_ids = [x for x in node_dict if x not in start_ids]
# node_dict only contains DataNodes. Add the ModuleNodes where
# there is a transition into and out of.
for nid in range(len(network.nodes)):
if not isinstance(network.nodes[nid], bie3.ModuleNode):
continue
if nid in node_ids:
continue
found1 = found2 = False
for (n1, n2) in transitions:
if nid == n1:
found1 = True
elif nid == n2:
found2 = True
if found1 and found2:
break
if found1 and found2:
node_ids.append(nid)
plot_network(
args.network_png, network,
user_options=user_options,
bold=start_ids+node_ids,
bold_transitions=transitions,
highlight_green=start_ids, highlight_yellow=node_ids,
verbose=verbose_network)
if args.receipt:
write_receipt(args.receipt, network, start_ids, transitions, node_dict)
#if args.network_text:
# print "Writing detailed network: %s." % args.network_text
# bie3.print_network(network, outhandle=args.network_text)
print "Done."
if __name__ == '__main__':
#import cProfile as profile
#profile.runctx("main()", globals(), locals())
main()
```
#### File: changlab/genomicode/affprop.py
```python
def cluster(similarity_matrix, dampfact=0.5, update_fn=None):
# similarity_matrix is a NxN matrix of similarity scores. More
# similar points should have higher scores. The score can be a
    # really small value if the two points are not connected.  Returns a
    # tuple (clusters, exemplars, S, A, R, num_iter, num_changes), where
    # clusters and exemplars are lists of length N.
#import itertools
assert dampfact >= 0 and dampfact <= 1.0
# Do some checking on the input matrix.
nr = len(similarity_matrix)
nc = len(similarity_matrix[0])
assert nr == nc
for x in similarity_matrix:
assert len(x) == nc
N = nr
## if not topology_matrix:
## # Start with a fully connected graph.
## topology_matrix = [[1]*N for i in range(N)]
## # Make sure topology matrix has the right dimensions.
## assert N == len(topology_matrix)
## assert N == len(topology_matrix[0])
## for x in topology_matrix:
## assert len(x) == N
## # Make sure topology matrix is symmetric.
## for (i, j) in itertools.product(range(N), range(i, N)):
## assert topology_matrix[i][j] == topology_matrix[j][i]
## topology = {} # node -> list of connecting nodes.
## for i in range(N):
## nodes = []
## for j in range(N):
## if topology_matrix[i][j]:
## nodes.append(j)
## topology[i] = nodes
S = similarity_matrix
# Availability matrix.
A = [[0]*N for i in range(N)]
# Responsibility matrix.
R = [[0]*N for i in range(N)]
df = dampfact
num_iter = 0
same_exemplars = 0
exemplars = [None] * N
#MAXITS = 2000
#CONVITS = 200
MAXITS = 100
CONVITS = 5
while num_iter < MAXITS and same_exemplars < CONVITS:
num_iter += 1
# Update the responsibility matrix.
for i in range(N):
# Find the largest and 2nd largest score.
score1 = score_k = score2 = None
for k in range(N):
s = A[i][k] + S[i][k]
if score1 is None or s > score1:
score1, score_k, score2 = s, k, score1
elif score2 is None or s > score2:
score2 = s
for k in range(N):
if k != score_k:
R[i][k] = df*R[i][k] + (1-df)*(S[i][k]-score1)
else:
R[i][k] = df*R[i][k] + (1-df)*(S[i][k]-score2)
# Update the availability matrix.
for k in range(N):
scores = [max(0, R[i][k]) for i in range(N)]
sum_scores = sum(scores)
for i in range(N):
s = sum_scores-scores[i]
A[i][k] = df*A[i][k] + (1-df)*min(0, R[k][k]+s)
s = sum_scores - scores[k]
A[k][k] = df*A[k][k] + (1-df)*s
# Calculate the exemplars.
old_exemplars = exemplars
exemplars = [None] * N
for i in range(N):
k_max = k_value = None
for k in range(N):
value = A[i][k] + R[i][k]
if k_max is None or value > k_value:
k_max, k_value = k, value
exemplars[i] = k_max
# Count the number of changes in exemplars.
changed = [int(x1 != x2) for (x1, x2) in zip(exemplars, old_exemplars)]
num_changes = sum(changed)
if num_changes == 0:
same_exemplars += 1
else:
same_exemplars = 0
if update_fn:
update_fn(num_iter, num_changes, exemplars, A, R)
clusters = _find_clusters(exemplars)
return clusters, exemplars, S, A, R, num_iter, num_changes
def set_preference_median(similarity_matrix):
    # Set S[k][k] (the preference of each point) to the median similarity.
import jmath
S = [x[:] for x in similarity_matrix] # Make a copy.
# Make sure S is a square matrix.
N = len(S)
for x in S:
assert len(x) == N
for i in range(len(S)):
x = S[i][:]
x.pop(i)
S[i][i] = jmath.median(x)
return S
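# Hypothetical usage sketch (not part of the original code): cluster five
# points where 0-1 are mutually similar and 2-4 are mutually similar.
# Scores are negative distances, so values closer to 0 mean more similar.
def _example_cluster():
    S = [
        [-1, -1, -9, -9, -9],
        [-1, -1, -9, -9, -9],
        [-9, -9, -1, -1, -1],
        [-9, -9, -1, -1, -1],
        [-9, -9, -1, -1, -1],
        ]
    S = set_preference_median(S)   # self-similarity = median similarity
    x = cluster(S, dampfact=0.5)
    clusters, exemplars = x[0], x[1]
    # Expect items 0 and 1 to share one cluster ID and items 2-4 to share
    # another (the exact IDs depend on which points become exemplars).
    return clusters, exemplars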
def _find_clusters(exemplars):
# Return a list of cluster IDs, where each cluster ID is an
# integer from [0, N). N is the number of clusters.
# Figure out the topology of the graph. Topology does not contain
# nodes that point to themselves.
topology = {} # node -> list of connecting nodes.
for i, j in enumerate(exemplars):
if i == j:
continue
if i not in topology:
topology[i] = []
if j not in topology:
topology[j] = []
if j not in topology[i]:
topology[i].append(j)
if i not in topology[j]:
topology[j].append(i)
# Find the centers of each cluster. A node is a cluster center
# if:
# 1. It is connected to at least two other nodes.
# 2. Or it is a pair of nodes that are only connected to each
# other. In this case, arbitrarily pick one to be the center.
centers = []
for node, partners in topology.iteritems():
assert len(partners) > 0
if len(partners) >= 2:
centers.append(node)
elif len(topology[partners[0]]) == 1 and partners[0] not in centers:
centers.append(node)
# Assign each node to a cluster, as defined by the centers.
# 1. If a node is a center, then set it to its own cluster.
# 2. If the node is connected to a center, then set it to that
# cluster.
# 3. Otherwise, set it to cluster 0.
node2cluster = {}
for node in topology:
cluster = 0
if node in centers:
cluster = centers.index(node) + 1
elif node in topology:
for p in topology[node]:
if p not in centers:
continue
cluster = centers.index(p) + 1
break
node2cluster[node] = cluster
clusters = [node2cluster.get(i, 0) for i in range(len(exemplars))]
return clusters
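# Hypothetical usage sketch (not part of the original code).  Items 0-2 all
# point to exemplar 1, and items 3-4 point to exemplar 4, so two clusters
# are expected.  The specific integer IDs assigned to the clusters depend
# on dictionary iteration order.
def _example_find_clusters():
    clusters = _find_clusters([1, 1, 1, 4, 4])
    assert clusters[0] == clusters[1] == clusters[2]
    assert clusters[3] == clusters[4]
    assert clusters[0] != clusters[3]
    return clusters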
def test_cluster():
# 0,1 similar, 2,3,4 similar
similarity_matrix = [
[-3, -2, -4, -5, -6],
[-3, -3, -6, -5, -4],
[-4, -7, -3, -8, -3],
[-2, -4, -2, -2, -1],
[-5, -7, -5, -2, -2],
]
x = cluster(similarity_matrix)
print x
if __name__ == '__main__':
test_cluster()
```
#### File: changlab/genomicode/AnnotationMatrix.py
```python
class AnnotationMatrix:
def __init__(self, headers, headers_h, header2annots, headerlines=[]):
# headers is a list of the original headers.
# headers_h are the headers, hashed to ensure uniqueness.
        # header2annots is a dictionary of hashed headers to the list
# of annotations. headerlines is a list of lines that occur
# at the front of the file. These might be the comment lines
# that make up the "header" of VCF files.
assert headers
assert headers_h
assert len(headers) == len(headers_h), "%d %d" % (
len(headers), len(headers_h))
assert sorted(headers_h) == sorted(header2annots)
for x in headers_h[1:]:
assert len(header2annots[x]) == len(header2annots[headers_h[0]])
self.headers = headers[:]
self.headers_h = headers_h[:]
self.header2annots = header2annots.copy()
for x in headerlines:
assert type(x) is type("")
self.headerlines = headerlines[:] # no newlines
#def get_annots(self, header):
# # Return a list of the annotations for this header.
# h = self.normalize_header(header)
# if annots is None:
# raise KeyError, header
# return self.header2annots[h]
def num_headers(self):
return len(self.headers)
def num_annots(self):
if not self.headers_h:
return 0
h = self.headers_h[0]
return len(self.header2annots[h])
def copy(self):
return AnnotationMatrix(
self.headers, self.headers_h, self.header2annots, self.headerlines)
def __getitem__(self, header):
# Return a list of the annotations for this header.
h = self.normalize_header(header)
if h is None:
raise KeyError, header
return self.header2annots[h]
def __contains__(self, header):
if self.normalize_header(header) is None:
return False
return True
def normalize_header(self, header, index_base1=False):
# Return the hashed header. header may be either a header,
# hashed header, or a 0-based index. If index_base1 is True,
# then indexes will be interpreted as 1-based. If header
# cannot be found, return None.
# header can be:
# 1. Unique match to headers.
# 2. Unique match to headers_h.
# 3. Index. (may be actual int or str)
# Case 1.
I = [i for i in range(len(self.headers)) if self.headers[i] == header]
if len(I) == 1:
return self.headers_h[I[0]]
# Case 2.
if header in self.headers_h:
return header
# Case 3.
try:
header_i = int(header)
except ValueError, x:
pass
else:
if index_base1:
header_i = header_i - 1
assert header_i >= 0 and header_i < len(self.headers)
return self.headers_h[header_i]
return None
#raise KeyError, header
def normalize_header_i(self, header, index_base1=False):
# Return the 0-based index. If index_base1 is a true value,
# then will return a 1-based index. Same arguments as
# normalize_header.
h = self.normalize_header(header, index_base1=index_base1)
if h is None:
return None
return self.headers_h.index(h)
def create_from_annotations(headers, all_annots, headerlines=[]):
# headers is a list of headers.
# all_annots is a list (parallel to headers) that contain the
# annotations.
assert headers
assert len(headers) == len(all_annots)
num_annots = len(all_annots[0])
for x in all_annots[1:]:
assert len(x) == num_annots, "Inconsistent annot lengths"
headers_h = uniquify_headers(headers)
assert len(headers_h) == len(all_annots)
header2annots = {}
for (header_h, annots) in zip(headers_h, all_annots):
header2annots[header_h] = annots
x = AnnotationMatrix(
headers, headers_h, header2annots, headerlines=headerlines)
return x
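# Hypothetical usage sketch (not part of the original code): build a tiny
# two-column matrix and look up columns by header name or 0-based index.
def _example_create_from_annotations():
    M = create_from_annotations(
        ["Gene", "Score"], [["TP53", "E2F1"], ["0.5", "0.9"]])
    assert M.num_headers() == 2
    assert M.num_annots() == 2
    assert M["Gene"] == ["TP53", "E2F1"]   # look up by header name
    assert M[1] == ["0.5", "0.9"]          # look up by 0-based index
    assert M.normalize_header("Score") == "Score"
    return M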
def colslice(MATRIX, I):
for i in I:
assert i >= 0 and i < len(MATRIX.headers_h)
new_headers = [MATRIX.headers[i] for i in I]
old_headers_h = [MATRIX.headers_h[i] for i in I]
new_headers_h = uniquify_headers(new_headers)
header2annots = {}
for i in range(len(old_headers_h)):
oh, nh = old_headers_h[i], new_headers_h[i]
header2annots[nh] = MATRIX.header2annots[oh]
x = AnnotationMatrix(
new_headers, new_headers_h, header2annots, MATRIX.headerlines)
return x
def rowslice(MATRIX, I):
num_annots = MATRIX.num_annots()
for i in I:
assert i >= 0 and i < num_annots
header2annots = {}
for header, old_annots in MATRIX.header2annots.iteritems():
new_annots = [old_annots[i] for i in I]
header2annots[header] = new_annots
x = AnnotationMatrix(
MATRIX.headers, MATRIX.headers_h, header2annots, MATRIX.headerlines)
return x
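# Hypothetical usage sketch (not part of the original code): take subsets of
# the columns (headers) and rows (annotations) of a small matrix.
def _example_slicing():
    M = create_from_annotations(
        ["Gene", "Chrom", "Score"],
        [["TP53", "E2F1", "MYC"], ["17", "20", "8"], ["0.5", "0.9", "0.1"]])
    M2 = colslice(M, [0, 2])   # keep the "Gene" and "Score" columns
    assert M2.headers == ["Gene", "Score"]
    M3 = rowslice(M, [1])      # keep only the second annotation (row)
    assert M3["Gene"] == ["E2F1"]
    assert M3["Score"] == ["0.9"]
    return M2, M3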
def replace_headers(MATRIX, headers):
# Return a new AnnotationMatrix with these headers.
assert len(headers) == len(MATRIX.headers)
headers_h = uniquify_headers(headers)
header2annots = {}
for header_old in MATRIX.header2annots:
# Use the index to get the hashed header.
i = MATRIX.headers_h.index(header_old)
header_new = headers_h[i]
header2annots[header_new] = MATRIX.header2annots[header_old]
x = AnnotationMatrix(headers, headers_h, header2annots, MATRIX.headerlines)
return x
def uniquify_headers(headers):
# Return a parallel list of unique headers.
header2I = {} # header -> list of indexes
for i, header in enumerate(headers):
if header not in header2I:
header2I[header] = []
header2I[header].append(i)
nodup = headers[:]
for (header, I) in header2I.iteritems():
if len(I) < 2:
continue
for i in range(len(I)):
nodup[I[i]] = "%s_%d" % (header, i+1)
return nodup
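# Hypothetical usage sketch (not part of the original code): duplicated
# headers get a numeric suffix so that every hashed header is unique.
def _example_uniquify_headers():
    x = uniquify_headers(["Gene", "Score", "Gene"])
    assert x == ["Gene_1", "Score", "Gene_2"]
    return x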
def read(filename, is_csv=False, header_char=None, nrows=None):
# Everything are strings. No numeric conversion.
import re
from genomicode import genesetlib
delimiter = "\t"
if is_csv:
delimiter = ","
# re.sub takes a lot of time (25% of all running time!). Compile
# it.
re_naive = re.compile("na\\W+ve")
all_headers, all_annots = [], []
all_comments = []
for x in genesetlib.read_tdf(
filename, preserve_spaces=True, allow_duplicates=True,
delimiter=delimiter, yield_lines_startswith=header_char, nrows=nrows):
if type(x) is type(""):
all_comments.append(x)
continue
name, description, annots = x
# Hack: Some files contain special characters, which mess up
# alignment. Fix this here.
# na\xc3\xafve-WIBR3.5 hESC
# na\xe2\x80\x9a\xc3\xa0\xc3\xb6\xe2\x88\x9a\xc3\xb2ve-C1.2 hiPSC
#annots = [re.sub("na\\W+ve", "naive", x) for x in annots]
# This takes a long time. Don't do it unless necessary.
if False:
annots = [re_naive.sub("naive", x) for x in annots]
all_headers.append(name)
all_annots.append(annots)
assert all_headers, "Empty file: %s" % filename
headers_h = uniquify_headers(all_headers)
header2annots = {}
for (header_h, annots) in zip(headers_h, all_annots):
header2annots[header_h] = annots
return AnnotationMatrix(
all_headers, headers_h, header2annots, headerlines=all_comments)
def write(handle_or_file, annot_matrix, delim=None):
from genomicode import jmath
if delim is None:
delim = "\t"
matrix = []
for i, header_h in enumerate(annot_matrix.headers_h):
header = annot_matrix.headers[i]
annots = annot_matrix.header2annots[header_h]
x = [header] + annots
matrix.append(x)
# Transpose the matrix.
matrix = jmath.transpose(matrix)
handle = handle_or_file
if type(handle) is type(""):
handle = open(handle, 'w')
for x in annot_matrix.headerlines:
print >>handle, x
for x in matrix:
print >>handle, delim.join(map(str, x))
```
#### File: changlab/genomicode/antibodypedialib.py
```python
def search_gene(gene_name, wait=5):
# Return a URL or None if gene not found.
import urlparse
html = _search_gene_raw(gene_name, wait=wait)
for (name, url) in _parse_gene_search(html):
if name.strip().upper() == gene_name.strip().upper():
break
else:
return None
x = urlparse.urljoin("http://www.antibodypedia.com", url)
return x
def find_antibodies(gene_name, wait=5):
# Return list of tuples (provider, antibody, num_references,
# clonality, western, elisa, immunocytochemistry, immunoprecipitation,
# immunohistochemistry, flow_cytometry).
import urllib
import timer
# Search for the gene name.
url = search_gene(gene_name, wait=wait)
if not url:
return []
# Read and parse the antibodies.
timer.wait(wait)
html = urllib.urlopen(url).read()
#open("test01.html", 'w').write(html)
results_iter = _parse_gene_page_table(html)
return list(results_iter)
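# Hypothetical usage sketch (not part of the original code).  This performs
# live web queries (and drives a headless browser), so it only illustrates
# the call pattern and the shape of each returned tuple.
def _example_find_antibodies():
    results = find_antibodies("E2F1", wait=5)
    for x in results:
        (provider, antibody, num_references, clonality, western, elisa,
         immunocytochemistry, immunoprecipitation, immunohistochemistry,
         flow_cytometry) = x
    return results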
def _search_gene_raw(gene_name, wait=5):
    # Searches for a gene and returns the raw HTML.  Uses a headless
    # PhantomJS browser; the Firefox driver (which pops up a visible
    # browser window) is left commented out below.
from contextlib import closing
#from selenium.webdriver import Firefox
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import timer
timer.wait(wait)
#with closing(Firefox()) as browser:
with closing(webdriver.PhantomJS()) as browser:
URL = "http://www.antibodypedia.com/explore/%s" % gene_name
browser.get(URL)
# While searching, will show:
# <div class="txtThree" id="search_results_title">
# Searching for '<strong>e2f1</strong>' ...</div>
#
# When found, will show:
# <div class="txtThree" id="search_results_title">
# Found <strong>8</strong> gene products for
# '<strong>e2f1</strong>'</div>
wait = WebDriverWait(browser, 10)
condition = EC.text_to_be_present_in_element(
(By.ID, "search_results_title"), "Found")
wait.until(condition)
html = browser.page_source
html = unicode(html).encode("utf-8")
return html
def _parse_gene_search(html):
# Yield (Name, URL)
from bs4 import BeautifulSoup
soup = BeautifulSoup(html)
tables = soup.find_all("table", id="search_results_table")
assert tables, "Could not find antibodies table"
assert len(tables) == 1, "Found too many tables"
results_table = tables[0]
for row in results_table.find_all("tr"):
if row.find_all("th"):
# Skip the header or footer row.
continue
cols = row.find_all("td")
# # Name Description Family Chromosome UniProt
# Mouse_ortholog Antibodies
if len(cols) == 1:
col = cols[0]
assert str(col.text).find("Search result is empty") >= 0
continue
assert len(cols) == 8, "Invalid cols: %s" % repr(cols)
name = cols[1]
hrefs = name.find_all("a")
assert hrefs, "Could not find href"
assert len(hrefs) == 1
href = hrefs[0]
name = str(href.text).strip()
URL = href["href"]
yield name, URL
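# Hypothetical usage sketch (not part of the original code).  The HTML below
# is a made-up, minimal fixture with the table id and 8-column rows that
# _parse_gene_search expects; real pages from antibodypedia.com are larger.
def _example_parse_gene_search():
    html = (
        '<table id="search_results_table">'
        '<tr><th>#</th><th>Name</th><th>Description</th><th>Family</th>'
        '<th>Chromosome</th><th>UniProt</th><th>Mouse_ortholog</th>'
        '<th>Antibodies</th></tr>'
        '<tr><td>1</td><td><a href="/gene/1234/E2F1">E2F1</a></td>'
        '<td>E2F transcription factor 1</td><td></td><td></td><td></td>'
        '<td></td><td></td></tr>'
        '</table>')
    assert list(_parse_gene_search(html)) == [("E2F1", "/gene/1234/E2F1")]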
def _parse_gene_page_table(html):
# Yield tuples (provider, antibody, num_refs, clonality, western, elisa,
# immunocytochemistry, immunoprecipitation, immunohistochemistry,
# flow_cytometry).
for antibody_tups in _group_keyvalue_by_antibody(html):
antibody_dict = {}
for (key, value) in antibody_tups:
antibody_dict[key] = value
ad = antibody_dict
x = ad["PROVIDER"], ad["ANTIBODY"], ad.get("NUM_REFERENCES", 0), \
ad["CLONALITY"], \
ad.get("WESTERN", ""), ad.get("ELISA", ""), \
ad.get("IMMUNOCYTOCHEMISTRY", ""), \
ad.get("IMMUNOPRECIPITATION", ""), \
ad.get("IMMUNOHISTOCHEMISTRY", ""), \
ad.get("FLOW_CYTOMETRY", "")
yield x
def _parse_gene_page_keyvalue(html):
# Yield tuples (KEY, VALUE). Will have PROVIDER and
# NUM_ANTIBODIES, followed by a number of ANTIBODY records.
#
# KEY VALUE
# PROVIDER <name>
# NUM_ANTIBODIES <num> (OPTIONAL)
# ANTIBODY <name>
# NUM_REFERENCES (OPTIONAL)
# CLONALITY Polyclonal, Monoclonal
# WESTERN <evidence> (OPTIONAL)
# ELISA <evidence> (OPTIONAL)
# IMMUNOCYTOCHEMISTRY <evidence> (OPTIONAL)
# IMMUNOPRECIPITATION <evidence> (OPTIONAL)
# IMMUNOHISTOCHEMISTRY <evidence> (OPTIONAL)
# FLOW_CYTOMETRY <evidence> (OPTIONAL)
#
# <EVIDENCE>
# Supportive data in Antibodypedia
# Data presented on provider website
# Data in Antibodypedia (inconclusive)
# Recommended by provider
from bs4 import BeautifulSoup
SCORE2DESC = {
"/images/score_1.png" : "Supportive data in Antibodypedia",
"/images/score_2.png" : "Data presented on provider website",
"/images/score_3.png" : "Data in Antibodypedia (inconclusive)",
"/images/score_4.png" : "Recommended by provider",
}
soup = BeautifulSoup(html)
#print soup.prettify()
tables = soup.find_all("table", id="antibodies_table")
assert tables, "Could not find antibodies table"
assert len(tables) == 1, "Found too many tables"
antibodies_table = tables[0]
#print antibodies_table.prettify()
for row in antibodies_table.find_all("tr"):
#print row.prettify(); continue
if row.find_all("th"):
# Skip the header or footer row.
#print "HEADER OR FOOT"
continue
elif row.find_all("td", **{"class" : "provider"}):
span = row.td.div.span.span
descendants = list(span.descendants)
assert len(descendants) == 6
yield "PROVIDER", str(descendants[0].strip())
# 26 antibodies
x = descendants[4]
x = x.replace("antibody", "")
x = x.replace("antibodies", "")
x = x.strip()
x = int(x) # make sure it's an int
yield "NUM_ANTIBODIES", x
elif row.find_all("td", colspan="10"):
#<tr><td colspan="10" style="text-align:center;">
#- No antibodies -
#</td></tr>
#
# Must do this after "class=provider", because that also
# has colspan=10.
#print row.prettify()
assert str(row.text).find("No antibodies") >= 0
continue
elif row.find_all("td", **{"class" : "title"}):
cols = row.find_all("td")
assert len(cols) == 10
#print row.prettify()
yield "ANTIBODY", str(cols[1].text.strip())
num_refs = 0
x = cols[2].text.strip()
if x:
num_refs = int(x)
yield "NUM_REFERENCES", num_refs
x = str(cols[3].text.strip())
# Multiple variants. Don't bother checking.
# Polyclonal
# Monoclonal
# <blank>
# Polyclonal (Antigen purified)
#assert x in ["Polyclonal", "Monoclonal", ""], x
yield "CLONALITY", x
c4, c5, c6, c7 = cols[4], cols[5], cols[6], cols[7]
c8, c9 = cols[8], cols[9]
if c4.find("img"):
assert str(c4).find("Western blot") >= 0
yield "WESTERN", SCORE2DESC[c4.img["src"]]
if c5.find("img"):
assert str(c5).find("ELISA") >= 0, str(cols[5])
yield "ELISA", SCORE2DESC[c5.img["src"]]
if c6.find("img"):
assert str(c6).find("Immunocytochemistry") >= 0
yield "IMMUNOCYTOCHEMISTRY", SCORE2DESC[c6.img["src"]]
if c7.find("img"):
assert str(c7).find("Immunoprecipitation") >= 0
yield "IMMUNOPRECIPITATION", SCORE2DESC[c7.img["src"]]
if c8.find("img"):
assert str(c8).find("Immunohistochemistry") >= 0
yield "IMMUNOHISTOCHEMISTRY", SCORE2DESC[c8.img["src"]]
if c9.find("img"):
assert str(c9).find("Flow cytometry") >= 0
yield "FLOW_CYTOMETRY", SCORE2DESC[c9.img["src"]]
else:
raise AssertionError, "Unknown row\n%s" % row.prettify()
def _group_keyvalue_by_antibody(html):
# Yield list of (key, value) associated with one antibody.
provider = None
antibody_tups = [] # list of (key, value)
for x in _parse_gene_page_keyvalue(html):
key, value = x
# PROVIDER or ANTIBODY starts a new antibody. Get rid of the
# old information.
if key in ["PROVIDER", "ANTIBODY"] and antibody_tups:
# Don't store PROVIDER in the antibody_tups because there
# are multiple ANTIBODY per PROVIDER. Add it on now.
assert provider
antibody_tups = [("PROVIDER", provider)] + antibody_tups
yield antibody_tups
antibody_tups = []
if key == "PROVIDER":
provider = value
elif key == "NUM_ANTIBODIES":
pass
else:
antibody_tups.append((key, value))
if antibody_tups:
assert provider
antibody_tups = [("PROVIDER", provider)] + antibody_tups
yield antibody_tups
```
#### File: changlab/genomicode/aptamerlib.py
```python
START = "START"
MAIN = "MAIN"
INSERT = "INSERT"
END = "END"
INSERTEND = "INSERT-END"
class Node:
def __init__(self, node_type, i_seqset=None, i_sequence=None, i_base=None):
assert node_type in [MAIN, START, END, INSERT, INSERTEND]
self.node_type = node_type
self.i_seqset = i_seqset
self.i_sequence = i_sequence
self.i_base = i_base
def __str__(self):
x = [self.node_type]
if self.i_seqset is not None:
x += [self.i_seqset, self.i_sequence, self.i_base]
return "Node(%s)" % (", ".join(map(str, x)))
def __hash__(self):
x = self.node_type, self.i_seqset, self.i_sequence, self.i_base
return hash(x)
def _sortkey(self):
# (order1, i_seqset, i_sequence, i_base, order2)
nt2order1 = {
START : 0,
INSERT : 1,
MAIN : 1,
INSERTEND : 2,
END : 3,
}
nt2order2 = {
START : 0,
INSERT : 0,
MAIN : 1,
INSERTEND : 0,
END : 0,
}
assert self.node_type in nt2order1
assert self.node_type in nt2order2
order1 = nt2order1[self.node_type]
order2 = nt2order2[self.node_type]
x = order1, self.i_seqset, self.i_sequence, self.i_base, order2
return x
def __cmp__(self, other):
x1 = self._sortkey()
x2 = other._sortkey()
return cmp(x1, x2)
class SequenceSet:
def __init__(self, name, is_barcode, is_random, alternates):
# alternates is a list of the alternative sequences (as
# strings).
self.name = name
self.is_barcode = is_barcode
self.is_random = is_random
self.alternates = alternates[:]
def _iter_main_graph(library):
for i_seqset, seqset in enumerate(library):
name = library[i_seqset].name
for i_sequence, sequence in enumerate(seqset.alternates):
for i_base, base in enumerate(sequence):
yield (name, seqset, sequence, base,
i_seqset, i_sequence, i_base)
def _get_all_nodes(*graphs):
    # Return a dict whose keys are all nodes in the given graph(s).
nodes = {}
for graph in graphs:
for node, next_nodes in graph.iteritems():
nodes[node] = 1
for node in next_nodes:
nodes[node] = 1
return nodes
def _get_first_nodes(library):
assert len(library)
nodes = []
seqset = library[0]
for i_sequence, sequence in enumerate(seqset.alternates):
assert len(sequence)
n = Node(MAIN, 0, i_sequence, 0)
nodes.append(n)
return nodes
def _get_next_nodes(library, i_seqset, i_sequence, i_base):
assert i_seqset < len(library)
seqset = library[i_seqset]
assert i_sequence < len(seqset.alternates)
sequence = seqset.alternates[i_sequence]
next_nodes = []
# If there are more bases in this sequence, then the next node
# is the next base in the sequence.
if i_base < len(sequence)-1:
n = Node(MAIN, i_seqset, i_sequence, i_base+1)
next_nodes.append(n)
# If this is the last node of the last seqset, then the next
# node is the end node.
elif i_seqset == len(library)-1:
n = Node(END)
next_nodes.append(n)
# If this is the last node of a sequence, then the next node
# is the first nodes of the next sequence.
else:
next_seqset = library[i_seqset+1]
for i in range(len(next_seqset.alternates)):
n = Node(MAIN, i_seqset+1, i, 0)
next_nodes.append(n)
return next_nodes
def _node_dist(library, start_node, end_node):
# Return the distance from the start_node to the end_node as a
# tuple of (total_bases, num_seqsets, seqset_sizes, num_bases).
# total_bases is the minimum number of bases from the start_node
# to the end_node. num_seqset and num_bases is the minimum number
# of (random region) seqsets and bases that need to be traversed.
# seqset_sizes is a list of the minimum number of bases in each of
# the seqsets. The length of seqset_sizes is num_seqsets.
# Possible cases:
# 1. Both are START or both are END (or INSERTEND).
# 2. Nodes are in the same seqset, same sequence.
# 3. start_node is START and end_node is END (or INSERTEND).
# 4. start_node is START and end_node is internal.
# 5. start_node is internal and end_node is END (or INSERTEND).
# 6. start_node and end_node are internal.
#
# There is no distance between bases inserted at the end.
min_bases = [] # minimum number of bases in each seqset.
for seqset in library:
seqlens = [len(x) for x in seqset.alternates]
x = min(seqlens)
min_bases.append(x)
assert start_node.node_type in [MAIN, START, END, INSERT, INSERTEND]
assert end_node.node_type in [MAIN, START, END, INSERT, INSERTEND]
# Case 1.
if start_node.node_type == START and end_node.node_type == START:
total_bases, num_seqset, seqset_sizes, num_bases = 0, 0, [], 0
elif start_node.node_type in ["END", "INSERTEND"] and \
end_node.node_type in ["END", "INSERTEND"]:
total_bases, num_seqset, seqset_sizes, num_bases = 0, 0, [], 0
# Case 2:
elif start_node.i_seqset == end_node.i_seqset:
assert start_node.i_sequence == end_node.i_sequence
assert start_node.i_base < end_node.i_base
total_bases = end_node.i_base - start_node.i_base
num_seqset, seqset_sizes = 0, []
num_bases = total_bases
# Case 3-6.
else:
total_bases = num_bases = num_seqset = 0
seqset_sizes = []
start_i = 0
if start_node.node_type != START:
start_i = start_node.i_seqset+1
seqset = library[start_node.i_seqset]
seq = seqset.alternates[start_node.i_sequence]
num_bases += len(seq)-(start_node.i_base+1)
total_bases += len(seq)-(start_node.i_base+1)
end_i = len(library)
if end_node.node_type != END:
end_i = end_node.i_seqset
num_bases += end_node.i_base + 1 # base 0 is 1 base
total_bases += end_node.i_base + 1
for i in range(start_i, end_i):
seqset = library[i]
if seqset.is_random:
num_seqset += 1
seqset_sizes.append(min_bases[i])
else:
num_bases += min_bases[i]
total_bases += min_bases[i]
assert len(seqset_sizes) == num_seqset
return total_bases, num_seqset, seqset_sizes, num_bases
def _make_main_graph(library):
graph = {} # node (as string) -> list of next nodes
# START points to the first bases of the first seqsets.
graph[Node(START)] = _get_first_nodes(library)
# Add the other nodes.
next_nodes = []
for x in _iter_main_graph(library):
name, seqset, sequence, base, i_seqset, i_sequence, i_base = x
node = Node(MAIN, i_seqset, i_sequence, i_base)
next_nodes = _get_next_nodes(library, i_seqset, i_sequence, i_base)
assert node not in graph
graph[node] = next_nodes
return graph
def _make_insertion_graph(library):
graph = {} # node (as string) -> list of next nodes
# START goes to insert to first base of all sequences in the first
# seqset.
first_nodes = _get_first_nodes(library)
insert_nodes = [
Node(INSERT, n.i_seqset, n.i_sequence, n.i_base) for n in first_nodes]
graph[Node(START)] = insert_nodes
# Inserts point to themselves and the main node.
for x in _iter_main_graph(library):
name, seqset, sequence, base, i_seqset, i_sequence, i_base = x
main_node = Node(MAIN, i_seqset, i_sequence, i_base)
insert_node = Node(INSERT, i_seqset, i_sequence, i_base)
assert insert_node not in graph
graph[insert_node] = [insert_node, main_node]
# INSERTEND absorbs all remaining bases until the END.
graph[Node(INSERTEND)] = [Node(INSERTEND), Node(END)]
# Each node points to the insert of the next node.
for x in _iter_main_graph(library):
name, seqset, sequence, base, i_seqset, i_sequence, i_base = x
main_node = Node(MAIN, i_seqset, i_sequence, i_base)
next_nodes = _get_next_nodes(library, i_seqset, i_sequence, i_base)
insert_nodes = []
for n in next_nodes:
if n.node_type == MAIN:
nn = Node(INSERT, n.i_seqset, n.i_sequence, n.i_base)
elif n.node_type == END:
nn = Node(INSERTEND, n.i_seqset, n.i_sequence, n.i_base)
insert_nodes.append(nn)
assert main_node not in graph
graph[main_node] = insert_nodes
return graph
def _make_deletion_graph(library):
graph = {} # node -> list of next nodes
# START points to everything. Any number of the initial bases can
# be deleted.
nodes = []
for x in _iter_main_graph(library):
x, x, x, x, i_seqset, i_sequence, i_base = x
x = Node(MAIN, i_seqset, i_sequence, i_base)
nodes.append(x)
graph[Node(START)] = nodes
# Make internal edges.
for x in _iter_main_graph(library):
x, x, x, x, i_seqset1, i_sequence1, i_base1 = x
node1 = Node(MAIN, i_seqset1, i_sequence1, i_base1)
for x in _iter_main_graph(library):
x, x, x, x, i_seqset2, i_sequence2, i_base2 = x
node2 = Node(MAIN, i_seqset2, i_sequence2, i_base2)
is_next_node = False
if i_seqset1 > i_seqset2:
pass
# Each node points to all subsequent nodes in the same
# sequence.
elif i_seqset1 == i_seqset2 and \
i_sequence1 == i_sequence2:
if i_base1 < i_base2:
is_next_node = True
# Each node points to all nodes of subsequent seqsets.
elif i_seqset1 < i_seqset2:
is_next_node = True
if not is_next_node:
continue
if node1 not in graph:
graph[node1] = []
graph[node1].append(node2)
# Everything points to END.
for x in _iter_main_graph(library):
x, x, x, x, i_seqset, i_sequence, i_base = x
node = Node(MAIN, i_seqset, i_sequence, i_base)
if node not in graph:
graph[node] = []
assert Node(END) not in graph[node]
graph[node].append(Node(END))
# Filter out all the main paths (because they're not deletes).
main_graph = _make_main_graph(library)
for node, next_nodes in main_graph.iteritems():
if node not in graph:
# Missing for last member of the sequences in the last set.
continue
x = [x for x in graph[node] if x not in next_nodes]
graph[node] = x
if not x:
del graph[node]
return graph
def _calc_emission_probs(library, base2emission, p_mismatch):
# Can cache _iter_main_graph to optimize.
assert p_mismatch > 0 and p_mismatch < 0.5
p_match = 1.0 - p_mismatch
# Make a list of all the bases that can be emitted.
emissions = {}
for x in _iter_main_graph(library):
x, x, x, base, x, x, x = x
e = base2emission.get(base, base)
e = e.upper() # do case insensitive search
emissions[e] = 1
assert "START" not in emissions
assert "END" not in emissions
emissions_in_library = sorted(emissions)
emissions = ["START", "END"] + emissions_in_library
probabilities = {} # (node, emission) -> p
# Set emission probabilities for the main graph.
for x in _iter_main_graph(library):
x, x, x, base, i_seqset, i_sequence, i_base = x
node = Node(MAIN, i_seqset, i_sequence, i_base)
emission = base2emission.get(base, base) # what this base emits
emission = emission.upper()
probs = {} # base -> probability
for e in emissions_in_library:
p = p_match
if e != emission:
p = p_mismatch
probs[e] = p
# Normalize the probabilities to 1.0.
#total = sum(probs.values())
#for b in probs:
# probs[b] = probs[b] / total
for b, p in probs.iteritems():
probabilities[(node, b)] = p
# Set emission probabilities for the insertions.
for x in _iter_main_graph(library):
x, x, x, x, i_seqset, i_sequence, i_base = x
node = Node(INSERT, i_seqset, i_sequence, i_base)
for e in emissions_in_library:
#probabilities[(node, e)] = 1.0/len(emissions_in_library)
probabilities[(node, e)] = p_mismatch
# Set emission probabilities for INSERTEND.
for e in emissions_in_library:
#probabilities[(Node(INSERTEND), e)] = 1.0/len(emissions_in_library)
probabilities[(Node(INSERTEND), e)] = p_mismatch
# Set emission probabilities for start and end.
probabilities[(Node(START), "START")] = 1.0
probabilities[(Node(END), "END")] = 1.0
return probabilities
def _calc_transition_probs(library, p_mismatch, p_insert, p_delete,
p_insert_random, p_delete_random):
# Return a dictionary of (node1, node2) -> transition probability.
# Transition probabilities.
assert p_mismatch > 0 and p_mismatch < 0.5
assert p_insert > 0 and p_insert < 0.5
assert p_delete > 0 and p_delete < 0.5
assert p_insert_random > 0 and p_insert_random < 0.5
assert p_delete_random > 0 and p_delete_random < 0.5
p_match = 1.0 - p_mismatch
p_noindel = 1.0 - min(p_insert, p_delete, p_insert_random, p_delete_random)
# Make the graph.
main_graph = _make_main_graph(library)
insert_graph = _make_insertion_graph(library)
delete_graph = _make_deletion_graph(library)
# Make a list of all nodes in all graphs.
nodes = _get_all_nodes(main_graph, insert_graph, delete_graph)
# Calculate all the probabilities. The probabilities will not be
# normalized and may not sum to 1.
probabilities = {} # node -> next_node -> probability.
for start_node in sorted(nodes):
# start_node can be START, MAIN, INSERT, INSERTEND, END.
#print "%s-%s-%s-%s" % (
# start_node.node_type, start_node.i_seqset,
# start_node.i_sequence, start_node.i_base)
probs = {} # next_node -> p
for next_node in main_graph.get(start_node, []):
probs[next_node] = p_noindel
for next_node in insert_graph.get(start_node, []):
assert start_node.node_type in [START, MAIN, INSERT, INSERTEND], \
start_node.node_type
assert next_node.node_type in [MAIN, END, INSERT, INSERTEND]
p = p_noindel
# We have a different insertion penalty for the random
# region as for other regions.
# 1. If the next node is INSERT or INSERTEND, use p_insert.
# 2. If the next node is INSERT or INSERTEND and both
# this and the next node are in the random region, use
# p_insert_random.
next_type = next_node.node_type
start_random = next_random = None
if start_node.i_seqset is not None:
start_random = library[start_node.i_seqset].is_random
if next_node.i_seqset is not None:
next_random = library[next_node.i_seqset].is_random
# Case 1.
if next_type in [INSERT, INSERTEND]:
p = p_insert
# Case 2.
if next_type in [INSERT, INSERTEND] and \
start_random and next_random:
p = p_insert_random
probs[next_node] = p
for next_node in delete_graph.get(start_node, []):
assert next_node not in probs
x = _node_dist(library, start_node, next_node)
total_bases, num_seqsets, seqset_sizes, num_bases = x
assert total_bases >= 1
assert num_bases >= 1
# If deleting a seqset is lower probability than deleting
# the individual bases, then just model as deleting the
# individual bases.
# Deleting a single base (direct transition to a MAIN
# node) should be equivalent to a delete transition (to a
# DELETE node), mismatch, then a noindel transition back
# to MAIN.
#
            # Deleting two bases would be (p_delete * p_mismatch)**2
            # * p_noindel.
p_delb = p_delete*p_mismatch
p1 = pow(p_delb, total_bases-1) * p_noindel
            # Deleting a 2-base chunk should be equivalent to
# deleting a base, matching the next base, and then
# transitioning to the beginning of the next chunk.
# p_delete*p_mismatch Deleting a base.
# p_noindel To next base.
# p_match Match the next base.
# p_noindel Next chunk.
#
# Deleting a 3-base chunk would be:
            # p_delete*p_mismatch * (p_noindel*p_match)**2 * p_noindel
p2 = pow(p_delb, num_bases-1) * p_noindel # leftover bases
for size in seqset_sizes:
p2 *= p_delb * pow(p_noindel*p_match, size-1)
probs[next_node] = max(p1, p2)
if not probs: # no next nodes
continue
probabilities[start_node] = probs
# Normalize the probabilities.
# If we normalize the probabilities for each node, then later
# deletions will have higher probability than earlier deletions
# (because they have fewer transitions). The algorithm will favor
# deleting later bases.
#
# To solve this, keep track of the normalization for the first
# node in the random region. This should have the most
# transitions. Normalize all the nodes in the random region the
# same way.
# The START node has the most transitions. Transitions:
# 1. To all MAIN nodes.
# 2. To INSERT of MAIN node in seqset 0.
# 3. To INSERTEND (not currently used).
# 4. To END (not currently used).
norm_factor = None
normalized = {} # start_node -> next_nodes -> p
for start_node in sorted(probabilities):
probs = probabilities[start_node]
if norm_factor is None:
total = sum(probs.values())
norm_factor = 1.0 / total
# Don't bother normalizing this.
#for next_node in probs:
# probs[next_node] = probs[next_node] * norm_factor
# Make sure probabilities sum to 1.
#assert abs(sum(probs.values()) - 1.0) < 1E-10
# Make sure probabilities sum to <= 1.
#total = sum(probs.values())
#assert total <= 1+1E-8, "%s Prob: %s" % (start_node, total)
normalized[start_node] = probs
clean = {}
for start_node in probabilities:
probs = probabilities[start_node]
for next_node, p in probs.iteritems():
clean[(start_node, next_node)] = p
return clean
def make_markov_model(library, base2emission, p_mismatch, p_insert, p_delete,
p_insert_random, p_delete_random):
# Return a MarkovModel.
import numpy
from genomicode import MarkovModel
assert p_mismatch > 0 and p_mismatch < 0.50
assert p_insert > 0 and p_insert < 0.50
assert p_delete > 0 and p_delete < 0.50
assert p_insert_random > 0 and p_insert_random < 0.50
assert p_delete_random > 0 and p_delete_random < 0.50
# Calculate the transition probabilities.
transition_probs = _calc_transition_probs(
library, p_mismatch, p_insert, p_delete,
p_insert_random, p_delete_random)
emission_probs = _calc_emission_probs(library, base2emission, p_mismatch)
#for (start, end) in sorted(transition_probs):
# x = transition_probs[(start, end)], start, end
# print "\t".join(map(str, x))
#import sys; sys.exit(0)
# Make a list of all the nodes.
nodes = {}
for (node1, node2) in transition_probs:
nodes[node1] = 1
nodes[node2] = 1
nodes = sorted(nodes)
# Make a list of all the emissions.
emissions = {}
for (node, emission) in emission_probs:
emissions[emission] = 1
emissions = sorted(emissions)
# N number of states
# M number of emissions
# p_initial N-vector of initial starting probabilities.
# p_emission N x M matrix. Each row sums to 1.0.
# p_transition N x N matrix.
# Index the nodes and emissions for the matrices.
node2i = {}
for i, s in enumerate(nodes):
node2i[s] = i
emission2i = {}
for i, s in enumerate(emissions):
emission2i[s] = i
N = len(nodes)
M = len(emissions)
#p_initial = [0.0] * N
#p_emission = [[0.0]*M for i in range(N)]
#p_transition = [[0.0]*N for i in range(N)]
p_initial = numpy.zeros(N)
p_emission = numpy.zeros((N,M))
p_transition = numpy.zeros((N,N))
# Start at the START node.
p_initial[node2i[Node(START)]] = 1.0
# Set the transition probabilities.
for (start, end) in transition_probs:
i = node2i[start]
j = node2i[end]
p_transition[i][j] = transition_probs[(start, end)]
# Set the emissions probabilities.
for (node, emission) in emission_probs:
i = node2i[node]
j = emission2i[emission]
p_emission[i][j] = emission_probs[(node, emission)]
mm = MarkovModel.MarkovModel(
nodes, emissions, p_initial, p_transition, p_emission)
return mm
def _add_deletions_to_alignment(library, alignment):
# Return list of (node, match_type, base, base in sequence).
# For each seqset, figure out which alternate is used in the
# alignment.
iseqset2isequence = {}
for x in alignment:
#print repr(x)
node, log_score, match_type, base_in_library, base_in_sequence = x
if node.node_type in [START, END, INSERTEND]:
continue
if node.i_seqset in iseqset2isequence:
assert iseqset2isequence[node.i_seqset] == node.i_sequence
iseqset2isequence[node.i_seqset] = node.i_sequence
# If a seqset is completely deleted, then arbitrarily use sequence
# 0.
for i in range(len(library)):
if i not in iseqset2isequence:
iseqset2isequence[i] = 0
# Make a list of all the bases in the library.
all_bases = []
for i_seqset, seqset in enumerate(library):
i_sequence = iseqset2isequence[i_seqset]
sequence = seqset.alternates[i_sequence]
for i_base in range(len(sequence)):
x = i_seqset, i_sequence, i_base
all_bases.append(x)
# Make a list of the bases that are matched.
indexes2align = {} # Either (i_seqset, i_sequence, i_base) or INSERTEND.
for align in alignment:
node = align[0]
if node.node_type in [MAIN, INSERT]:
key = node.i_seqset, node.i_sequence, node.i_base
else:
assert node.node_type == INSERTEND
key = INSERTEND
if key not in indexes2align:
indexes2align[key] = []
indexes2align[key].append(align)
# Iterate through the library, adding deletions when necessary.
full_alignment = []
for i_seqset, seqset in enumerate(library):
i_sequence = iseqset2isequence[i_seqset]
sequence = seqset.alternates[i_sequence]
for i_base, base in enumerate(sequence):
x = i_seqset, i_sequence, i_base
if x in indexes2align:
full_alignment.extend(indexes2align[x])
else:
node = Node(MAIN, i_seqset, i_sequence, i_base)
align = node, 0, "DELETE", base, "-"
full_alignment.append(align)
full_alignment.extend(indexes2align.get(INSERTEND, []))
return full_alignment
def _score_sequence_h(mm, library, base2emission, sequence):
# Return ln(score), list of
# (node, ln(score), match_type, base in lib, base in sequence).
import math
from genomicode import MarkovModel
# Add "START" and "END" emissions to the ends.
assert type(sequence) is type("")
sequence = ["START"] + list(sequence) + ["END"]
sequence_u = [x.upper() for x in sequence] # score case insensitive
# Score the alignment.
alignments = MarkovModel.find_states(mm, sequence_u)
states, scores = alignments[0]
log_scores = [
math.log(max(x, MarkovModel.VERY_SMALL_NUMBER)) for x in scores]
lscore = log_scores[-1]
#print sequence
#for i in range(len(states)):
# print states[i], scores[i]
#import sys; sys.exit(0)
# Remove the ["START"] and ["END"] states.
assert states[0] == Node(START)
assert states[-1] == Node(END)
states = states[1:-1]
scores = scores[1:-1]
log_scores = log_scores[1:-1]
sequence = sequence[1:-1]
alignment = []
for i, node in enumerate(states):
base_in_seq = sequence[i]
if node.node_type == INSERTEND:
x = node, lscore, "INSERT", "-", base_in_seq
alignment.append(x)
continue
assert node.node_type in [MAIN, INSERT]
seq = library[node.i_seqset].alternates[node.i_sequence]
base_in_lib = "-"
if node.node_type == MAIN:
base_in_lib = seq[node.i_base]
b1, b2 = base2emission.get(base_in_lib, base_in_lib), base_in_seq
match_type = "MATCH"
if node.node_type == "INSERT":
match_type = "INSERT"
elif b1.upper() != b2.upper():
match_type = "MISMATCH"
x = node, log_scores[i], match_type, base_in_lib, base_in_seq
alignment.append(x)
return lscore, alignment
def guess_sequence_orientation(sequence, library):
# Return 1, 0, or -1. 1 if the sequence is in the right
# orientation, -1 if it needs to be reverse complemented, and 0 if
# I can't tell.
from Bio import Seq
from Bio import pairwise2
sequence = sequence.upper()
sequence_rc = Seq.Seq(sequence).reverse_complement().tostring()
# First, see if it matches the first seqset of the library exactly.
for seq in library[0].alternates:
if sequence[:len(seq)] == seq.upper():
return 1
if sequence_rc[:len(seq)] == seq.upper():
return -1
# If it doesn't match exactly, see if it matches to within 2
# mismatches.
for seq in library[0].alternates:
if len(seq) < 6: # if sequence too short, hard to align.
continue
score = pairwise2.align.globalxx(
sequence[:len(seq)], seq.upper(), score_only=True)
if score >= len(seq)-2:
return 1
score = pairwise2.align.globalxx(
sequence_rc[:len(seq)], seq.upper(), score_only=True)
if score >= len(seq)-2:
return -1
# I can't tell.
return 0
def score_sequence(mm, library, base2emission, sequence):
# Return score, is_revcomp,
# list of (node, match_type, base, base in sequence).
from Bio import Seq
assert library
orientation = guess_sequence_orientation(sequence, library)
if orientation == 1:
x = _score_sequence_h(mm, library, base2emission, sequence)
lscore, alignment = x
is_revcomp = False
elif orientation == -1:
sequence_rc = Seq.Seq(sequence).reverse_complement().tostring()
x = _score_sequence_h(mm, library, base2emission, sequence_rc)
lscore, alignment = x
is_revcomp = True
else:
# Score both the sequence and its reverse complement. Return the
# one with the higher score.
sequence_rc = Seq.Seq(sequence).reverse_complement().tostring()
x1 = _score_sequence_h(mm, library, base2emission, sequence)
x2 = _score_sequence_h(mm, library, base2emission, sequence_rc)
lscore1, alignment1 = x1
lscore2, alignment2 = x2
lscore, is_revcomp, alignment = lscore1, False, alignment1
if lscore2 > lscore1:
lscore, is_revcomp, alignment = lscore2, True, alignment2
#for x in alignment:
# print repr(x)
#import sys; sys.exit(0)
alignment = _add_deletions_to_alignment(library, alignment)
return lscore, is_revcomp, alignment
def pretty_sequences(alignment):
ideal_seq = []
real_seq = []
prev_i_seqset = None
for i, x in enumerate(alignment):
node, log_score, match_type, base_in_lib, base_in_seq = x
if node.i_seqset != prev_i_seqset and prev_i_seqset is not None:
ideal_seq.append("*")
real_seq.append("*")
prev_i_seqset = node.i_seqset
ideal_seq.append(base_in_lib)
real_seq.append(base_in_seq)
ideal_seq = "".join(ideal_seq)
real_seq = "".join(real_seq)
return ideal_seq, real_seq
def parse_node(node_str):
assert node_str.startswith("Node(") and node_str.endswith(")")
x = node_str[5:-1]
x = x.split(",")
x = [x.strip() for x in x if x.strip()]
assert len(x) in [1, 4]
return Node(*x)
def read_library(filename):
"read_library(filename) -> list of (name, is_barcode, is_random, seqset)"
import filelib
# Format of file is:
# "Name" "Bar Code" "Random Region" "Alternate" ["Alternate", ...]
# <name> <1/0> <1/0> <alt1> <alt2> <altN>
# <name> <1/0> <1/0> <alt1> <alt2> <altN>
#
# Columns are separated by tabs. Each line contains a seqset. A
# seqset is represented as a list of sequences.
#
# Should have only 1 Bar Code.
handle = filelib.read_cols(filename)
header = handle.next()
# Check the header format.
assert len(header) >= 4, "invalid library format"
assert header[0].upper() == "Name".upper()
assert header[1].upper() == "Bar Code".upper()
assert header[2].upper() == "Random Region".upper()
for i in range(3, len(header)):
assert header[i].upper() == "Alternate".upper()
barcode_seen = False
    library = []  # list of SequenceSet objects
for i, cols in enumerate(handle):
assert len(cols) >= 4
name = cols[0]
is_barcode = cols[1]
is_random = cols[2]
alternates = [x.strip() for x in cols[3:] if x.strip()]
assert is_barcode in ["0", "1"]
assert is_random in ["0", "1"]
assert len(alternates) >= 1
is_barcode, is_random = int(is_barcode), int(is_random)
if is_barcode:
assert not barcode_seen, "Can only have 1 barcode."
barcode_seen = True
x = SequenceSet(name, is_barcode, is_random, alternates)
library.append(x)
assert len(library)
return library
def parse_fastq(filename):
# Iterator that yields tuples (title, sequence, quality).
from genomicode import filelib
# Format of FASTQ files:
# @4GEOU:00042:00049 Title
# ACTGCTAATTCACACTGGATTAGTTGGGCTACTTCATCGT Sequence
# + Always "+"
# =<>>A77.7.54>4444.46-444,44*3333:9:44443 Quality
handle = filelib.openfh(filename)
while True:
x = [handle.readline() for x in range(4)]
lines = [x.strip() for x in x]
if not lines[0]:
break
title, sequence, x, quality = lines
assert x == "+"
assert len(sequence) == len(quality)
assert quality
yield title, sequence, quality
```
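A minimal usage sketch for the module above, assuming the `genomicode` package (including `MarkovModel`) and Biopython are available; the library file name, read sequence, and probability values are made up for illustration:

```python
# Hedged sketch, not part of the original module.  base2emission is left
# empty so each library base emits itself (the code falls back to
# base2emission.get(base, base)).
from genomicode import aptamerlib

library = aptamerlib.read_library("library.txt")      # hypothetical file
base2emission = {}
mm = aptamerlib.make_markov_model(
    library, base2emission,
    p_mismatch=0.1, p_insert=0.1, p_delete=0.1,
    p_insert_random=0.1, p_delete_random=0.1)

lscore, is_revcomp, alignment = aptamerlib.score_sequence(
    mm, library, base2emission, "ACGTACGTACGT")        # hypothetical read
ideal_seq, real_seq = aptamerlib.pretty_sequences(alignment)
print ideal_seq
print real_seq
```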
#### File: changlab/genomicode/clusterio.py
```python
def read_gtr_file(filename):
# Reads gtr or atr files.
# Return a list of (left_id, right_id, dist).
# The dist is the joining correlation between the left and right
# nodes.
# The index of each element in the list determines the node id.
# node id = -(index+1)
tree = []
for i, line in enumerate(open(filename)):
x = line.rstrip().split()
node_str, left_str, right_str, dist = x
x = map(parse_node, (node_str, left_str, right_str))
node_id, left_id, right_id = x
dist = float(dist)
assert node_id == -(i+1)
x = left_id, right_id, dist
tree.append(x)
return tree
def read_atr_file(filename):
return read_gtr_file(filename)
def read_gtc_file(filename):
# Format is:
# GENE0X <cluster>
data = {}
for line in open(filename):
x = line.strip().split()
assert len(x) == 2
s, cluster = x[:2]
cluster = int(cluster)
if cluster == -1:
cluster = None
id = parse_node(s)
data[id] = cluster
return data
def read_atc_file(filename):
return read_gtc_file(filename)
def read_kgg_file(filename):
import jmath
# Header:
# <ID> GROUP
#
# GROUP should be integer.
handle = open(filename)
x = handle.readline().strip().split()
assert len(x) == 2
assert x[1] == "GROUP"
cluster = []
for line in handle:
x = line.rstrip("\r\n").split("\t")
id, group = x
group = jmath.safe_int(group)
x = id, group
cluster.append(x)
return cluster
def read_kag_file(filename):
return read_kgg_file(filename)
def _write_gtr_file_h(data, handle, item):
import math
# Calculate the width of the column.
# NODE 10 X
if not data:
return
colwidth = 4 + int(math.ceil(math.log(len(data), 10))) + 1
for i, x in enumerate(data):
left_id, right_id, dist = x
node_id = -i-1
node_str = format_node(node_id, "NODE")
left_str = format_node(left_id, item)
right_str = format_node(right_id, item)
print >>handle, "%-*s %-*s %-*s %.6f" % (
colwidth, node_str, colwidth, left_str, colwidth, right_str, dist)
def write_gtr_file(data, handle):
# Format:
# <node_str> <left_str> <right_str> <distance>
_write_gtr_file_h(data, handle, "GENE")
def write_atr_file(data, handle):
_write_gtr_file_h(data, handle, "ARRY")
def _write_gtc_file_h(data, handle, item):
# data is id -> cluster
ids = sorted(data)
for id in ids:
s = format_node(id, item)
cluster = data[id]
if cluster is None:
cluster = -1
print >>handle, "%s %d" % (s, cluster)
def write_gtc_file(data, handle):
# Format:
# <node_str> <cluster>
_write_gtc_file_h(data, handle, "GENE")
def write_atc_file(data, handle):
_write_gtc_file_h(data, handle, "ARRY")
def write_kgg_file(data, handle):
# Bug: Does not preserve original ID name.
# Format (has header):
# ID GROUP
# <id> <cluster>
#
# The <id> might be a node string or the IDs provided in the PCL
# file. cluster seems to use whichever IDs are in the first
# column of the PCL file.
x = "ID", "GROUP"
print >>handle, "\t".join(x)
for id, cluster in data:
x = id, cluster
if cluster is None:
cluster = "NA"
print >>handle, "\t".join(map(str, x))
def write_kag_file(data, handle):
write_kgg_file(data, handle)
def parse_node(s):
assert (s.startswith("NODE") or
s.startswith("GENE") or s.startswith("ARRY")), s
assert s.endswith("X")
id = int(s[4:-1])
if s.startswith("NODE"):
id = -id
return id
def format_node(id, item):
# item should be NODE, GENE, or ARRY.
if id < 0:
s = "NODE%dX" % -id
else:
s = "%s%dX" % (item, id)
return s
```
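A short sketch of the node-string convention used by these readers and writers (module path assumed to be `genomicode.clusterio`; the tree data are made up):

```python
import sys
from genomicode import clusterio

print clusterio.format_node(-1, "NODE")   # NODE1X  (internal node)
print clusterio.format_node(5, "GENE")    # GENE5X  (leaf)
print clusterio.parse_node("NODE1X")      # -1
print clusterio.parse_node("GENE5X")      # 5

# Write a one-join tree: GENE0X and GENE1X merge with correlation 0.95.
tree = [(0, 1, 0.95)]
clusterio.write_gtr_file(tree, sys.stdout)
```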
#### File: changlab/genomicode/config.py
```python
def read_config():
import os
import ConfigParser
config_file = os.path.join(os.environ["HOME"], ".genomicoderc")
assert os.path.exists(config_file), "File not found: %s" % config_file
# Read the configuration.
config = ConfigParser.ConfigParser()
config.optionxform = str # use case sensitive option names
config.read(config_file)
# Set a dictionary of name=value from the configuration file,
# ignoring section headings.
var_dict = {}
for section in config.sections():
for (name, value) in config.items(section):
var_dict[name] = value
return var_dict
var_dict = read_config()
for name, value in var_dict.iteritems():
vars()[name] = value
del var_dict
```
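For reference, a hypothetical `~/.genomicoderc` that this module would accept; section names are ignored and every `name=value` pair becomes a module-level attribute (the key names below are examples, not required settings):

```python
# Hypothetical ~/.genomicoderc:
#
#   [software]
#   cluster = /usr/local/bin/cluster
#
#   [annotations]
#   gene_symbols = /data/annotations/gene_symbols.txt
#
from genomicode import config       # reads ~/.genomicoderc on import
print config.cluster                # hypothetical key from the file
print config.gene_symbols
```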
#### File: changlab/genomicode/consclust.py
```python
import os, sys
class ConsensusScore:
"""
C Matrix of consensus scores.
C_flat List of the scores on the upper diagonal.
score2cdf Dictionary of consensus score -> CDF.
auc Area under the curve.
"""
def __init__(self, C, C_flat, score2cdf, auc):
self.C = C
self.C_flat = C_flat
self.score2cdf = score2cdf
self.auc = auc
def calc_consensus(X):
"""X is a gene x clustering matrix of genes that have been
clustered multiple times. Each column is one run of the
clustering algorithm. Each cell is an integer indicating the
cluster assignment of the gene in that clustering. It can be None
indicating that the gene was not included in that clustering.
Returns a ConsensusScore object.
"""
ngene = len(X)
nruns = len(X[0])
assert ngene > 0, "not enough genes (%d)" % ngene
assert nruns >= 2, "not enough clusterings (%d)" % nruns
for x in X:
assert len(x) == nruns
# For each pair of genes, count the number of data sets in which
# they occur together.
C = _make_consensus_matrix(X)
# Get a list of the pairwise consensus values.
C_flat = []
for i in range(len(C)-1):
x = C[i][i+1:]
C_flat.extend(x)
TOTAL = len(C)*(len(C)-1)/2
assert len(C_flat) == TOTAL
# For each consensus value, calculate the cdf.
score2cdf = {} # dict of consensus score -> CDF
for i, score in enumerate(sorted(C_flat, reverse=True)):
if score in score2cdf:
continue
cdf = 1.0 - float(i)/len(C_flat)
score2cdf[score] = cdf
# Calculate the area under the curve.
scores = sorted(score2cdf)
n = len(scores)
auc = 0.0
for i in range(1, len(scores)):
auc += (scores[i]-scores[i-1]) * score2cdf[scores[i]]
x = ConsensusScore(C, C_flat, score2cdf, auc)
return x
def count_clusters(X):
"""X is a gene x clustering matrix of genes that have been
clustered multiple times. Each column is one run of the
clustering algorithm. Each cell is an integer indicating the
cluster assignment of the gene in that clustering. It can be None
indicating that the gene was not included in that clustering.
Returns a tuple of (nclust, nc_mean, nc_sd) where nclust is a list
with the same number of columns as X. Each item is the number of
clusters in that clustering. nc_mean is the mean of the number of
clusters, and nc_sd is the standard deviation.
"""
import jmath
ngene = len(X)
nruns = len(X[0])
if not ngene or not nruns:
return [], 0, 0
nclust = [0] * nruns
for j in range(nruns):
x = [X[i][j] for i in range(ngene) if X[i][j] is not None]
nclust[j] = len({}.fromkeys(x))
nc_mean = jmath.mean(nclust)
nc_sd = 0
if min(nclust) != max(nclust):
nc_sd = jmath.stddev(nclust)
return nclust, nc_mean, nc_sd
def _make_consensus_matrix(X):
# Return a consensus matrix from X. Only fills in the upper
# diagonal of the matrix.
# For each pair of genes, count the number of data sets in which
# they are in the same cluster.
ngenes, nruns = len(X), len(X[0])
C = [[0]*ngenes for i in range(ngenes)]
for i in range(ngenes):
X_i = X[i]
for j in range(i, ngenes):
X_j = X[j]
# Count the number of clusterings this pair of genes
# co-occur. May be less than the total number of
# clusterings if different subsets of genes are used each
# time.
t = 0
# Count the number of clusterings each pair of genes are
# in the same cluster.
c = 0
for k in range(nruns):
if X_i[k] is None or X_j[k] is None:
continue
t += 1
c += int(X_i[k] == X_j[k])
if t > 0:
C[i][j] = C[j][i] = float(c) / t
return C
## def calc_consensus_p(dataset, NUM_SAMPLES=100):
## import math
## import random
## import jmath
## R = jmath.start_R()
## # Calculate the consensus score for this gene set.
## X_gset = dataset.matrix()
## C_gset = calc_consensus_score_from_matrix(X_gset)
## rand_nlp = [None] * NUM_SAMPLES
## for zzz in range(NUM_SAMPLES):
## # Randomly permute the gene set.
## X_rand = [X_gset[i][:] for i in range(len(X_gset))]
## for i in range(len(X_rand[0])):
## for j in range(len(X_rand)-1, 0, -1):
## k = random.randint(0, j)
## X_rand[j][i], X_rand[k][i] = X_rand[k][i], X_rand[j][i]
## C_rand = calc_consensus_score_from_matrix(X_rand)
## # Calculate the p-value from the Kolmogorov-Smirnov test.
## ow = R.options("warn")
## R.options(warn=-1)
## x = R.ks_test(C_gset.C_flat, C_rand.C_flat, exact=True)
## R.options(**ow)
## p_value = max(x["p.value"], 1E-10)
## nlp = -math.log(p_value, 10)
## rand_nlp[zzz] = nlp
## mean_nlp = float(sum(rand_nlp)) / len(rand_nlp)
## return mean_nlp
```
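A toy example of the consensus calculation (cluster assignments are made up; `None` marks a gene left out of a clustering):

```python
from genomicode import consclust

# 4 genes x 2 clusterings; gene 3 was not included in the second run.
X = [
    [0, 1],
    [0, 1],
    [1, 0],
    [1, None],
    ]
score = consclust.calc_consensus(X)
print score.C[0][1]              # genes 0 and 1 always co-cluster -> 1.0
print score.auc

nclust, nc_mean, nc_sd = consclust.count_clusters(X)
print nclust                     # [2, 2]
```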
#### File: changlab/genomicode/genepattern.py
```python
ATTRIBUTES = [
"fileFormat", "TYPE", "MODE", "optional", "type",
"domain", "default_value", "prefix_when_specified"]
class ParameterInfo:
def __init__(self, name, value, description, attributes):
self.name = name
self.value = value
self.description = description
self.attributes = attributes
class RTBParameter:
# Parameter class from RunTaskBean.
def __init__(
self, name, displayName, displayDesc, optional, inputType,
defaultValue, choices):
# name Name of the parameter. "rma_expression_file"
# displayName Usually the same as <name>.
# displayDesc Description for the user.
# optional Is this optional? True/False.
# inputType "password", "file", "select" or "text".
# defaultValue Not necessary for "select" inputs.
# choices List of dictionaries. Each dict describes one option.
#
# In choices, each option is a dictionary of:
# value Passed to module. "HIERARCHICAL"
# label Pretty label shown to user. "Hierarchical"
# defaultOption True/False
# Do some basic checking.
if inputType == "select":
assert choices
self.name = name
self.displayName = displayName
self.displayDesc = displayDesc
self.optional = optional
self.inputType = inputType
self.defaultValue = defaultValue
self.choices = choices
# For Element.
TEXT, INTEGER, FLOAT, PASSWORD = range(4)
class Element:
def __init__(self, name, description, default=None, optional=None):
optional = optional or False
self.name = name
self.description = description
self.default = default
self.optional = optional
class EFileChooser(Element):
def __init__(
self, name, description, formats, default=None, optional=None,
prefix_when_specified=None):
Element.__init__(
self, name, description, default=default, optional=optional)
assert type(formats) is type([])
self.formats = formats
self.prefix_when_specified = prefix_when_specified
class EDropdown(Element):
def __init__(
self, name, description, choices, formats=None, default=None,
optional=None, input_type=None):
Element.__init__(
self, name, description, default=default, optional=optional)
assert formats is None or type(formats) is type([])
self.formats = formats
if input_type is None:
input_type = TEXT
assert input_type in range(4)
assert type(choices) is type([])
# Make sure choices is a list of (value, pretty_value). If an
# item is a singleton, then make value and pretty_value the
# same.
for i in range(len(choices)):
if type(choices[i]) is type(()):
continue
assert type(choices[i]) is type(""), "choice must be a string."
value = choices[i]
choices[i] = value, value
self.choices = choices
self.input_type = input_type
# Make sure default is one of the choices.
if default:
for value, pretty_value in choices:
if value == default:
break
else:
raise AssertionError, (
"default value [%s] is not a choice" % default)
class ETextBox(Element):
def __init__(
self, name, description, formats=None, default=None, optional=None,
input_type=None, prefix_when_specified=None):
Element.__init__(
self, name, description, default=default, optional=optional)
formats = formats or []
assert type(formats) is type([])
self.formats = formats
if input_type is None:
input_type = TEXT
assert input_type in range(4)
self.input_type = input_type
self.prefix_when_specified = prefix_when_specified
def fix_environ_path(binpath=[]):
# For some reason, GenePattern screws up the PATH environment
# variable. It's automatically set to:
# '/sbin:/usr/sbin:/bin:/usr/bi /opt/GenePatternServer/taskLib/ \
# BinReg.20.204'
# Detect this and fix it.
import os
if os.environ["PATH"].find("GenePatternServer") < 0 and not binpath:
return
# Make sure binpath is specified correctly.
for x in binpath:
assert ":" not in x
# BUG: Should allow configuration of path, java path.
PATH = [
#"/usr/java/jdk1.5.0_14/bin", # Need Java for matlab.
"/usr/local/bin",
"/usr/bin",
"/bin",
#"/sbin",
]
PATH = PATH + binpath
# /usr/java/jdk1.5.0_14/bin:/usr/local/bin:/bin:/usr/bin:
os.environ["PATH"] = ":".join(PATH)
def format_parameter_info(params):
import StringIO
handle = StringIO.StringIO()
w = handle.write
w('<?xml version="1.0" encoding="UTF-8"?>\n')
w("\n")
w("<ANALYSISPARAMETERS>\n")
need_newline1 = False
for param in params:
if need_newline1:
w("\n")
need_newline1 = True
w(' <PARAMETER name="%s" value="%s">\n' % (param.name, param.value))
# Should do a better job of this.
x = param.description
x = x.replace("&", "&")
x = x.replace(">", ">")
x = x.replace("<", "<")
w(' <DESCRIPTION>%s</DESCRIPTION>\n' % x)
# Make sure all the attributes are known.
for key in param.attributes:
assert key in ATTRIBUTES
need_newline2 = False
for key in ATTRIBUTES:
if key not in param.attributes:
continue
if need_newline2:
w("\n")
need_newline2 = True
value = param.attributes[key]
w(' <ATTRIBUTE key="%s">%s</ATTRIBUTE>' % (key, value))
w('</PARAMETER>')
w("</ANALYSISPARAMETERS>\n")
handle.seek(0)
return handle.read()
def parse_parameter_info(parameter_info):
# Parse an XML formatted string into a list of ParameterInfo
# objects.
from xml.parsers import expat
class Parser:
def __init__(self):
self._reset_parameters()
self.params = []
def _reset_parameters(self):
self.name = self.value = None
self.description = ""
self.attrs = {} # dict of key -> value
self._tag = None
self._attr_key = ""
self._attr_value = ""
def start_element(self, name, attrs):
# Wrapped around all the parameters.
if name == "ANALYSISPARAMETERS":
return
elif name == "PARAMETER":
assert self.name is None
assert self.value is None
self.name = attrs["name"]
self.value = attrs["value"]
elif name == "DESCRIPTION":
assert not self.description
assert self._tag is None
self._tag = name
elif name == "ATTRIBUTE":
assert self._tag is None
                assert self._attr_key == ""
self._tag = name
self._attr_key = attrs["key"]
#print 'Start element:', name, attrs
def end_element(self, name):
if self._tag == "DESCRIPTION":
pass
elif self._tag == "ATTRIBUTE":
assert self._attr_key
# _attr_value might be blank.
self.attrs[self._attr_key] = self._attr_value
self._attr_key = ""
self._attr_value = ""
if name == "PARAMETER":
x = ParameterInfo(
name=self.name, value=self.value,
description=self.description, attributes=self.attrs)
self.params.append(x)
self._reset_parameters()
self._tag = None
#print 'End element:', name
def char_data(self, data):
if self._tag == "DESCRIPTION":
self.description = self.description + str(data)
elif self._tag == "ATTRIBUTE":
self._attr_value = self._attr_value + str(data)
#print 'Character data:', repr(data)
parser = Parser()
p = expat.ParserCreate()
p.StartElementHandler = parser.start_element
p.EndElementHandler = parser.end_element
p.CharacterDataHandler = parser.char_data
p.Parse(parameter_info, True)
return parser.params
def format_rtbparameter(params):
import json
x = json.dumps([p.__dict__ for p in params])
return x
def parse_rtbparameter(rtbparameter_str):
# Parse a JSON formatted string into a list of RTBParameter objects.
import json
x = json.loads(rtbparameter_str)
params = [RTBParameter(**x) for x in x]
return params
def param2elem(param):
# Figure out which type of parameter this is.
# CONDITION TYPE
# <value> given EDropdown
# attributes["type"] == java.io.File EFileChooser
# otherwise ETextBox
attrs = param.attributes
# Make sure I know how to interpret each attribute in this
# parameter.
for k in attrs:
assert k in ATTRIBUTES, "Unknown attribute: %s" % k
if param.value:
elem_type = EDropdown
elif attrs["type"] == "java.io.File":
elem_type = EFileChooser
else:
elem_type = ETextBox
attr_type2input_type = {
"java.lang.String" : TEXT,
"java.lang.Integer" : INTEGER,
"java.lang.Float" : FLOAT,
"PASSWORD" : PASSWORD,
}
elem = None
if elem_type == EDropdown:
assert param.value
choices = _parse_choices(param.value)
# Sometimes fileFormat is missing, sometimes it's empty.
formats = _parse_formats(attrs.get("fileFormat", ""))
assert "TYPE" not in attrs
# Sometimes MODE is missing, sometimes it's empty or "IN".
assert attrs.get("MODE", "") in ["", "IN"]
optional = attrs["optional"]
assert attrs["type"] in attr_type2input_type, \
"Unknown attribute type: %s" % attrs["type"]
input_type = attr_type2input_type[attrs["type"]]
assert attrs.get("domain", "") == ""
default = attrs["default_value"]
assert attrs["prefix_when_specified"] == ""
elem = EDropdown(
param.name, param.description, choices, default=default,
optional=optional, input_type=input_type)
elif elem_type == EFileChooser:
assert not param.value
# Sometimes fileFormat is missing.
formats = _parse_formats(attrs.get("fileFormat", ""))
assert attrs["TYPE"] == "FILE"
assert attrs["MODE"] == "IN"
optional = attrs["optional"]
assert attrs["type"] == "java.io.File"
assert attrs.get("domain", "") == ""
# Sometimes default_value is missing.
default = attrs.get("default_value", "")
prefix_when_specified = attrs["prefix_when_specified"]
elem = EFileChooser(
param.name, param.description, formats, default=default,
optional=optional, prefix_when_specified=prefix_when_specified)
elif elem_type == ETextBox:
assert not param.value
# Sometimes fileFormat is missing, sometimes it's empty.
formats = _parse_formats(attrs.get("fileFormat", ""))
assert "TYPE" not in attrs
# Sometimes MODE is missing, sometimes it's empty or "IN".
assert attrs.get("MODE", "") in ["", "IN"]
optional = attrs["optional"]
# type might be missing. If it's missing, then assume String.
attrs_type = attrs["type"]
if not attrs_type:
attrs_type = "java.lang.String"
assert attrs_type in attr_type2input_type, \
"Unknown attribute type: %s" % attrs_type
input_type = attr_type2input_type[attrs_type]
assert attrs.get("domain", "") == ""
default = attrs["default_value"]
prefix = attrs["prefix_when_specified"]
elem = ETextBox(
param.name, param.description, formats=formats, default=default,
optional=optional, input_type=input_type,
prefix_when_specified=prefix)
else:
raise AssertionError, "Unknown elem_type."
return elem
def elem2param(elem):
name = elem.name
value = ""
description = elem.description
attributes = {}
attributes["default_value"] = elem.default or ""
attributes["optional"] = ""
if elem.optional:
attributes["optional"] = "on"
input_type2attr_type = {
TEXT : "java.lang.String",
INTEGER : "java.lang.Integer",
FLOAT : "java.lang.Float",
PASSWORD : "PASSWORD",
}
if elem.__class__ is EFileChooser:
attributes["fileFormat"] = _format_formats(elem.formats)
attributes["TYPE"] = "FILE"
attributes["MODE"] = "IN"
attributes["type"] = "java.io.File"
attributes["prefix_when_specified"] = elem.prefix_when_specified or ""
elif elem.__class__ is EDropdown:
value = _format_choices(elem.choices)
# Leave fileFormat empty unless it's explicitly specified.
if elem.formats:
attributes["fileFormat"] = _format_formats(elem.formats)
attributes["type"] = input_type2attr_type[elem.input_type]
attributes["prefix_when_specified"] = ""
elif elem.__class__ is ETextBox:
# Leave fileFormat empty unless it's explicitly specified.
if elem.formats:
attributes["fileFormat"] = _format_formats(elem.formats)
attributes["type"] = input_type2attr_type[elem.input_type]
attributes["prefix_when_specified"] = elem.prefix_when_specified
else:
raise AssertionError, "Unknown element: %s." % elem.__class__
param = ParameterInfo(name, value, description, attributes)
return param
def param2rtbparam(param):
name = param.name
x = param.name
x.replace(".", " ")
displayName = x
displayDesc = param.description
optional = param.attributes.get("optional", False)
defaultValue = param.attributes.get("default_value", "")
choices = []
if param.value:
for x in _parse_choices(param.value):
value, pretty_value = x
x = {}
x["value"] = value
x["label"] = pretty_value
d = defaultValue in [value, pretty_value]
x["defaultOption"] = d
choices.append(x)
# Should be: "password", "file", "select" or "text"
if choices:
inputType = "select"
elif param.attributes.get("type") == "java.io.File":
inputType = "file"
elif param.attributes.get("type") == "PASSWORD":
inputType = "password"
else:
inputType = "text"
x = RTBParameter(
name, displayName, displayDesc, optional, inputType, defaultValue,
choices)
return x
def elem2rtbparam(elem):
name = elem.name
displayName = name
displayDesc = elem.description
optional = elem.optional
defaultValue = elem.default or ""
choices = []
if elem.__class__ is ETextBox:
inputType = "text"
if elem.input_type == PASSWORD:
inputType = "password"
elif elem.__class__ is EFileChooser:
inputType = "file"
elif elem.__class__ is EDropdown:
inputType = "select"
for value, pretty_value in elem.choices:
x = {}
x["value"] = value
x["label"] = pretty_value
x["defaultOption"] = elem.default in [value, pretty_value]
choices.append(x)
else:
raise AssertionError, "Unknown element: %s." % elem.__class__
x = RTBParameter(
name, displayName, displayDesc, optional, inputType, defaultValue,
choices)
return x
def _parse_formats(formats_str):
# Semicolon-separated list of choices.
# Examples:
# gct;res;Dataset
import urllib
x = urllib.unquote(formats_str)
return x.split(";")
def _format_formats(formats):
import urllib
x = ";".join(formats)
return urllib.quote(x)
def _parse_choices(choices_str):
# Parse the choices string and return a list of (value,
# pretty_value).
#
# Semicolon-separated list of choices.
# Examples:
# <value>=<pretty_value>;<value>=<pretty_value>
# HIERARCHICAL=Hierarchical;SOM;NMF;KMEANS=KMeans
# -q=Yes;=No
#
# <value> can be blank, indicating a blank parameter will be
# passed to the module if this option is chosen.
# =<pretty_value> is optional. If not specified, then
# <pretty_value> is the same as <value>.
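    # For example, "HIERARCHICAL=Hierarchical;SOM;NMF;KMEANS=KMeans"
    # (after unquoting) parses to:
    #   [("HIERARCHICAL", "Hierarchical"), ("SOM", "SOM"),
    #    ("NMF", "NMF"), ("KMEANS", "KMeans")]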
import urllib
x = urllib.unquote(choices_str)
assert ";" in x, "no choices"
choices = []
for x in x.split(";"):
x = str(x) # no unicode.
value = pretty = x
if "=" in x:
value, pretty = x.split("=", 1)
choices.append((value, pretty))
return choices
def _format_choices(choices):
import urllib
choices_str = []
for value, pretty in choices:
x = value
if value != pretty:
x = "%s=%s" % (value, pretty)
choices_str.append(x)
x = ";".join(choices_str)
x = urllib.quote(x)
return x
```
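A round-trip sketch of the conversions above (module path assumed to be `genomicode.genepattern`; the parameter itself is made up):

```python
from genomicode import genepattern

elem = genepattern.EDropdown(
    "cluster.method", "How to cluster the samples.",
    [("HIERARCHICAL", "Hierarchical"), ("KMEANS", "KMeans")],
    default="HIERARCHICAL")
param = genepattern.elem2param(elem)
xml = genepattern.format_parameter_info([param])
parsed = genepattern.parse_parameter_info(xml)
print parsed[0].name                             # cluster.method
print genepattern.param2elem(parsed[0]).choices  # back to (value, label) pairs
```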
#### File: changlab/genomicode/geo_format.py
```python
import os, sys
import arrayio
from genomicode import Matrix
SAMPLE_NAME = arrayio.tdf.SAMPLE_NAME
class GeoMatrix(Matrix.AbstractMatrix):
def __init__(self, geo_path, GSEID, GPLID, filename=None,
datatype=None, subrows=None, subcols=None,
synonyms=None):
# filename is an optional path to the signal_rank_bin file.
# subrows and subcols are optional lists of indexes to specify
# a subset of this matrix.
from genomicode import filelib
import arrayio
import geolib
Matrix.AbstractMatrix.__init__(self, synonyms)
self.geo_path = geo_path
self.GSEID, self.GPLID = GSEID, GPLID
if filename is None:
filename = geolib.FileFactory.find_signal_rank_file(
geo_path, GSEID, GPLID)
assert filename is not None, "Missing signal rank file"
self.filename = filename
self.handle = open(filename, "rb")
## Test whether we can load the files faster from Redis.
## import StringIO
## import redis
## key = "%s-%s" % (self.GSEID, self.GPLID)
## r = redis.Redis(host='localhost', port=6379, db=0)
## x = r.get(key)
## self.handle = StringIO.StringIO(x)
self.datatype = datatype
nrow, ncol = geolib.load_signal_bin_matrix_size(self.handle)
if subrows is None:
subrows = range(nrow)
if subcols is None:
subcols = range(ncol)
assert not subrows or max(subrows) < nrow
assert not subcols or max(subcols) < ncol
self.subrows = list(subrows)
self.subcols = list(subcols)
self._num_rows = len(self.subrows) # optimization
self._num_cols = len(self.subcols)
# For optimization.
self._full_rows = (subrows == range(nrow))
self._full_cols = (subcols == range(ncol))
self._rownames = None
self._rowcache = {}
self._cache = {} # index -> expression values for row
# Set the cache to hold at most X numbers in memory at a time.
# Test on GSE2109-GPL570, 1785 samples.
# X MEMORY
# 1m 250m
# 5m 600m
# 10m 800m
MAX_NUMBERS = 2E6
# MAX_CACHE_SIZE is the number of rows.
self.MAX_CACHE_SIZE = int(max(10, MAX_NUMBERS/ncol))
def row_names(self, header=None):
import geolib
if header is None:
if self._rownames is None:
self._rownames = geolib.load_signal_bin_rownames(self.handle)
return self._rownames
if header not in self._rowcache:
ids = geolib.load_signal_bin_rownames(self.handle, header)
if not self._full_rows:
ids = [ids[i] for i in self.subrows]
self._rowcache[header] = ids
return self._rowcache[header]
def col_names(self, header=None):
import geolib
if header is None:
return [SAMPLE_NAME]
assert header == SAMPLE_NAME
ids = geolib.load_signal_bin_sample_ids(self.handle)
if not self._full_cols:
ids = [ids[i] for i in self.subcols]
return ids
def _matrix(self, row_indexes, col_indexes):
# Return another instance of this object.
assert not row_indexes or max(row_indexes) < self._num_rows
assert not col_indexes or max(col_indexes) < self._num_cols
subrows, subcols = self.subrows, self.subcols
if row_indexes is not None:
subrows = [subrows[i] for i in row_indexes]
if col_indexes is not None:
subcols = [subcols[i] for i in col_indexes]
x = GeoMatrix(
self.geo_path, self.GSEID, self.GPLID, filename=self.filename,
datatype=self.datatype, subrows=subrows, subcols=subcols)
x._cache = self._cache # provide a pointer to share caches.
x._rownames = self._rownames
for header, ids in self._rowcache.iteritems():
if row_indexes is None:
x._rowcache[header] = ids
else:
x._rowcache[header] = [ids[i] for i in row_indexes]
return x
def _load_rows_cached(self, indexes):
import geolib
I = [i for i in indexes if i not in self._cache]
if I:
X = geolib.load_signal_bin_many_genes(self.handle, I)
for i, x in zip(I, X):
self._cache[i] = x
X = [self._cache[i] for i in indexes]
# Clear the cache if it's too big.
indexes_dict = {}.fromkeys(indexes)
if len(self._cache) > self.MAX_CACHE_SIZE:
keys_to_clear = self._cache.keys()[:self.MAX_CACHE_SIZE/2]
for key in keys_to_clear:
del self._cache[key]
return X
def _slice(self, row_indexes, col_indexes):
# Return just the underlying matrix.
subrows, subcols = self.subrows, self.subcols
if row_indexes is not None:
subrows = [subrows[i] for i in row_indexes]
if col_indexes is not None:
subcols = [subcols[i] for i in col_indexes]
## Not worth optimizing by taking the subset only when
## necessary.
#subrows = [self.subrows[i] for i in row_indexes]
#subcols = [self.subcols[i] for i in col_indexes]
#X = geolib.load_signal_bin_many_genes(self.handle, subrows)
X = self._load_rows_cached(subrows)
if X and subcols != range(len(X[0])):
X = [[x[i] for i in subcols] for x in X]
return X
def dim(self):
# Return tuple of (nrow, ncol).
return self._num_rows, self._num_cols
def make_locator(geo_path, GSEID, GPLID, filename=None):
x = ["geo_format", geo_path, GSEID, GPLID]
if filename is not None:
x.append(filename)
x = ":".join(x)
return x
def is_format(locator_str, hrows=None, hcols=None):
if locator_str.startswith("geo_format"):
return True
return False
def is_matrix(X):
if not hasattr(X, "filename"):
return False
if not hasattr(X, "subrows") or not hasattr(X, "subcols"):
return False
if not hasattr(X, "MAX_CACHE_SIZE"):
return False
return True
def read(handle, hrows=None, hcols=None, datatype=None):
import arrayio
assert type(handle) is type("")
assert is_format(handle)
parts = handle.split(":")
assert len(parts) in [4, 5]
x, geo_path, GSEID, GPLID = parts[:4]
filename = None
if len(parts) == 5:
filename = parts[4]
#print handle, filename
synonyms = {
arrayio.ROW_ID : "Probe.Set.ID",
arrayio.GENE_ID : "LocusLink",
arrayio.GENE_SYMBOL : "Gene.Symbol",
arrayio.COL_ID : SAMPLE_NAME,
}
X = GeoMatrix(
geo_path, GSEID, GPLID, filename=filename, datatype=datatype,
synonyms=synonyms)
return X
def write(X, handle):
raise NotImplementedError, "Saving of GEO DataSets not supported"
def _geo_to_tdf(X):
import arrayio
from genomicode import Matrix
assert is_matrix(X)
assert X.row_names()
assert X.col_names()
_X = X.slice()
row_order = X.row_names()
col_order = X.col_names()
row_names = {}
col_names = {}
synonyms = {}
for name in X.row_names():
row_names[name] = X.row_names(name)
for name in X.col_names():
col_names[name] = X.col_names(name)
# Find a suitable ROW_ID.
row_ids = ["Probe.Set.ID", "Probe Set ID", "PSID", "ID"]
for row_id in row_ids:
if row_id in row_order:
break
else:
row_id = row_order[0]
# Make row_id the first column.
i = row_order.index(row_id)
row_order.pop(i)
row_order = [row_id] + row_order
# Set the synonyms.
synonyms[arrayio.ROW_ID] = row_id
synonyms[arrayio.COL_ID] = col_order[0]
if "Gene.Symbol" in row_order:
synonyms[arrayio.GENE_SYMBOL] = "Gene.Symbol"
if "Description" in row_order:
synonyms[arrayio.GENE_DESCRIPTION] = "Description"
if "LocusLink" in row_order:
synonyms[arrayio.GENE_ID] = "LocusLink"
x = Matrix.InMemoryMatrix(
_X, row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
#x = Matrix.add_synonyms(x, synonyms)
assert arrayio.tab_delimited_format.is_matrix(x)
return x
this_module = sys.modules[__name__]
if this_module not in arrayio.FORMATS:
arrayio.FORMAT_NAMES.insert(0, sys.modules[__name__])
arrayio.FORMATS.insert(0, this_module)
x = "geo_format", "tab_delimited_format", _geo_to_tdf
arrayio.CONVERTERS.insert(0, x)
del this_module
```
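A sketch of the locator convention used by this format (the path and IDs are made up); the locator string stands in for a file name when reading GEO data:

```python
from genomicode import geo_format

locator = geo_format.make_locator("/data/geo", "GSE2109", "GPL570")
print locator                        # geo_format:/data/geo:GSE2109:GPL570
print geo_format.is_format(locator)  # True
# geo_format.read(locator) would then open the signal_rank_bin file for
# GSE2109/GPL570 under /data/geo and return a GeoMatrix.
```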
#### File: changlab/genomicode/graphviz.py
```python
def make_graph(
nodes, edges, node2attributes=None, edge2attributes=None,
prog=None, subgraphs=None, directed=False, rank=False):
# nodes is a list of the names of the nodes, given as strings.
# edges is a list of tuples (<node_a>, <node_b>) that indicate
# which nodes are connected. node2attributes is a dictionary
# where the key is the name of the node and the value is a
# dictionary of attributes. edge2attributes is a dictionary where
# the key is a tuple from edges, and the value is a dictionary of
# attributes. prog is typically "neato" or "dot". subgraph is
# dictionary where the key is the name of the subgraph and value
    # is the list of nodes in that subgraph.  rank, if set, puts the
    # nodes of each subgraph on the same level (e.g. rank="same").
#
# G = make_graph(...)
# G.draw(filename)
# G.write(filename)
# Node attributes:
# style filled
# shape box, circle, ellipse, point, triangle, diamond, octagon
# note, tab, folder
# fillcolor Color of background. (style must be filled).
# color Color of outline, #FFFFFF
#
# Edge attributes:
# style dotted, bold
# len length
# arrowhead
import pygraphviz as pgv
node2attributes = node2attributes or {}
edge2attributes = edge2attributes or {}
# Uses "neato" by default.
prog = prog or "neato"
subgraphs = subgraphs or {}
forcelabels = True
# To speed up dot:
# - nslimit Big effect, but not much time savings.
# nslimit1 Not much effect.
# - maxiter fdp, neato
# - mclimit dot ~100 speeds up a bit. 1-10 fast, but bad
    # - splines=line Looks bad. Not much speedup over mclimit.
# Run with -v to see where it is spending its time.
# Probably mclimit makes biggest difference.
G = pgv.AGraph(dim=2, directed=directed)
#G.graph_attr["splines"] = "line"
#G.graph_attr["nslimit"] = 0.5
#G.graph_attr["nslimit1"] = 0.5
G.graph_attr["mclimit"] = 1
if forcelabels:
G.graph_attr["forcelabels"] = "true"
for node in nodes:
attr = node2attributes.get(node, {})
G.add_node(node, **attr)
for i, j in edges:
attr = edge2attributes.get((i, j), {})
G.add_edge(i, j, **attr)
for name in subgraphs:
G.add_subgraph(subgraphs[name], name, rank=rank)
G.layout(prog=prog)
return G
def layout(nodes, edges, prog=None, subgraphs=None):
# Return a list of the (x, y) coordinates, parallel to nodes.
G = make_graph(nodes, edges, prog=prog, subgraphs=subgraphs)
coords = []
for node in nodes:
n = G.get_node(node)
x = n.attr["pos"]
x, y = x.split(",")
x, y = float(x), float(y)
coords.append((x, y))
return coords
```
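A short usage sketch for `make_graph` and `layout`, assuming pygraphviz is installed; the node names, attributes, and output filenames below are made up for illustration.

```python
# Hypothetical usage of make_graph()/layout(); requires pygraphviz.
from genomicode import graphviz

nodes = ["A", "B", "C"]
edges = [("A", "B"), ("B", "C")]
node2attributes = {"A": {"shape": "box", "style": "filled", "fillcolor": "#CCCCCC"}}
edge2attributes = {("A", "B"): {"style": "bold"}}

# Build and render the graph with the "dot" layout engine.
G = graphviz.make_graph(
    nodes, edges, node2attributes=node2attributes,
    edge2attributes=edge2attributes, prog="dot", directed=True)
G.draw("example.png")   # write an image using the computed layout
G.write("example.dot")  # write the graph in dot format

# Or just retrieve the (x, y) coordinate of each node.
coords = graphviz.layout(nodes, edges, prog="neato")
```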
#### File: changlab/genomicode/hashlib.py
```python
RE_NONWORD = RE_PUNCTUATION = None
def hash_R(s):
# Hash a string using the R algorithm for list names.
global RE_NONWORD
global RE_PUNCTUATION
import re
if RE_NONWORD is None:
RE_NONWORD = re.compile(r"[^a-zA-Z]")
RE_PUNCTUATION = re.compile(r"\W")
#s_orig = s
# If the string starts with a non word character, prepend an "X".
if RE_NONWORD.match(s):
s = "X%s" % s
# Convert all punctuation (except for _) to ".".
s = RE_PUNCTUATION.sub(".", s)
return s
def hash_R_many(names):
hash_R("") # make sure global variables are set
hashed = [None] * len(names)
for i, s in enumerate(names):
if RE_NONWORD.match(s):
s = "X%s" % s
s = RE_PUNCTUATION.sub(".", s)
hashed[i] = s
return hashed
def hash_var(name):
import re
# Fix the header to be a python variable.
x = str(name)
# Replace all non-word character with _.
x = re.sub(r"\W", "_", x)
# Replace initial numbers with Xnumber.
x = re.sub(r"^(\d)", r"X\1", x)
return x
def hash_var_many(names):
import re
x = names
# Fix the header to be a python variable.
x = [str(x) for x in x]
# Replace all non-word character with _.
r = re.compile(r"\W")
x = [r.sub("_", x) for x in x]
# Replace initial numbers with Xnumber.
r = re.compile(r"^(\d)")
x = [r.sub(r"X\g<1>", x) for x in x]
return x
def hash_alnum(name):
import re
# Fix the header to be a python variable.
x = str(name)
# Replace all non-word character with _.
x = re.sub(r"\W", "_", x)
return x
def hash_geneid(id_):
return id_.strip().lower()
def hash_sampleid(id_):
# Hash the sample names so that small differences are ignored. R
# will change the sample names, so if one data set has been
# through R but not the other, the names will be different.
# 2554_6933_32492_Mock1_HG-U133A+2
# X2554_6933_32492_Mock1_HG.U133A.2
import re
x = id_
# If there are alphanumeric characters, then assume that
# punctuation isn't meaningful.
if re.search(r"\w", x):
# Change all non-words to '.', like R does. (This does not
# change underscores.)
x = re.subn(r"\W", ".", x)[0]
# Ignore initial X.
if re.match(r"X[\d\w]", x):
x = x[1:]
# Make case insensitive.
x = x.lower()
return x
def hash_many_geneids(ids):
#return [_hash_geneid(x) for x in ids]
# Optimization: do this without a function call.
return [x.strip().lower() for x in ids]
def hash_many_sampleids(ids):
return [hash_sampleid(x) for x in ids]
def uniquify_by_num(ids):
id2I = {} # id -> list of indexes
for i, id in enumerate(ids):
if id not in id2I:
id2I[id] = []
id2I[id].append(i)
nodup = ids[:]
for (id, I) in id2I.iteritems():
if len(I) < 2:
continue
for i in range(len(I)):
nodup[I[i]] = "%s_%d" % (id, i+1)
return nodup
```
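A behavior sketch for the hashing helpers above; the inputs are made up and the expected outputs (shown as trailing comments) follow from the regular expressions in the code.

```python
# Illustrative inputs; expected results are shown in the trailing comments.
from genomicode import hashlib as ghashlib   # this module, not the stdlib hashlib

print(ghashlib.hash_R("2554_Mock1_HG-U133A+2"))         # X2554_Mock1_HG.U133A.2
print(ghashlib.hash_var("p-value (log10)"))             # p_value__log10_
print(ghashlib.hash_sampleid("X2554_Mock1_HG.U133A.2")) # 2554_mock1_hg.u133a.2
print(ghashlib.uniquify_by_num(["A", "B", "A"]))        # ['A_1', 'B', 'A_2']
```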
#### File: changlab/genomicode/iolib.py
```python
import os, sys
def split_tdf(data, strip=False):
# Bug: will skip blank lines.
matrix = [
x.rstrip("\r\n").split("\t") for x in data.split("\n") if x.strip()]
if strip:
for i in range(len(matrix)):
for j in range(len(matrix[i])):
matrix[i][j] = matrix[i][j].strip()
return matrix
## def read_tdf(handle, num_header_rows, num_header_cols):
## # Format:
## # - First num_header_cols contain metadata, last ones samples.
## # - First row headings, last ones samples.
## # - Return as a single matrix.
## import filelib
## import jmath
## handle = filelib.openfh(handle)
## data = split_tdf(handle.read())
## X = []
## num_cols = None
## for cols in data:
## if num_cols is None:
## num_cols = len(cols)
## assert len(cols) == num_cols
## if len(X) >= num_header_rows:
## x = map(jmath.safe_float, cols[num_header_cols:])
## cols[num_header_cols:] = x
## X.append(cols)
## return X
## def write_tdf(data, outhandle):
## for x in data:
## x = map(str, x)
## print >>outhandle, "\t".join(x)
CLEAN_RE = None
CLEAN_DISALLOWED = None
def _py_cleanwrite(data, outhandle, delim="\t"):
global CLEAN_RE
global CLEAN_DISALLOWED
import re
disallowed = "\r\n" + delim
if CLEAN_RE is None or CLEAN_DISALLOWED != disallowed:
CLEAN_RE = re.compile("[%s]" % disallowed)
CLEAN_DISALLOWED = disallowed
for x in data:
x = x[:]
for i in range(len(x)):
if x[i] is None:
x[i] = ""
x = map(str, x)
x = [CLEAN_RE.subn(" ", x)[0].strip() for x in x]
x = delim.join(x)
print >>outhandle, x
_cleanwrite = _py_cleanwrite
def cleanwrite(data, outhandle, delim="\t"):
import types
# The C version of _cleanwrite requires outhandle to be a file
# object and won't work with StringIO objects. If the user does
# not provide a real file object, then use the python
# implementation.
if type(outhandle) is not types.FileType:
_py_cleanwrite(data, outhandle, delim=delim)
return
#print "Calling _cleanwrite"
_cleanwrite(data, outhandle, delim=delim)
def strip_each(L):
return [x.strip() for x in L]
# Try and load C implementations of functions. If I can't,
# then just ignore and use the pure python implementations.
try:
#raise ImportError
import ciolib
except ImportError:
pass
else:
this_module = sys.modules[__name__]
for name in ciolib.__dict__.keys():
if not name.startswith("__"):
this_module.__dict__[name] = ciolib.__dict__[name]
```
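A small round-trip sketch for `split_tdf` and `cleanwrite`; the table content is made up, and the blank-line behavior matches the comment in `split_tdf`.

```python
import sys
from genomicode import iolib

raw = "gene\tsample1\tsample2\nTP53\t1.5\t2.0\n\nMYC\t0.7\t0.9\n"
matrix = iolib.split_tdf(raw, strip=True)
# -> [['gene', 'sample1', 'sample2'], ['TP53', '1.5', '2.0'], ['MYC', '0.7', '0.9']]
#    (the blank line is skipped, as noted in split_tdf's comment)

# cleanwrite() replaces tabs/newlines inside cells with spaces before writing.
iolib.cleanwrite(matrix, sys.stdout)
```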
#### File: changlab/genomicode/matrixlib.py
```python
def align_rows(*matrices):
"""Aligns matrices by ROW_ID. Return a list of the matrices after
the rows are aligned. Raises an exception if no rows are common
to all matrices.
"""
import arrayio
import hashlib
header = arrayio.ROW_ID
if not matrices:
return []
for m in matrices:
assert m.nrow() > 0 and m.ncol() > 0
# Make sure each of the matrices contain the row ID.
for m in matrices:
assert header in m.row_names() or header in m._synonyms, \
"Matrix is missing row IDs (%s)." % header
# Get the intersection of the IDs.
all_ids = [None]*len(matrices) # matrix index -> list of hids
for i in range(len(matrices)):
all_ids[i] = hashlib.hash_many_geneids(matrices[i].row_names(header))
ids_hash = {} # hid -> matrix_i -> row_i, id
for i, ids in enumerate(all_ids):
for j, id_ in enumerate(ids):
hid = hashlib.hash_geneid(id_)
# If I have not seen this hashed ID before, then add it to
# the dictionary.
if hid not in ids_hash:
ids_hash[hid] = {}
# If a dataset has duplicate IDs, then take the first one only.
if i in ids_hash[hid]:
continue
ids_hash[hid][i] = j, id_
# Use only the IDs that occur in all data files. Use the order of
# the samples from first data set.
ids = [x for x in all_ids[0] if len(ids_hash[x]) == len(matrices)]
assert len(ids) > 0, "The data sets all have different row IDs."
# Align the rows by the ids.
aligned = [None] * len(matrices)
for i in range(len(matrices)):
I = [ids_hash[x][i][0] for x in ids]
x = matrices[i].matrix(row=I)
aligned[i] = x
assert are_rows_aligned(*aligned), "matrices not aligned"
return aligned
def align_cols(*matrices):
"""Aligns matrices by COL_ID. Return a list of the matrices after
the columns are aligned. Raises an exception if no columns are
common to all matrices.
"""
import arrayio
import hashlib
header = arrayio.COL_ID
if not matrices:
return []
for m in matrices:
assert m.nrow() > 0 and m.ncol() > 0
# Get the intersection of the IDs.
all_ids = [None]*len(matrices) # matrix index -> list of hids
for i in range(len(matrices)):
all_ids[i] = hashlib.hash_many_sampleids(matrices[i].col_names(header))
ids_hash = {} # hid -> matrix_i -> col_i, id
for i, ids in enumerate(all_ids):
for j, id_ in enumerate(ids):
hid = hashlib.hash_sampleid(id_)
# If I have not seen this hashed ID before, then add it to
# the dictionary.
if hid not in ids_hash:
ids_hash[hid] = {}
# If a dataset has duplicate IDs, then take the first one only.
if i in ids_hash[hid]:
continue
ids_hash[hid][i] = j, id_
# Use only the IDs that occur in all data files. Use the order of
# the samples from first data set.
ids = [x for x in all_ids[0] if len(ids_hash[x]) == len(matrices)]
assert len(ids) > 0, "The data sets have different column IDs."
# Align the columns by the ids.
aligned = [None] * len(matrices)
for i in range(len(matrices)):
I = [ids_hash[x][i][0] for x in ids]
x = matrices[i].matrix(col=I)
aligned[i] = x
assert are_cols_aligned(*aligned), "matrices not aligned"
return aligned
def are_rows_aligned(*matrices):
import arrayio
import hashlib
header = arrayio.ROW_ID
if len(matrices) <= 1:
return True
# Check the number of rows in each matrix.
for i in range(1, len(matrices)):
if matrices[0].nrow() != matrices[i].nrow():
return False
# Check the names of the rows.
hnames = [hashlib.hash_many_geneids(x.row_names(header)) for x in matrices]
for i in range(1, len(matrices)):
if hnames[0] != hnames[i]:
return False
return True
def are_cols_aligned(*matrices):
import arrayio
import hashlib
header = arrayio.COL_ID
if len(matrices) <= 1:
return True
# Check the number of columns in each matrix.
for i in range(1, len(matrices)):
if matrices[0].ncol() != matrices[i].ncol():
return False
# Check the names of the columns.
hnames = [
hashlib.hash_many_sampleids(x.col_names(header)) for x in matrices]
for i in range(1, len(matrices)):
if hnames[0] != hnames[i]:
return False
return True
def assert_rows_aligned(*matrices):
# Ignore None.
matrices = [x for x in matrices if x]
assert are_rows_aligned(*matrices)
def describe_unaligned_rows(*matrices):
# Return a text string that describes where the rows are not aligned.
import arrayio
import parselib
import hashlib
header = arrayio.ROW_ID
if len(matrices) <= 1:
return "Only 1 matrix. Must be aligned."
# Check the number of rows in each matrix.
num_rows = [x.nrow() for x in matrices]
if min(num_rows) != max(num_rows):
x = "Matrices have differing number of rows: %s" % ", ".join(
map(parselib.pretty_int, num_rows))
return x
# Check the names of the rows.
names = [x.row_names(header) for x in matrices]
hnames = [hashlib.hash_many_geneids(x.row_names(header)) for x in matrices]
bad_rows = []
for i in range(matrices[0].nrow()):
        unaligned = False
for j in range(1, len(matrices)):
if hnames[0][i] != hnames[j][i]:
unaligned = True
if unaligned:
x = [names[j][i] for j in range(len(matrices))]
x = "Row %s: %s" % (parselib.pretty_int(i+1), ", ".join(x))
bad_rows.append(x)
if not bad_rows:
return "Matrices are aligned."
total_bad = len(bad_rows)
if total_bad > 10:
bad_rows = bad_rows[:10]
bad_rows.append("...")
x = "%s of %s rows are unaligned." % (
parselib.pretty_int(total_bad),
parselib.pretty_int(matrices[0].nrow()))
lines = [x] + bad_rows
return "\n".join(lines)
def align_rows_to_annot(
MATRIX, row_names, header=None, hash=False, get_indexes=False,
reorder_MATRIX=False):
# Align a set of row names (e.g. gene names) to a MATRIX. Return
# tuple of aligned (MATRIX, row_names). If get_indexes is True,
# then will return a tuple (I_MATRIX, I_row_names) of the indexes
# to align the matrices instead. If there are no common names,
# then MATRIX and row_names will be empty. If header is given,
# then will use the row names from that header. Otherwise, will
# search through all possible headers for the best match.
# XXX document hash
x = _align_to_annot_I(
MATRIX, row_names, header, hash, True, reorder_MATRIX)
I1, I2 = x
if get_indexes:
return I1, I2
MATRIX = MATRIX.matrix(I1, None)
row_names = [row_names[x] for x in I2]
return MATRIX, row_names
def align_cols_to_annot(
MATRIX, col_names, header=None, hash=False, get_indexes=False,
reorder_MATRIX=False):
# Return tuple of aligned (MATRIX, col_names).
x = _align_to_annot_I(
MATRIX, col_names, header, hash, False, reorder_MATRIX)
I1, I2 = x
if get_indexes:
return I1, I2
MATRIX = MATRIX.matrix(None, I1)
col_names = [col_names[x] for x in I2]
return MATRIX, col_names
def align_rows_to_many_annots(
MATRIX, many_row_names, header=None, hash=False, get_indexes=False,
reorder_MATRIX=False):
# Align a list of possible row names (e.g. probe ids, gene names,
# and gene descriptions) to a MATRIX. Tries to find the best
# match between the row names and the annotations for the matrix.
#
# Return tuple of aligned (MATRIX, row_names). If get_indexes is
# True, then return (I_MATRIX, I_row_names, index into
# many_row_names).
x = _align_to_many_annots_I(
MATRIX, many_row_names, header, hash, True, reorder_MATRIX)
I_matrix, I_names, index = x
if get_indexes:
return I_matrix, I_names, index
MATRIX = MATRIX.matrix(I_matrix, None)
row_names = many_row_names[index]
row_names = [row_names[x] for x in I_names]
return MATRIX, row_names
def align_cols_to_many_annots(
MATRIX, many_col_names, header=None, hash=False, get_indexes=False,
reorder_MATRIX=False):
# many_col_names is list of col_names, where col_names is a list
# of the column names to be matched to the MATRIX. Return tuple
# of aligned (MATRIX, col_names). If get_indexes is True, then
# return (I_MATRIX, I_col_names, index into many_col_names). XXX
# BEST MATCH?
x = _align_to_many_annots_I(
MATRIX, many_col_names, header, hash, False, reorder_MATRIX)
I_matrix, I_names, index = x
if get_indexes:
return I_matrix, I_names, index
MATRIX = MATRIX.matrix(I_matrix, None)
col_names = many_col_names[index]
col_names = [col_names[x] for x in I_names]
return MATRIX, col_names
def _align_to_many_annots_I(
MATRIX, many_names, header, hash, is_row, reorder_MATRIX):
best_I_matrix = best_I_names = best_index = None
for i, names in enumerate(many_names):
x = _align_to_annot_I(
MATRIX, names, header, hash, is_row, reorder_MATRIX)
I_matrix, I_names = x
if best_I_matrix is None or len(I_matrix) > len(best_I_matrix):
best_I_matrix = I_matrix
best_I_names = I_names
best_index = i
#print "BEST MATCH", many_names[best_index][:5]
return best_I_matrix, best_I_names, best_index
def _align_to_annot_I(MATRIX, names, header, hash, is_row, reorder_MATRIX):
# If header is given, will only try to align that header.
# Otherwise, tries all headers.
get_names = MATRIX.row_names
if not is_row:
get_names = MATRIX.col_names
headers = [header]
if header is None:
headers = get_names()
assert headers
best_header = best_I_matrix = best_I_names = None
for header in headers:
x = _align_header_to_annot_I(
MATRIX, names, header, hash, is_row, reorder_MATRIX)
I_matrix, I_names = x
if best_I_matrix is None or len(I_matrix) > len(best_I_matrix):
best_header = header
best_I_matrix, best_I_names = I_matrix, I_names
#if len(best_I_matrix):
# print "MATCH", best_header, best_I_matrix[:5], best_I_names[:5]
# for i in range(min(5, len(best_I_matrix))):
# i0, i1 = best_I_matrix[i], best_I_names[i]
# print get_names(best_header)[i0], names[i1]
return best_I_matrix, best_I_names
def _align_header_to_annot_I(
MATRIX, names, header, hash, is_row, reorder_MATRIX):
import jmath
import hashlib
# Not needed.
#num = MATRIX.nrow()
#if not is_row:
    #    num = MATRIX.ncol()
#if not num:
# return [], []
get_names = MATRIX.row_names
if not is_row:
get_names = MATRIX.col_names
matrix_names = get_names(header)
h_names = names
h_matrix_names = matrix_names
if hash:
#h_names = [hashlib.hash_R(x) for x in names]
#h_matrix_names = [hashlib.hash_R(x) for x in matrix_names]
h_names = hashlib.hash_R_many(names)
h_matrix_names = hashlib.hash_R_many(matrix_names)
if reorder_MATRIX:
# Align MATRIX to names.
I_MATRIX = jmath.match(h_names, h_matrix_names)
I_names = []
for i in range(len(I_MATRIX)):
if I_MATRIX[i] is not None:
I_names.append(i)
I_MATRIX = [x for x in I_MATRIX if x is not None]
else:
# Align names to MATRIX.
I_names = jmath.match(h_matrix_names, h_names)
I_MATRIX = []
for i in range(len(I_names)):
if I_names[i] is not None:
I_MATRIX.append(i)
I_names = [x for x in I_names if x is not None]
assert len(I_MATRIX) == len(I_names)
return I_MATRIX, I_names
## def _match_rownames_to_geneset(MATRIX, all_genesets, geneset2genes):
## # Return tuple of (I_matrix, I_geneset) or None if no match can be
## # found. Will find the largest match possible.
## # Align every geneset to every row name in the matrix.
## geneset_aligns = [] # list of (I_geneset, rowname, geneset)
## matrix_aligns = [] # list of (I_matrix, rowname, geneset)
## for name in MATRIX.row_names():
## annots = MATRIX.row_names(name)
## for gs in all_genesets:
## genes = geneset2genes[gs]
## I_geneset = _align_geneset_to_matrix(annots, genes)
## I_matrix = _align_matrix_to_geneset(annots, genes)
## if I_geneset:
## x = I_geneset, name, gs
## geneset_aligns.append(x)
## if I_matrix:
## x = I_matrix, name, gs
## matrix_aligns.append(x)
## # First, try to find a geneset that matches the exactly matrix.
## # Favor geneset_aligns over matrix_aligns to avoid changing the
## # matrix.
## for x in geneset_aligns:
## I_geneset, rowname, geneset = x
## I_matrix = range(MATRIX.nrow())
## assert len(I_matrix) == len(I_geneset)
## return I_matrix, I_geneset
## # Otherwise, choose the match that generates the largest matrix.
## I_matrix = None
## for x in matrix_aligns:
## I, rowname, geneset = x
## if I_matrix is None or len(I) > len(I_matrix):
## I_matrix = I
## if I_matrix:
## I_geneset = range(len(I_matrix))
## return I_matrix, I_geneset
## return None
def find_row_header(MATRIX, row_names, hash=False):
# Find the column (row header) from the MATRIX that contains all
# the row_names. If multiple headers match, then just return the
# first one. If none match, return None.
x = find_best_row_header(MATRIX, row_names, hash=hash)
header, num_matches, found, missing = x
if num_matches == len(row_names):
return header
return None
def find_col_header(MATRIX, col_names, hash=False):
    x = find_best_col_header(MATRIX, col_names, hash=hash)
header, num_matches, found, missing = x
if num_matches == len(col_names):
return header
return None
def _find_best_header(MATRIX, names, hash, is_row):
import hashlib
get_names = MATRIX.row_names
if not is_row:
get_names = MATRIX.col_names
h_names = names
if hash:
# Hash the names for comparison.
#h_names = [hashlib.hash_R(x) for x in names]
h_names = hashlib.hash_R_many(names)
# Count the number of names that are found in each header.
header2found = {} # header -> list of names found
header2missing = {} # header -> list of names not found
for header in get_names():
matrix_names = get_names(header)
h_matrix_names = matrix_names
if hash:
h_matrix_names = [hashlib.hash_R(x) for x in matrix_names]
h_matrix_names = {}.fromkeys(h_matrix_names)
x1 = [x for x in h_names if x in h_matrix_names]
x2 = [x for x in h_names if x not in h_matrix_names]
header2found[header] = x1
header2missing[header] = x2
# Find the best match.
    best_header = best_found = best_num_missing = best_missing = None
for header in get_names():
found = header2found[header]
missing = header2missing[header]
num_missing = len(missing)
if best_header is None or num_missing < best_num_missing:
best_header = header
best_found = found
best_num_missing = num_missing
best_missing = missing
return best_header, len(names)-best_num_missing, best_found, best_missing
def find_best_row_header(MATRIX, row_names, hash=False):
# Find the column (row header) from the MATRIX that contains the
# most row_names. If multiple headers match the same number, then
# just return the first one. Return a tuple of (header,
# num_matches, list of matches, list of mismatches).
return _find_best_header(MATRIX, row_names, hash, True)
def find_best_col_header(MATRIX, col_names, hash=False):
return _find_best_header(MATRIX, col_names, hash, False)
def read_matrices(filenames, cache=None):
"""Read a list of matrices and align them. filenames is a list of
the matrix files to read. Returns a tuple where the first element
is a list of the matrices read, and the second is the aligned
matrix.
cache is an optional dictionary of filename to matrix. This can
be used to prevent re-reading of matrices.
"""
import copy
import arrayio
import filelib
for filename in filenames:
assert filelib.exists(filename), "File not found: %s" % filename
# Load the files.
DATA = []
for filename in filenames:
if cache is not None and filename in cache:
x = copy.deepcopy(cache[filename])
else:
try:
x = arrayio.read(filename)
except (SystemError, KeyboardInterrupt, MemoryError), x:
raise
except Exception, x:
# Can diagnose which file failed here.
# raise
raise Exception, "Problem reading %s: %s" % (
repr(filename), str(x))
if cache is not None:
cache[filename] = x
DATA.append(x)
#for d, filename in zip(DATA, filenames):
# f = os.path.split(filename)[1]
# print "%s has %d genes and %d samples." % (f, d.nrow(), d.ncol())
# Align the matrices.
ALIGNED = align_rows(*DATA)
#if DATA:
# print "Merged file has %d genes." % DATA[0].nrow()
#sys.stdout.flush()
return DATA, ALIGNED
def merge_matrices(*matrices):
import arrayio
import Matrix
assert len(matrices)
assert are_rows_aligned(*matrices)
# Get the values in list of lists format.
X = [x.value() for x in matrices]
X_all = []
for i in range(len(X[0])): # each row
x = []
for j in range(len(X)): # each matrix
x.extend(X[j][i])
X_all.append(x)
# The sample names may not be unique.
matrix0 = matrices[0]
row_order = []
col_order = []
row_names = {}
col_names = {}
synonyms = {}
# Use the annotations from the first matrix.
for name in matrix0.row_names():
row_names[name] = matrix0.row_names(name)
row_order = matrix0._row_order
# Add any annotations that do not occur in the first matrix.
for m in matrices[1:]:
for name in m.row_names():
if name in row_names:
continue
row_names[name] = m.row_names(name)
row_order.append(name)
# NotImplemented: copy over the column annotations.
# Copy over the sample names.
col_order = [matrix0._col_order[0]]
x = []
for m in matrices:
x.extend(m.col_names(arrayio.COL_ID))
col_names[col_order[0]] = x
synonyms[arrayio.ROW_ID] = matrix0._synonyms[arrayio.ROW_ID]
synonyms[arrayio.COL_ID] = col_order[0]
DATA = Matrix.InMemoryMatrix(
X_all, row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
#DATA = Matrix.add_synonyms(DATA, synonyms)
return DATA
def merge_gct_matrices(*matrices):
import arrayio
import Matrix
assert len(matrices)
assert are_rows_aligned(*matrices)
for m in matrices:
assert arrayio.gct_format.is_matrix(m)
# Get the values in list of lists format.
X = [x.value() for x in matrices]
X_all = []
for i in range(len(X[0])): # each row
x = []
for j in range(len(X)): # each matrix
x.extend(X[j][i])
X_all.append(x)
# The sample names may not be unique.
row_order = ["NAME", "DESCRIPTION"]
col_order = []
row_names = {}
col_names = {}
synonyms = {}
# Should be in GCT format.
matrix0 = matrices[0]
assert len(matrix0.row_names()) == 2, "Invalid file format."
# Just assume the first column is the NAME and second is the
# DESCRIPTION. Allow more flexibility in the actual column
# headers.
name, description = matrix0.row_names()
row_names["NAME"] = matrix0.row_names(name)
row_names["DESCRIPTION"] = matrix0.row_names(description)
for m in matrices:
assert len(m.col_names()) == 1
col_order = matrix0.col_names()
assert len(col_order) == 1
x = []
for m in matrices:
x.extend(m.col_names(m.col_names()[0]))
col_names[col_order[0]] = x
synonyms[arrayio.ROW_ID] = "NAME"
synonyms[arrayio.COL_ID] = col_order[0]
DATA = Matrix.InMemoryMatrix(
X_all, row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
#DATA = Matrix.add_synonyms(DATA, synonyms)
assert arrayio.gct_format.is_matrix(DATA)
return DATA
```
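A hedged usage sketch for the alignment helpers, assuming two expression files that `arrayio.read` can parse; the filenames and gene symbols are hypothetical.

```python
import arrayio
from genomicode import matrixlib

M1 = arrayio.read("dataset1.txt")   # hypothetical files
M2 = arrayio.read("dataset2.txt")

# Align by row ID; raises if the matrices share no row IDs.
A1, A2 = matrixlib.align_rows(M1, M2)
assert matrixlib.are_rows_aligned(A1, A2)

# Or read and align in one step (the cache avoids re-reading files).
cache = {}
DATA, ALIGNED = matrixlib.read_matrices(["dataset1.txt", "dataset2.txt"], cache=cache)

# Find the row header that best matches a list of gene symbols.
x = matrixlib.find_best_row_header(M1, ["TP53", "MYC", "E2F1"], hash=True)
header, num_matches, found, missing = x
```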
#### File: changlab/genomicode/mplgraph.py
```python
import numpy
import pylab
from matplotlib import axis
import matplotlib.pyplot as plt
def boxplot(*args,**keywds):
"""Return the pylab figure object.
data List of list for the data.
xlabel what label to put on x axis.
ylabel what label to put on y axis.
title what label to put on title.
box_label what to put in xticklabel.
    box_color color for the boxes, e.g. 'black', 'red'.
    whisker_color color for the whiskers, e.g. 'black', 'red'.
    tick_size font size of the tick labels.
left the left margin distance
right the right margin distance
top the top margin distance
bottom the bottom margin distance
"""
assert len(args) == 1, "Specify data"
data, = args
data=[numpy.array(i) for i in data]
xlabel = keywds.get("xlabel",None)
ylabel = keywds.get("ylabel",None)
title = keywds.get("title",None)
box_label = keywds.get("box_label",None)
tick_size = keywds.get("tick_size",10)
left = keywds.get("left",0.075)
right = keywds.get("right",0.95)
top = keywds.get("top",0.9)
bottom = keywds.get("bottom",0.25)
box_color = keywds.get("box_color",'blue')
whisker_color = keywds.get("whisker_color",'black')
assert data,'No data provided for the box plot.'
#check the inputs
if box_label:
assert len(box_label)==len(data)
fig=pylab.figure()
bp = pylab.boxplot(data, sym='o', vert=1, whis=1.5)
if xlabel:
pylab.xlabel(xlabel)
if ylabel:
pylab.ylabel(ylabel)
if title:
pylab.title(title)
if box_label:
name = box_label
if len(data)<=12:
label = box_label
else:
label = ['']*len(data)
index = [int(round(len(data)/12.0*i)) for i in range(12)]
for i in range(12):
label[index[i]] = box_label[index[i]]
ax = pylab.gca()
ax.set_xticks(range(1,len(data)+1))
ax.set_xticklabels(tuple(label),rotation='vertical',fontsize=tick_size)
pylab.setp(bp['boxes'], color=box_color)
pylab.setp(bp['whiskers'], color=whisker_color)
pylab.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
return fig
##def scatter(*args,**keywds):
## """Return the figure object.
## x1,x2 List of list for the two dimension data.
## xlabel what label to put on x axis.
## ylabel what label to put on y axis.
## title what label to put on title.
## left the left margin distance
## right the right margin distance
## top the top margin distance
## bottom the bottom margin distance
## label a list of label for each point
## color list of color for each point or a single color for all the points
## legend list of text for legend
## """
## assert len(args) == 2, "Input data should be two dimension"
## [x1, x2] = args
## xlabel = keywds.get("xlabel",None)
## ylabel = keywds.get("ylabel",None)
## title = keywds.get("title",None)
## labels = keywds.get('label',None)
## legend = keywds.get('legend',None)
## color = keywds.get('color','b')
## left = keywds.get("left",0.17)
## right = keywds.get("right",0.88)
## top = keywds.get("top",0.86)
## bottom = keywds.get("bottom",0.13)
## assert [x1,x2],'No data provided for the box plot.'
## #check the inputs
## if labels:
## assert len(labels)==len(x1)
## fig=pylab.figure()
## pylab.scatter(x1,x2,marker = 'o',s=50,c=color)
## if xlabel:
## pylab.xlabel(xlabel)
## if ylabel:
## pylab.ylabel(ylabel)
## if title:
## pylab.title(title)
## pylab.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
## if labels:
## txt_height = 0.08*(pylab.ylim()[1] - pylab.ylim()[0])
## txt_width = 0.04*(pylab.xlim()[1] - pylab.xlim()[0])
## text_positions = get_text_positions(x1, x2, txt_width, txt_height)
## for label, x, y,t in zip(labels, x1, x2,text_positions):
## pylab.annotate(
## label,
## xy = (x, y),xytext = (x-txt_width, t*1.05),
## textcoords = 'data',size=6,
## #bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
## arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
## return fig
def scatter(*args,**keywds):
"""Return the figure object.
x1,x2 List of list for the two dimension data.
xlabel what label to put on x axis.
ylabel what label to put on y axis.
title what label to put on title.
left the left margin distance
right the right margin distance
top the top margin distance
bottom the bottom margin distance
label a list of label for each point
color list of color for each point or a single color for all the points
legend list of text for legend for each point
"""
assert len(args) == 2, "Input data should be two dimension"
[x1, x2] = args
xlabel = keywds.get("xlabel",None)
ylabel = keywds.get("ylabel",None)
title = keywds.get("title",None)
labels = keywds.get('label',None)
legend = keywds.get('legend',None)
color = keywds.get('color','b')
left = keywds.get("left",0.17)
right = keywds.get("right",0.88)
top = keywds.get("top",0.86)
bottom = keywds.get("bottom",0.13)
    assert x1 and x2, 'No data provided for the scatter plot.'
#check the inputs
if labels:
assert len(labels)==len(x1)
if legend:
assert len(legend)==len(x1)
if len(color)>1:
assert len(color)==len(x1)
fig=pylab.figure()
if len(color)>1 and legend:
old_legend = numpy.array(legend)
legend = reduce(lambda x, y: x if y in x else x + [y], legend, [])
color = reduce(lambda x, y: x if y in x else x + [y], color, [])
for l,t in zip(legend,color):
pylab.scatter(numpy.array(x1)[old_legend==l],numpy.array(x2)[old_legend==l],label=l,marker = 'o',s=50,c=t)
pylab.legend()
elif not legend:
pylab.scatter(x1,x2,marker = 'o',s=50,c=color)
if xlabel:
pylab.xlabel(xlabel)
if ylabel:
pylab.ylabel(ylabel)
if title:
pylab.title(title)
pylab.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
if labels:
txt_height = 0.08*(pylab.ylim()[1] - pylab.ylim()[0])
txt_width = 0.04*(pylab.xlim()[1] - pylab.xlim()[0])
text_positions = get_text_positions(x1, x2, txt_width, txt_height)
for label, x, y,t in zip(labels, x1, x2,text_positions):
pylab.annotate(
label,
xy = (x, y),xytext = (x-txt_width, t*1.05),
textcoords = 'data',size=6,
#bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
return fig
def get_text_positions(x_data, y_data, txt_width, txt_height):
a = zip(y_data, x_data)
text_positions = y_data[:]
for index, (y, x) in enumerate(a):
local_text_positions = [i for i in a if i[0] > (y - txt_height)
and (abs(i[1] - x) < txt_width * 2) and i != (y,x)]
if local_text_positions:
sorted_ltp = sorted(local_text_positions)
if abs(sorted_ltp[0][0] - y) < txt_height: #True == collision
differ = numpy.diff(sorted_ltp, axis=0)
a[index] = (sorted_ltp[-1][0] + txt_height, a[index][1])
text_positions[index] = sorted_ltp[-1][0] + txt_height
for k, (j, m) in enumerate(differ):
#j is the vertical distance between words
if j > txt_height * 2: #if True then room to fit a word in
a[index] = (sorted_ltp[k][0] + txt_height, a[index][1])
text_positions[index] = sorted_ltp[k][0] + txt_height
break
return text_positions
def lineplot(*args,**keywds):
"""Return the pylab figure object.
line1 List of the (x, y) coordinates of the points.
line2 ...
xlabel what label to put on x axis.
ylabel what label to put on y axis.
title what label to put on title.
box_label what to put in xticklabel.
tick_size what font size of the tick label.
left the left margin distance
right the right margin distance
top the top margin distance
bottom the bottom margin distance
legend list of legend, 1 for each line
color List of colors, 1 for each line
"""
assert args, "No lines given."
lines = args
for line in lines:
for x in line:
assert len(x) in [2, 3], str(x)
X=[[x[0] for x in line] for line in lines]
Y=[[x[1] for x in line] for line in lines]
xlabel = keywds.get("xlabel",None)
ylabel = keywds.get("ylabel",None)
title = keywds.get("title",None)
box_label = keywds.get("box_label",None)
tick_size = keywds.get("tick_size",10)
left = keywds.get("left",0.075)
right = keywds.get("right",0.95)
top = keywds.get("top",0.9)
bottom = keywds.get("bottom",0.25)
color = keywds.get("color",'b')
legend = keywds.get("legend",None)
ylim_min = keywds.get("ylim_min",None)
assert lines,'No data provided for the line plot.'
#check the inputs
if box_label:
assert len(box_label)==len(lines[0])
fig=pylab.figure()
if isinstance(color,list):
for x,y,c in zip(X,Y,color):
bp = pylab.plot(x,y,color=c)
else:
for x,y in zip(X,Y):
bp = pylab.plot(x,y)
if xlabel:
pylab.xlabel(xlabel)
if ylabel:
pylab.ylabel(ylabel)
if title:
pylab.title(title)
if box_label:
if len(lines[0])<=12:
label = box_label
else:
label = ['']*len(lines[0])
index = [int(round(len(lines[0])/12.0*i)) for i in range(12)]
for i in range(12):
label[index[i]] = box_label[index[i]]
ax = pylab.gca()
ax.set_xticks(x)
ax.set_xticklabels(tuple(label),rotation='vertical',fontsize=tick_size)
if legend:
        pylab.legend(legend, loc='best', shadow=True, fancybox=True)
if ylim_min is not None:
b=max(max(Y))
pylab.ylim((ylim_min,b+1))
pylab.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
return fig
def barplot(*args,**keywds):
"""Return the pylab figure object.
mean List of means for the data.
std List of standard variation.
xlabel what label to put on x axis.
ylabel what label to put on y axis.
title what label to put on title.
box_label what to put in xticklabel.
tick_size what font size of the tick label.
left the left margin distance.
right the right margin distance.
top the top margin distance.
bottom the bottom margin distance.
    xtick_rotation rotation for the box_label text, e.g. 'vertical'.
ylim (min,max) of the limit of y_axis.
ytick_pos List of position to put yticks.
yticks List of ticks to put in y_axis.
"""
    assert len(args) in [1, 2], "Specify data"
if len(args)==1:
mean, = args
std = [0]*len(mean)
elif len(args)==2:
mean,std = args
xlabel = keywds.get("xlabel",None)
ylabel = keywds.get("ylabel",None)
title = keywds.get("title",None)
box_label = keywds.get("box_label",None)
tick_size = keywds.get("tick_size",10)
left = keywds.get("left",0.085)
right = keywds.get("right",0.95)
top = keywds.get("top",0.9)
bottom = keywds.get("bottom",0.35)
xtick_rotation = keywds.get('xtick_rotation',None)
assert mean,'No data provided for the bar plot.'
#check the inputs
if box_label:
assert len(box_label)==len(mean)
ind = numpy.arange(len(mean))
width = 0.35
fig = pylab.figure()
bp = pylab.bar(ind, mean, width, yerr=std, color='y', align="center")
if xlabel:
pylab.xlabel(xlabel)
if ylabel:
pylab.ylabel(ylabel)
if title:
pylab.title(title)
if box_label:
if len(mean)<=12:
label = box_label
else:
label = ['']*len(mean)
index = [int(round(len(mean)/12.0*i)) for i in range(12)]
for i in range(12):
label[index[i]] = box_label[index[i]]
#pylab.xticks(ind+width/2.,label,rotation=xtick_rotation)
pylab.xticks(ind, label, rotation=xtick_rotation)
if keywds.get('ylim'):
pylab.ylim(keywds.get('ylim'))
if keywds.get('ytick_pos') and keywds.get('yticks'):
pylab.yticks(keywds.get('ytick_pos'),keywds.get('yticks'))
pylab.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
return fig
```
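A sketch of calling the plotting helpers above; matplotlib/pylab must be available, and the data values and output filenames are made up.

```python
from genomicode import mplgraph

data = [[1.2, 1.5, 1.1, 1.8], [2.0, 2.4, 1.9], [0.8, 1.0, 1.3]]
fig = mplgraph.boxplot(
    data, box_label=["groupA", "groupB", "groupC"],
    xlabel="Group", ylabel="Score", title="Example boxplot")
fig.savefig("boxplot.png")

line1 = [(0, 0.1), (1, 0.4), (2, 0.9)]
line2 = [(0, 0.2), (1, 0.3), (2, 0.5)]
fig = mplgraph.lineplot(line1, line2, legend=["model A", "model B"])
fig.savefig("lineplot.png")

fig = mplgraph.barplot([1.0, 2.5, 1.7], [0.1, 0.3, 0.2], box_label=["a", "b", "c"])
fig.savefig("barplot.png")
```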
#### File: changlab/genomicode/quantnorm.py
```python
import os
def normalize(X, which_columns=None):
# X is a Matrix of the data. which_columns is a list of the
# columns used to calculate the quantiles. If None, then will use
# every column. Returns the normalized version of the matrix.
import jmath
# Make a list of the quantiles from smallest to largest.
if which_columns is None:
which_columns = range(X.ncol())
X_quant = X[(None, which_columns)]
quantiles = sorted(jmath.median(X_quant))
X_norm = [[None]*X.ncol() for i in range(X.nrow())]
for i in range(X.ncol()):
# Normalize each column.
col = X[(None, i)]
O = jmath.order(col)
for o, v in zip(O, quantiles):
X_norm[o][i] = v
Y = X.matrix()
Y._X = X_norm
return Y
def normalize_binreg(X, which_columns=None, matlab=None, binreg_path=None):
# X is a Matrix of the data. which_columns is a list of the
# columns used to calculate the quantiles. If None, then will use
# every column. Returns the normalized version of the matrix.
#import copy
import subprocess
import tempfile
import jmath
import Matrix
import binreg
from dwdnorm import _write_matlab_matrix, _parse_normalized_matrix
from dwdnorm import _safe_unlink
# Make a list of the quantiles from smallest to largest.
if which_columns is None:
which_columns = range(X.ncol())
X_quant = X[(None, which_columns)]
m = jmath.median(X_quant)
m = Matrix.InMemoryMatrix([m])
# Set defaults.
matlab = matlab or "matlab"
binreg_path = binreg.find_binreg_20(binreg_path)
assert binreg_path, "I could not find Binreg2.0"
binreg_path = os.path.realpath(binreg_path)
temp_path = "."
# Start an instance of matlab.
matlab_args = [
"-nosplash", "-nodesktop", "-nodisplay", "-nojvm"]
x = " ".join(matlab_args)
cmd = "%s %s" % (matlab, x)
#print cmd
#w, r = os.popen4(cmd, bufsize=0)
p = subprocess.Popen(
cmd, shell=True, bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
w, r = p.stdin, p.stdout
# Run the normalization.
X_file = m_file = None
try:
x, X_file = tempfile.mkstemp(dir=temp_path); os.close(x)
x, m_file = tempfile.mkstemp(dir=temp_path); os.close(x)
_write_matlab_matrix(X, X_file)
_write_matlab_matrix(m, m_file)
script = _format_exec_file(X_file, m_file, binreg_path)
w.write(script)
w.close()
lines = r.readlines()
finally:
_safe_unlink(X_file)
_safe_unlink(m_file)
# Debug: print out the matrix returned by Matlab.
#for l in lines:
# print l,
X_norm = _parse_normalized_matrix(lines)
assert len(X_norm) == X.nrow()
if X_norm:
assert len(X_norm[0]) == X.ncol()
Y = X.matrix()
Y._X = X_norm
return Y
def _format_exec_file(X_file, m_file, binreg_path):
from StringIO import StringIO
handle = StringIO()
w = handle.write
w("X = load('%s');\n" % X_file)
w("m = load('%s');\n" % m_file)
w("m = m(1,:);\n") # convert m from matrix to vector.
w("\n");
w("curpath = cd;\n")
w("cd('%s');\n" % binreg_path)
w("Xout = quantnorm(X, m);\n")
w("cd(curpath);\n")
w("\n")
w("disp('NORMALIZED MATRIX')\n")
w("disp(num2str(Xout, 16))\n")
w("quit;\n")
handle.seek(0)
return handle.read()
```
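`normalize` above implements quantile normalization on top of the Matrix and jmath helpers. For readers unfamiliar with the technique, here is a self-contained numpy sketch of the general idea; it illustrates the concept rather than reproducing `normalize` exactly (numpy is assumed only for this sketch).

```python
import numpy as np

def quantile_normalize(X):
    # X is a genes x samples array.  Each column is mapped onto a common
    # reference distribution (here, the per-rank median of the sorted columns).
    X = np.asarray(X, dtype=float)
    order = np.argsort(X, axis=0)            # rank order within each column
    X_sorted = np.sort(X, axis=0)
    reference = np.median(X_sorted, axis=1)  # reference quantiles, smallest to largest
    X_norm = np.empty_like(X)
    for j in range(X.shape[1]):
        X_norm[order[:, j], j] = reference   # put reference values back at original ranks
    return X_norm

X = [[5.0, 4.0, 3.0],
     [2.0, 1.0, 4.0],
     [3.0, 4.0, 6.0],
     [4.0, 2.0, 8.0]]
print(quantile_normalize(X))
```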
#### File: changlab/genomicode/selap.py
```python
import os
def selap_make_raw(
matrix_file, penalty, matlab_bin=None, selap_path=None, outpath=None):
# matrix_file should contain a matrix (no headers) of samples x
# pathway predictions.
import matlab
# Set defaults.
matrix_file = os.path.realpath(matrix_file)
assert os.path.exists(matrix_file)
selap_path = find_selap(selap_path)
assert selap_path is not None, "I could not find SELAP."
selap_path = os.path.realpath(selap_path)
outpath = outpath or "."
lines = []
w = lines.append
w("addpath '%s/Utilities';\n" % selap_path)
w("X = load('%s');\n" % matrix_file)
w("X = logit(X);\n")
w("[mu sig prob] = selapMixMap(X, %s);\n" % penalty)
#w("predictions = mvnMixtureProb(X', mu, sig, prob);\n")
w("save('mu.txt', 'mu', '-ASCII', '-TABS');\n")
w("save('sig.txt', 'sig', '-ASCII', '-TABS');\n")
w("save('prob.txt', 'prob', '-ASCII', '-TABS');\n")
#w("save('predict.txt', 'predictions', '-ASCII', '-TABS');\n")
script = "".join(lines)
x = matlab.run(script, matlab_bin=matlab_bin, working_path=outpath)
return x
def selap_predict_raw(
matrix_file, mu_file, sig_file, prob_file, matlab_bin=None,
selap_path=None, outpath=None):
# matrix_file should contain a matrix (no headers) of samples x
# pathway predictions.
import matlab
# Set defaults.
matrix_file = os.path.realpath(matrix_file)
mu_file = os.path.realpath(mu_file)
sig_file = os.path.realpath(sig_file)
prob_file = os.path.realpath(prob_file)
assert os.path.exists(matrix_file)
assert os.path.exists(mu_file)
assert os.path.exists(sig_file)
assert os.path.exists(prob_file)
selap_path = find_selap(selap_path)
assert selap_path is not None, "I could not find SELAP."
selap_path = os.path.realpath(selap_path)
outpath = outpath or "."
lines = []
w = lines.append
w("addpath '%s/Utilities';\n" % selap_path)
w("X = load('%s');\n" % matrix_file)
w("X = logit(X);\n")
w("mu = load('%s');\n" % mu_file)
w("sig = load('%s');\n" % sig_file)
w("prob = load('%s');\n" % prob_file)
w("predictions = mvnMixtureProb(X', mu, sig, prob);\n")
w("save('predict.txt', 'predictions', '-ASCII', '-TABS');\n")
script = "".join(lines)
x = matlab.run(script, matlab_bin=matlab_bin, working_path=outpath)
return x
def find_selap(default_path):
import config
search_paths = [
default_path,
config.selap_path,
]
path = None
for spath in search_paths:
assert path is None
if spath is None or not os.path.exists(spath):
continue
        # Test for some SELAP files.
files = [
"runme.txt", "subanalysis.m", "Utilities",
]
complete = True # Is this distribution complete.
for file_ in files:
filename = os.path.join(spath, file_)
if not os.path.exists(filename):
complete = False
# Don't break here, so we can diagnose missing files.
#break
if not complete:
continue
path = spath
break
return path
```
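A hypothetical usage sketch for the SELAP wrappers; it assumes MATLAB and a SELAP installation are available, that the input files exist, and that the output directories have been created. All filenames are illustrative.

```python
from genomicode import selap

# Fit the mixture model; writes mu.txt, sig.txt and prob.txt into outpath.
out = selap.selap_make_raw(
    "pathway_predictions.txt", penalty=10, outpath="selap_model")

# Predict on new samples with the fitted parameters; writes predict.txt.
out = selap.selap_predict_raw(
    "new_predictions.txt",
    "selap_model/mu.txt", "selap_model/sig.txt", "selap_model/prob.txt",
    outpath="selap_predict")
```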
#### File: changlab/scripts/analyze_clinical_outcome.py
```python
def parse_genes(genes):
# genes is a list of comma-separated gene names or IDs.
# e.g. ["E2F1,E2F3", "MYC"]
# Return (potentially empty) list of genes.
clean = []
for x in genes:
clean.extend(x.split(','))
return clean
def parse_genes_from_geneset(genes_from_geneset):
# genes_from_geneset is a list of genesets.
from genomicode import genesetlib
if not genes_from_geneset:
return []
all_genes = []
for file_geneset in genes_from_geneset:
filename, genesets = _parse_file_gs(file_geneset)
keywds = {"allow_tdf": True}
genes = genesetlib.read_genes(filename, *genesets, **keywds)
all_genes.extend(genes)
return all_genes
def _parse_file_gs(geneset):
# Parse a geneset specified by the user. geneset is in the format
    # of <filename>[,<geneset>,<geneset>,...]. Return a tuple of
    # <filename>, list of <geneset> (or empty list).
# XXX what happens if this is an empty list?
x = geneset.split(",")
assert len(x) >= 1
filename, genesets = x[0], x[1:]
return filename, genesets
def parse_gene_sets(geneset):
# geneset is a list of gene sets. Return it unchanged.
return geneset
def list_all_genes(filename):
import arrayio
M = read_gene_expression(filename)
genes = M.row_names(arrayio.ROW_ID)
# Make sure genes are unique.
x = sorted({}.fromkeys(genes))
assert len(genes) == len(x), "Not all genes are unique."
return genes
def list_all_gene_sets(filename):
M = read_geneset_scores(filename)
gene_sets = M.row_names("geneset")
return gene_sets
def parse_rank_cutoffs(cutoffs):
# Comma-separated list of breakpoints, e.g. 0.25,0.50. Return
# list, e.g. [0.25, 0.50], that:
# - is sorted
# - has no duplicates
# - all numbers > 0 and < 1 (no 0 or 1)
assert type(cutoffs) is type("")
cutoffs = cutoffs.split(',')
cutoffs = [float(x) for x in cutoffs]
cutoffs.sort()
# Make sure all numbers between 0 and 1.
for x in cutoffs:
assert x >= 0 and x <= 1.0, "cutoff should be between 0 and 1: %s" % x
assert cutoffs, "no cutoffs"
# Remove duplicates and remove any numbers too close to 0 and 1.
DELTA = 0.001
i = 1
while i < len(cutoffs):
if cutoffs[i]-cutoffs[i-1] < DELTA:
del cutoffs[i]
else:
i += 1
assert cutoffs, "invalid cutoffs"
if cutoffs[0] < DELTA:
del cutoffs[0]
assert cutoffs, "invalid cutoffs"
if cutoffs[-1] > 1.0-DELTA:
del cutoffs[-1]
assert cutoffs, "invalid cutoffs"
return cutoffs
def parse_zscore_cutoffs(cutoffs):
    # Comma-separated list of breakpoints, e.g. "-1,1" or "n1,1". Since
    # it's hard to pass negatives on the command line, "n" is replaced
    # with "-" here. Return a list, e.g. [-1, 1], that:
# - is sorted
# - has no duplicates
assert type(cutoffs) is type("")
x = cutoffs
x = x.replace("n", "-")
x = x.split(',')
x = [float(x) for x in x]
cutoffs = sorted(x)
# Remove duplicates.
DELTA = 0.0001
i = 1
while i < len(cutoffs):
if cutoffs[i]-cutoffs[i-1] < DELTA:
del cutoffs[i]
else:
i += 1
assert cutoffs, "invalid cutoffs"
return cutoffs
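# A couple of illustrative calls for the cutoff parsers above (hypothetical
# inputs; the expected return values are shown after the arrows):
#   parse_rank_cutoffs("0.50,0.25,0.25")  ->  [0.25, 0.5]
#   parse_zscore_cutoffs("n1,1")          ->  [-1.0, 1.0]   ("n" stands in for "-")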
def parse_outcomes(outcomes):
# List of <time_header>,<dead_header>. Return a list of tuples:
# (<time_header>, <dead_header>).
clean = []
for x in outcomes:
x = [x.strip() for x in x.split(",")]
assert len(x) == 2
clean.append(x)
# Make sure there are no duplicate outcomes.
seen = {}
for (x1, x2) in clean:
assert x1 not in seen, "duplicate outcome: %s" % x1
seen[x1] = 1
return clean
def parse_filestem(filestem):
# Return an empty string, or a filestem without a '.' at the end.
if filestem is None:
filestem = ""
if filestem.endswith("."):
filestem = filestem[:-1]
#elif not filestem.endswith("."):
# filestem += "."
return filestem
def read_gene_expression(filename):
import os
import arrayio
assert os.path.exists(filename)
M = arrayio.read(filename)
return M
def read_geneset_scores(filename):
# Read the output from score_geneset.py and return a Matrix
# object.
import os
from genomicode import jmath
from genomicode import filelib
from genomicode import Matrix
from arrayio import const
from arrayio import tab_delimited_format as tdf
assert os.path.exists(filename)
matrix = [x for x in filelib.read_cols(filename)]
matrix = jmath.transpose(matrix)
# Only want the scores. Get rid of the direction, pvalue, and
# significance lines.
# Columns:
# SAMPLE
# FILE
# [Score ...]
# [Direction ...] " direction"
# [p value ...] " pvalue"
# [significant ...] " significant"
assert matrix
i = 0
while i < len(matrix):
assert matrix[i]
metadata = False
if matrix[i][0].endswith(" direction"):
metadata = True
elif matrix[i][0].endswith(" pvalue"):
metadata = True
elif matrix[i][0].endswith(" significant"):
metadata = True
if not metadata:
i += 1
continue
del matrix[i]
# BUG: Need more checks on size and format of matrix.
col_names = {}
sample_row = 0
if matrix[1][0].upper() == "SAMPLE":
sample_row = 1
col_names[tdf.SAMPLE_NAME] = matrix[sample_row][1:]
row_names = {}
row_names['geneset'] = []
synonyms = {}
synonyms[const.COL_ID] = tdf.SAMPLE_NAME
data = []
for line in matrix[2:]:
single_data = [jmath.safe_float(i) for i in line[1:]]
data.append(single_data)
row_names['geneset'].append(line[0])
M = Matrix.InMemoryMatrix(
data, row_names=row_names, col_names=col_names, synonyms=synonyms)
return M
def read_expression_or_geneset_scores(genes, all_genes, gene_sets, filename):
assert genes or all_genes or gene_sets
assert not (genes and all_genes)
has_genes = genes or all_genes
assert not (has_genes and gene_sets)
if genes or all_genes:
# If genes were specified, then the input should be a gene
# expression data file.
M = read_gene_expression(filename)
else:
M = read_geneset_scores(filename)
return M
def read_clinical_annotations(M, filename):
# Return a tuple of (Matrix, clinical annotations). The
# annotations are a dictionary of name -> list of values. They
# are aligned with the matrix.
from genomicode import genesetlib
clinical_annots = {}
for x in genesetlib.read_tdf(
filename, preserve_spaces=True, allow_duplicates=True):
name, description, values = x
clinical_annots[name] = values
# Align the gene scores with the clinical annotations.
x = align_matrix_with_clinical_data(M, clinical_annots)
M, clinical_annots = x
return M, clinical_annots
def align_matrix_with_clinical_data(M, clinical_dict):
from genomicode import matrixlib
assert clinical_dict, "No clinical data."
sample_name = M._col_names['_SAMPLE_NAME']
# Figure out the header in the clinical data that contains these
# samples.
name2count = {}
for name, values in clinical_dict.iteritems():
count = len(set(sample_name).intersection(values))
name2count[name] = count
best_name = best_count = None
for name, count in name2count.iteritems():
if best_count is None or count > best_count:
best_name, best_count = name, count
assert best_count > 0, \
"I could not align the matrix with the clinical data."
clinical_name = clinical_dict[best_name]
# From the clinical data, only keep the samples that occur in the
# matrix.
I = [i for (i, name) in enumerate(clinical_name) if name in sample_name]
clinical_clean = {}
for name, values in clinical_dict.iteritems():
values_clean = [values[i] for i in I]
clinical_clean[name] = values_clean
clinical_dict = clinical_clean
# Realign the matrix to match the clinical data.
x = matrixlib.align_cols_to_annot(
M, clinical_dict[best_name], reorder_MATRIX=True)
M, colnames = x
assert colnames == clinical_dict[best_name]
return M, clinical_dict
def discretize_scores(
scores, rank_cutoffs, zscore_cutoffs, expression_or_score):
# Discretize into groups based on rank or zscore cutoffs. Return
# tuple of (group_names, groups). group_names is a list of the
# names of each group. groups is a list of integers from 0 to
# len(group_names)-1, parallel to scores.
if rank_cutoffs:
groups = discretize_by_value(scores, rank_cutoffs)
# groups should range from [0, len(rank_cutoffs)+1]. So if there's
# one cutoff (e.g. 0.50), groups will be 0 and 1.
for x in groups:
assert x >= 0 and x <= len(rank_cutoffs)+1
group_names = make_group_names(rank_cutoffs, expression_or_score)
elif zscore_cutoffs:
groups = discretize_by_zscore(scores, zscore_cutoffs)
group_names = make_zscore_names(zscore_cutoffs, expression_or_score)
else:
raise AssertionError
return group_names, groups
def describe_gene(MATRIX, gene_i):
# Return tuple of (probe_id, gene_id, gene_symbol, gene_index).
# If any of these cannot be found, then will be None. gene_index
# is just based on the index of the gene.
from genomicode import arrayplatformlib as apl
probe_id = gene_id = gene_symbol = None
cat2header = apl.categorize_headers(MATRIX)
# probe_id
header = cat2header.get(apl.PROBE_ID)
if header is not None:
probe_id = MATRIX.row_names(header)[gene_i]
# gene_symbol
header = cat2header.get(apl.GENE_SYMBOL)
if header is not None:
gene_symbol = MATRIX.row_names(header)[gene_i]
# gene_id
header = cat2header.get(apl.GENE_ID)
if header is not None:
gene_id = MATRIX.row_names(header)[gene_i]
gene_index = "Gene %04d" % gene_i
return probe_id, gene_id, gene_symbol, gene_index
def find_best_groups(scores, survival, dead):
assert len(scores) == len(survival)
assert len(scores) == len(dead)
scores_o = sorted({}.fromkeys(scores))
best_p = best_cutoff = None
for i in range(2, len(scores_o)-2):
# Keep only values with score >= scores_o[i].
groups = [0] * len(scores)
for j in range(len(scores)):
if scores[j] >= scores_o[i]:
groups[j] = 1
uniq_groups = sorted({}.fromkeys(groups))
if len(uniq_groups) < 2:
continue
surv = calc_km(survival, dead, groups)
if best_p is None or surv["p_value"] < best_p:
best_p = surv["p_value"]
best_cutoff = scores_o[i]
# Print out all possible scores.
rank = float(i) / len(scores_o)
x = rank, surv["p_value"]
print "\t".join(map(str, x))
assert best_p is not None
# Make the groups based on the best cutoff.
groups = [0] * len(scores)
for j in range(len(scores)):
if scores[j] >= best_cutoff:
groups[j] = 1
group_names = ["Low", "High"]
return group_names, groups
def format_gene_name(MATRIX, gene_headers, gene_i):
# Return a string describing this gene that can be used as part of
# a filename.
# Use the gene set name.
if len(MATRIX.row_names()) == 1:
x = MATRIX.row_names()[0]
return MATRIX.row_names(x)[gene_i]
if gene_headers:
x = [MATRIX.row_names(x)[gene_i] for x in gene_headers]
x = "_".join(x)
return x
x = describe_gene(MATRIX, gene_i)
probe_id, gene_id, gene_symbol, gene_index = x
if probe_id and gene_symbol:
return "%s_%s" % (gene_symbol, probe_id)
if probe_id:
return probe_id
if gene_symbol:
return gene_symbol
return gene_index
def pretty_gene_name(MATRIX, gene_headers, gene_i):
# Use the gene set name.
if len(MATRIX.row_names()) == 1:
x = MATRIX.row_names()[0]
return MATRIX.row_names(x)[gene_i]
if gene_headers:
x = [MATRIX.row_names(x)[gene_i] for x in gene_headers]
x = " ".join(x)
return x
x = describe_gene(MATRIX, gene_i)
probe_id, gene_id, gene_symbol, gene_index = x
if probe_id and gene_symbol:
return "%s (%s)" % (gene_symbol, probe_id)
if probe_id:
return probe_id
if gene_symbol:
return gene_symbol
return gene_index
def calc_association(
survival, dead, scores, rank_cutoffs, zscore_cutoffs, best_cutoff,
expression_or_score, ignore_unscored_genesets):
# Return a dictionary with keys:
# survival list of <float>
# dead list of <int>
# scores list of <float>
# groups list of <int> [0, length(group_names)-1]
# group_names list of <string>
# p_value <float>
# num_samples dict of <group> : <int>
# mean_score dict of <group> : <float>
# surv50 dict of <group> : <float> or None
# surv90 dict of <group> : <float> or None
# hi_score_short_surv <boolean> or None (no difference in surv)
# relationship <string>
#
# Can return None if the results can't be calculated, e.g. if
# there are not enough samples, or not enough groups.
from genomicode import jmath
# Select only the samples with both survival, dead, and score
# information.
I1 = [i for (i, x) in enumerate(survival) if x]
I2 = [i for (i, x) in enumerate(dead) if x]
I3 = [i for (i, x) in enumerate(scores) if x]
I = sorted(set.intersection(set(I1), set(I2), set(I3)))
if ignore_unscored_genesets and not I:
return None
assert I, "No valid samples."
survival = [float(survival[i]) for i in I]
dead = [int(float(dead[i])) for i in I] # might be 0.0, 1.0
scores = [scores[i] for i in I]
# GraphPad Prism filters out the 0's. Do the same thing here.
I = [i for (i, x) in enumerate(survival) if x > 0]
survival = [survival[i] for i in I]
dead = [dead[i] for i in I]
scores = [scores[i] for i in I]
# Figure out the groupings.
if best_cutoff:
# Figure out the best way to discretize the scores.
x = find_best_groups(scores, survival, dead)
group_names, groups = x
else:
x = discretize_scores(
scores, rank_cutoffs, zscore_cutoffs, expression_or_score)
group_names, groups = x
# May not have two groups, e.g. if there are no outliers. If this
# happens, then return None.
uniq_groups = sorted({}.fromkeys(groups))
if len(uniq_groups) < 2:
return None
# Calculate the KM model.
surv = calc_km(survival, dead, groups)
# Clean up the surv dictionary. If some groups are missing, some
# of the members will be missing values. Fix this.
for i in range(len(group_names)):
if i not in surv["num_samples"]:
surv["num_samples"][i] = 0
if i not in surv["surv50"]:
surv["surv50"][i] = None
if i not in surv["surv90"]:
surv["surv90"][i] = None
# Add extra data to the survival dictionary.
surv["survival"] = survival
surv["dead"] = dead
surv["scores"] = scores
surv["groups"] = groups
surv["group_names"] = group_names
# Calculate the mean scores for each group. If a group is empty,
# then the mean score is None.
mean_score = {}
for group in range(len(group_names)):
s = [s for (s, g) in zip(scores, groups) if g == group]
m = None
if s:
m = jmath.mean(s)
mean_score[group] = m
surv["mean_score"] = mean_score
# Figure out relationship.
MAX_SURV = 1E10
# Compare the time to 50% survival for the low and high scoring
# groups.
# ASSUMPTION: lowest group has low scores, while highest group has
# high scores.
surv_low = surv["surv50"][min(groups)] # low score
surv_high = surv["surv50"][max(groups)] # high score
# If neither groups drop to 50% survival, compare the time to 90%
# survival.
if surv_low is None and surv_high is None:
surv_low = surv["surv90"][min(groups)]
surv_high = surv["surv90"][max(groups)]
if surv_high is None:
surv_high = MAX_SURV
if surv_low is None:
surv_low = MAX_SURV
assert surv_low <= MAX_SURV and surv_high <= MAX_SURV
hi_score_short_surv = None
if surv_high < surv_low:
hi_score_short_surv = True
elif surv_high > surv_low:
hi_score_short_surv = False
surv["hi_score_short_surv"] = hi_score_short_surv
relationship = ""
if hi_score_short_surv:
relationship = "High %s has shorter time to outcome." % \
expression_or_score.lower()
elif hi_score_short_surv is not None:
relationship = "Low %s has shorter time to outcome." % \
expression_or_score.lower()
surv["relationship"] = relationship
return surv
def calc_km(survival, dead, group):
# Return a dictionary with keys:
# p_value <float>
# num_samples dict of <group> : <int>
# surv50 dict of <group> : <float> or None
# surv90 dict of <group> : <float> or None
#
# group is a list of ints indicating group membership, starting from 0.
from genomicode import jmath
assert len(survival) == len(dead)
assert len(survival) == len(group)
# At least two samples must be dead.
#x0 = [x for x in dead if x == 0]
x1 = [x for x in dead if x == 1]
# It is OK if no samples are alive.
#assert len(x0) >= 2, "At least two samples must have dead=0."
assert len(x1) >= 2, "At least two samples must have dead=1."
R = start_and_init_R()
# Suppress a potential warning message:
# Warning messages:
# 1: In fitter(X, Y, strats, offset, init, control, weights = weights, :
# Loglik converged before variable 1,2 ; beta may be infinite.
# 2: In min(abs(s$surv - 0.9)) :
# no non-missing arguments to min; returning Inf
# 3: In min(abs(s$surv - 0.5)) :
# no non-missing arguments to min; returning Inf
jmath.R_equals(survival, 'survival')
jmath.R_equals(dead, 'dead')
jmath.R_equals(group, 'group')
R('ow <- options("warn")')
R('options(warn=-1)')
R('x <- calc.km.multi(survival, dead, group)')
R('options(ow)')
c = R['x']
p_value = c.rx2('p.value')[0]
#hazard_ratio = list(c.rx2('hr'))
unique_group = [x for x in {}.fromkeys(group)]
num_samples = {}
for x in unique_group:
num_samples[x] = c.rx2('num.samples').rx2(str(x))[0]
surv50 = {}
surv90 = {}
for x in unique_group:
x1 = c.rx2('surv').rx2(str(x)).rx2('surv.50')[0]
x2 = c.rx2('surv').rx2(str(x)).rx2('surv.90')[0]
if str(x1) == "NA":
x1 = None
if str(x2) == "NA":
x2 = None
surv50[x] = x1
surv90[x] = x2
SURV = {
"p_value" : p_value,
"num_samples" : num_samples,
"surv50" : surv50,
"surv90" : surv90,
}
return SURV
def discretize_by_value(values, breakpoints):
# Return a list that specifies the group for each member of
# values. Groups are specified by numbers starting from 0. Group
    # 0 contains the values lower than the first breakpoint.
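    # Hypothetical example: with breakpoints=[0.5], values below 0.5 go to
    # group 0 and the rest go to group 1 (exact boundary handling depends
    # on the R helper group.by.value).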
from genomicode import jmath
R = start_and_init_R()
jmath.R_equals(values, 'F')
jmath.R_equals(breakpoints, 'cutoffs')
R('group <- group.by.value(F, cutoffs)')
groups = list(R['group'])
assert len(groups) == len(values)
return groups
def discretize_by_zscore(values, cutoffs):
# Return a list that specifies the group for each member of
# values. Groups are specified by numbers starting from 0. Group
    # 0 contains the values lower than the first cutoff.
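    # Hypothetical example: with cutoffs=[-1, 1], samples would fall into
    # group 0 (z < -1), group 1 (-1 <= z < 1), or group 2 (z >= 1),
    # assuming assign.groups treats the cutoffs as half-open intervals.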
from genomicode.jmath import R_equals, R_fn, R_var
z_model = 1.0
R = start_and_init_R()
R_equals(values, 'x')
R_equals(cutoffs, "z.cutoffs")
params = {
"z.model" : z_model,
}
R_fn('score.outliers.regr', R_var("x"), RETVAL="M", **params)
R_fn(
'assign.groups', R_var("x"), R_var("M$z"), R_var("z.cutoffs"),
RETVAL="groups")
groups = list(R['groups'])
return groups
def make_group_names(cutoffs, expression_or_score):
assert len(cutoffs) >= 1
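    # Worked example (hypothetical inputs): cutoffs=[0.25, 0.75] with
    # expression_or_score="Expression" yields:
    #   ["Lowest 0% - 25% Expression", "Middle 25% - 75% Expression",
    #    "Highest 75% - 100% Expression"]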
if len(cutoffs) == 1:
# Just low vs high.
# [0, <cutoff>, 1]
x = ["Low %s" % expression_or_score, "High %s" % expression_or_score]
return x
cutoffs = [0] + cutoffs + [1] # for convenience
names = []
for i in range(len(cutoffs)-1):
x1 = "Middle"
if i == 0:
x1 = "Lowest"
elif i == len(cutoffs)-2:
x1 = "Highest"
x2 = "%d%% - %d%%" % ((cutoffs[i])*100, (cutoffs[i+1])*100)
x = "%s %s %s" % (x1, x2, expression_or_score)
names.append(x)
return names
def make_zscore_names(cutoffs, expression_or_score):
assert len(cutoffs) >= 1
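    # Worked example (hypothetical input): cutoffs=[-1.0, 1.0] yields
    # ["z < -1", "-1 <= z < 1", "z >= 1.0"].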
names = []
names.append("z < %g" % cutoffs[0])
for i in range(len(cutoffs)-1):
names.append("%g <= z < %g" % (cutoffs[i], cutoffs[i+1]))
names.append("z >= %s" % cutoffs[-1])
return names
def plot_km(
filename, survival, dead, group_indexes, p_value, gene_id, group_names,
mar_bottom, mar_left, mar_top, title, title_size, mar_title,
subtitle_size, mar_subtitle, xlab, ylab, legend_size, x_legend=None,
colors=None):
from genomicode import colorlib
from genomicode.jmath import R_fn, R_var
R = start_and_init_R()
# Set the colors.
assert len(group_names) >= 2
if not colors:
colors = ['#1533AD', '#FFB300']
if len(group_names) > 2:
x = colorlib.bild_colors(len(group_names))
x = [colortuple2hex(*x) for x in x]
colors = x
assert len(colors) == len(group_names)
# R command:
# col <- list("<name>"="<color>", ...)
cmd = []
for i in range(len(group_names)):
x = '"%s"="%s"' % (group_names[i], colors[i])
cmd.append(x)
cmd = "col <- list(%s)" % (", ".join(cmd))
R(cmd)
# Convert mar_title to the line parameter for title. mar_title is
# a scaling factor from 0 to infinity. line=1 is the default, and
# -10 and +10 should indicate the outer bounds. Convert mar_title
# to main.line according to:
# mar_title main.line
# 0-1 -3 to 1
# 1-5 1 to 5
assert mar_title >= 0
if mar_title < 1:
main_line = mar_title*4 - 3
else:
main_line = mar_title
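    # Hypothetical example: mar_title=0.5 gives main.line = 0.5*4 - 3 = -1,
    # while mar_title=3 gives main.line = 3.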
# Convert mar_subtitle to the line parameter for title.
# mar_subtitle is a scaling factor from 0 to infinity. line=4 is
# the default, and -10 and +10 should indicate the outer bounds.
# Convert mar_subtitle to sub.line according to:
# mar_subtitle sub.line
# 0-1 -6 to 4
# 1-7 4 to 10
assert mar_subtitle >= 0
if mar_subtitle < 1:
sub_line = mar_subtitle*10 - 6
else:
sub_line = mar_subtitle + 3
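    # Hypothetical example: mar_subtitle=0.5 gives sub.line = 0.5*10 - 6 = -1,
    # while mar_subtitle=3 gives sub.line = 3 + 3 = 6.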
group = [group_names[i] for i in group_indexes]
xlab = xlab or ""
ylab = ylab or ""
sub = ""
if p_value is not None:
sub = "p=%.2g" % p_value
title = title or gene_id
cex_main = 2.0 * title_size
cex_sub = 1.5 * subtitle_size
cex_legend = 1.5 * legend_size
R_fn(
"bitmap", file=filename, type="png256",
height=1600, width=1600, units="px", res=300)
# Set the margins.
x = 5*mar_bottom, 4*mar_left, 4*mar_top, 2
mar = [x+0.1 for x in x]
R_fn("par", mar=mar, RETVAL="op")
# Suppress warning message. See calc_km.
params = {
'cex.main' : cex_main,
'main.line' : main_line,
'cex.sub' : cex_sub,
'sub.line' : sub_line,
'cex.legend' : cex_legend,
}
if x_legend:
params["x.legend"] = x_legend
R_fn('options', "warn", RETVAL='ow')
R_fn('options', warn=-1)
R_fn(
'plot.km.multi', survival, dead, group, col=R_var('col'),
main=title, sub=sub, xlab=xlab, ylab=ylab, **params)
R_fn('options', R_var('ow'))
R_fn("par", R_var('op'))
R_fn('dev.off')
def write_km_prism_file(filename, survival, dead, group_indexes, group_names):
from genomicode import jmath
R = start_and_init_R()
jmath.R_equals(filename, 'filename')
group = [group_names[i] for i in group_indexes]
jmath.R_equals(survival, 'survival')
jmath.R_equals(dead, 'dead')
jmath.R_equals(group, 'group')
R('write.km.prism.multi(filename, survival, dead, group)')
def plot_groups(filename, scores, group_names, groups, unlog):
from genomicode import colorlib
from genomicode.jmath import R_fn, R_equals, R_var
from genomicode import jmath
assert len(scores) == len(groups)
if unlog:
scores = [2**x for x in scores]
start_and_init_R()
#R = start_and_init_R()
mar = [x+0.1 for x in [5, 6, 4, 2]]
# Set the colors.
assert len(group_names) >= 2
colors = ['#1533AD', '#FFB300']
if len(group_names) > 2:
x = colorlib.bild_colors(len(group_names))
x = [colortuple2hex(*x) for x in x]
colors = x
col = [colors[x] for x in groups]
# Only keep the group_names and colors that occur in the groups.
assert len(colors) == len(group_names)
uniq_groups = sorted({}.fromkeys(groups))
colors = [colors[i] for i in uniq_groups]
group_names = [group_names[i] for i in uniq_groups]
x = range(len(scores))
y = scores
# Sort the scores from lowest to highest.
O = jmath.order(y)
y = [y[i] for i in O]
    col = [col[i] for i in O]
R_equals(x, 'x')
R_equals(y, 'y')
R_equals(col, 'col')
R_fn(
"bitmap", filename, type="png256", height=1600, width=1600,
units="px", res=300)
R_fn("par", mar=mar, RETVAL="op")
R_fn(
"plot", R_var('x'), R_var('y'), type="n", axes=R_var("FALSE"),
xlab="", ylab="")
R_fn("par", "usr", RETVAL="usr")
R_fn(
"rect", R_var("usr[1]"), R_var("usr[3]"), R_var("usr[2]"),
R_var("usr[4]"), col="#FFFFFF")
R_fn("points", R_var('x'), R_var('y'), pch=19, cex=1, col=R_var('col'))
R_fn("box", lwd=1.5)
R_fn(
"axis", 1, lwd=1.5, labels=R_var("FALSE"), tick=R_var("FALSE"),
**{"cex.axis" : 1.25})
R_fn("axis", 2, lwd=1.5, **{"cex.axis" : 1.25})
R_fn("par", mgp=[1.0, 1.5, 0], RETVAL="op2")
R_fn("title", xlab="Sample", **{"cex.lab" : 1.5})
R_fn("par", R_var("op2"))
R_fn("title", main="", xlab="", ylab="Gene Expression", sub="",
**{"cex.lab":1.5, "cex.sub":1, "col.sub":"#A60400", "cex.main":1.0})
R_fn(
"legend", "topleft", legend=group_names, fill=colors, inset=0.05,
cex=1.25, **{"box.lwd":1.5})
R_fn("par", R_var("op"))
R_fn("dev.off")
def write_km_group_file(filename, scores, group_names, groups, unlog):
#from genomicode import colorlib
from genomicode.jmath import R_fn, R_equals, R_var
#from genomicode import jmath
assert len(scores) == len(groups)
if unlog:
scores = [2**x for x in scores]
R = start_and_init_R()
group2scores = {}
for g, s in zip(groups, scores):
if g not in group2scores:
group2scores[g] = []
group2scores[g].append(s)
R("DATA <- list()")
for i in sorted(group2scores):
R_equals(group2scores[i], "x")
R('DATA[["%s"]] <- x' % group_names[i])
R_fn('write.boxplot', filename, R_var('DATA'))
GLOBAL_R = None
def start_and_init_R():
global GLOBAL_R
import os
from genomicode import jmath
from genomicode import config
if GLOBAL_R is None:
assert os.path.exists(config.changlab_Rlib)
km_lib = os.path.join(config.changlab_Rlib, "kaplanmeierlib.R")
stat_lib = os.path.join(config.changlab_Rlib, "statlib.R")
prism_lib = os.path.join(config.changlab_Rlib, "prismlib.R")
assert os.path.exists(km_lib), "File not found: %s" % km_lib
assert os.path.exists(stat_lib), "File not found: %s" % stat_lib
assert os.path.exists(prism_lib), "File not found: %s" % prism_lib
R = jmath.start_R()
R('require(splines, quietly=TRUE)')
R('source("%s")' % km_lib)
R('source("%s")' % stat_lib)
R('source("%s")' % prism_lib)
GLOBAL_R = R
return GLOBAL_R
def colortuple2hex(R, G, B):
    # Zero-pad each channel to two hex digits so small values still
    # produce a valid "#RRGGBB" color.
    R = "%02x" % int(R * 255)
    G = "%02x" % int(G * 255)
    B = "%02x" % int(B * 255)
color_hex = '#' + R + G + B
return color_hex
def _format_list(x):
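    # Worked example (hypothetical input): [1, None, 3] -> "1;;3".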
for i in range(len(x)):
if x[i] is None:
x[i] = ""
x = map(str, x)
return ";".join(x)
def _make_filename(M, gene_i,
filestem, analysis, gene_headers, filetype, fileext):
# gene_i is the index of the gene in the matrix.
# If filestem is None, will not use a filestem.
# gene_headers is a list of headers from Matrix. If empty, will
# try to provide one.
# Format:
# <filestem>.<analysis>.<gene_name>.<filetype>.<fileext>
#
# <filestem> BRCA (has no "." at end)
# <analysis> SUBTYPE,ER,OS
# <gene_name> GAPDH
# <filetype> boxplot, prism, waterfall
# <fileext> txt, png
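    #
    # Hypothetical example (all parts illustrative): BRCA.OS.GAPDH.km.png.
    # Note: the gene name is first passed through hashlib.hash_var (see
    # below), so it may differ from the raw annotation.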
from genomicode import hashlib
assert type(analysis) is type("") and analysis
assert type(filetype) is type("") and filetype
assert type(fileext) is type("") and fileext
for h in gene_headers:
assert h in M.row_names()
# Figure out the gene_name.
x = format_gene_name(M, gene_headers, gene_i)
gene_name = hashlib.hash_var(x)
#if gene_headers:
# x = [M.row_names(x)[gene_i] for x in gene_headers]
# gene_name = "_".join(x)
#else:
# x = get_gene_name(M, gene_i)
# x = hashlib.hash_var(x)
# gene_name = x
parts = [analysis, gene_name, filetype, fileext]
if filestem:
parts.insert(0, filestem)
filename = ".".join(parts)
return filename
def main():
import os
import sys
import argparse
import itertools
#from genomicode import hashlib
parser = argparse.ArgumentParser(
description='Associate gene expression patterns with outcomes.')
parser.add_argument(
'expression_file',
help='Either a gene expression file (GCT,CDT,PCL format) or gene set '
'scores from score_geneset.py.')
parser.add_argument('outcome_file', help='Table of clinical annotations.')
group = parser.add_argument_group(title='Analysis')
group.add_argument(
'--outcome', default=[], action='append',
help='Where to find the outcome information in the clinical '
'annotation file. To analyze more than one outcome, use this '
'parameter multiple times. Format: <time_header>,<dead_header>')
group.add_argument(
'--gene', default=[], action='append',
        help='Comma-separated name or ID of genes to analyze. '
'I will search for this gene in the annotations of the '
'expression_file. '
'You can use this parameter multiple times to search more genes.')
group.add_argument(
'--genes_from_geneset', default=[], action='append',
help="Include the genes from this geneset. "
"Format: <txt/gmx/gmt_file>,<geneset>[,<geneset>,...]")
group.add_argument(
'--geneset', default=[], action='append',
help='Name of the geneset (from the gene set score file) to analyze. '
'To specify multiple gene sets, use this parameter multiple times.')
group.add_argument(
'--all_genes', action='store_true',
help='Use all genes in the expression file.')
group.add_argument(
'--all_genesets', action='store_true',
help='Use all gene sets in the geneset file.')
group.add_argument(
'--ignore_unscored_genesets', default=False, action='store_true',
help="If a gene set is not scored on this data set, then ignore it.")
group = parser.add_argument_group(
title='Discretization', description='We can discretize the '
'expression data using two different strategies. '
'The first approach is to set a simple cutoff based on the '
'relative magnitude of the expression. For example, a simple '
'cutoff of 0.50 would separate the samples with lowest 50% '
'expression from those with highest 50%. '
'The second approach looks for outliers in gene expression '
'based on z-scores. So a z-score cutoff of 1.0 would separate '
'the samples into groups with z <= -1, -1 < z < 1, and z >= 1. '
'For the z-score strategy, it is possible that no outliers will '
'be detected with a specific z-score cutoff.'
)
group.add_argument(
'--rank_cutoff',
help='Comma-separated list of breakpoints (between 0 and 1), '
        'e.g. 0.25,0.50,0.75. Default is to use a cutoff of 0.50. '
'I will use this strategy unless a --zscore_cutoff is given.')
group.add_argument(
'--zscore_cutoff',
help='Comma-separated list of breakpoints, e.g. n1,1 '
'(can use n for negative sign).')
group.add_argument(
'--best_cutoff', action="store_true",
help="Try every cutoff and use the best one.")
group = parser.add_argument_group(title='Output')
group.add_argument(
'-o', dest='filestem',
help='Prefix used to name files. e.g. "myanalysis".')
group.add_argument(
"--gene_header", action="append", default=[],
help="Header of gene name to include in the name of the output file "
"(MULTI). By default, will try to find the gene symbol.")
group.add_argument(
'--no_plots', action='store_true', default=False,
help="Don't produce any plots or Prism files.")
group.add_argument(
'--unlog_group_plot', action='store_true', default=False,
help='Plot the group plot on an un-logged scale.')
group = parser.add_argument_group(title='Formatting the Kaplan-Meier Plot')
group.add_argument(
"--km_mar_left", default=1.0, type=float,
help="Scale margin at left of plot. Default 1.0 (no scaling).")
group.add_argument(
"--km_mar_bottom", default=1.0, type=float,
help="Scale margin at bottom of plot. Default 1.0 (no scaling).")
group.add_argument(
"--km_mar_top", default=1.0, type=float,
help="Scale margin at top of plot. Default 1.0 (no scaling).")
group.add_argument(
'--km_title', default=None, help='Title for the Kaplan-Meier plot.')
group.add_argument(
'--km_title_size', default=1.0, type=float,
help='Scale the size of the title. Default 1.0 (no scaling).')
group.add_argument(
'--km_mar_title', default=1.0, type=float,
help="Scale margin for the title. Default 1.0 (no scaling).")
group.add_argument(
'--km_subtitle_size', default=1.0, type=float,
help='Scale the size of the subtitle. Default 1.0 (no scaling).')
group.add_argument(
'--km_mar_subtitle', default=1.0, type=float,
help="Scale margin for the subtitle. Default 1.0 (no scaling).")
group.add_argument(
'--km_xlab', default=None,
help='x-axis label for the Kaplan-Meier plot.')
group.add_argument(
'--km_ylab', default=None,
help='y-axis label for the Kaplan-Meier plot.')
group.add_argument(
'--km_legend_size', default=1.0, type=float,
help='Scale the size of the legend. Default 1.0 (no scaling).')
args = parser.parse_args()
# Check inputs.
assert args.expression_file, (
'Please specify a gene expression or gene set score file.')
assert os.path.exists(args.expression_file), "File not found: %s" % \
args.expression_file
assert args.outcome_file, (
'Please specify a clinical outcomes file.')
assert os.path.exists(args.outcome_file), "File not found: %s" % \
args.outcome_file
assert args.outcome, 'Please specify the clinical outcomes to analyze.'
assert args.gene or args.all_genes or \
args.geneset or args.all_genesets or args.genes_from_geneset, \
'Please specify a gene or gene set.'
assert not (args.gene and args.all_genes), (
'Please specify either a gene or all genes, not both.')
assert not (args.gene and args.geneset), (
'Please specify either a gene or a gene set score, not both.')
assert not (args.gene and args.all_genesets), (
'Please specify either a gene or a gene set score, not both.')
assert not (args.all_genes and args.all_genesets), (
'Please specify either all genes or all genesets, not both.')
assert not (args.geneset and args.genes_from_geneset), (
'Please specify either a gene or a gene set score, not both.')
assert not (args.all_genesets and args.genes_from_geneset), (
'Please specify either a gene or a gene set score, not both.')
if args.rank_cutoff:
assert not args.zscore_cutoff
assert not args.best_cutoff
elif args.zscore_cutoff:
assert not args.rank_cutoff
assert not args.best_cutoff
elif args.best_cutoff:
assert not args.rank_cutoff
assert not args.zscore_cutoff
if not args.rank_cutoff and not args.zscore_cutoff:
args.best_cutoff = True
assert args.km_mar_bottom > 0 and args.km_mar_bottom < 10
assert args.km_mar_left > 0 and args.km_mar_left < 10
assert args.km_mar_top > 0 and args.km_mar_top < 10
assert args.km_title_size > 0 and args.km_title_size < 10
assert args.km_mar_title > 0 and args.km_mar_title < 10
assert args.km_subtitle_size > 0 and args.km_subtitle_size < 10
assert args.km_mar_subtitle > 0 and args.km_mar_subtitle < 10
assert args.km_legend_size > 0 and args.km_legend_size < 10
# Clean up the input.
genes1 = parse_genes(args.gene)
genes2 = parse_genes_from_geneset(args.genes_from_geneset)
genes = genes1 + genes2
if args.all_genes:
genes = list_all_genes(args.expression_file)
gene_sets = parse_gene_sets(args.geneset)
if args.all_genesets:
gene_sets = list_all_gene_sets(args.expression_file)
rank_cutoffs = zscore_cutoffs = None
if args.rank_cutoff:
rank_cutoffs = parse_rank_cutoffs(args.rank_cutoff)
if args.zscore_cutoff:
zscore_cutoffs = parse_zscore_cutoffs(args.zscore_cutoff)
outcomes = parse_outcomes(args.outcome)
filestem = parse_filestem(args.filestem)
# Read the input files.
M = read_expression_or_geneset_scores(
genes, False, gene_sets, args.expression_file)
x = read_clinical_annotations(M, args.outcome_file)
M, clinical_annots = x
    # Make sure at least one of the outcomes is in the clinical
    # annotations.
outcomes = [(x1, x2) for (x1, x2) in outcomes
if x1 in clinical_annots and x2 in clinical_annots]
msg = "No clinical annotations found."
if filestem:
fs = filestem
if fs.endswith("."):
fs = fs[:-1]
msg = msg + " (%s)" % fs
assert outcomes, msg
#for x1, x2 in outcomes:
# assert x1 in clinical_annots, "Missing clinical annotation: %s" % x1
# assert x2 in clinical_annots, "Missing clinical annotation: %s" % x2
# Select the genes or gene sets of interest.
x = genes or gene_sets
M = M.matrix(row=x)
if genes:
assert M.nrow(), "I could not find any of the genes."
elif gene_sets:
assert M.nrow(), "I could not find any of the gene sets."
# Calculate the association of each gene and each outcome.
expression_or_score = "Expression"
if gene_sets:
expression_or_score = "Score"
# (time_header, dead_header, gene_index) -> returned from calc_association
gene_outcome_scores = {}
for x in itertools.product(outcomes, range(M.nrow())):
(time_header, dead_header), i = x
survival = clinical_annots[time_header]
dead = clinical_annots[dead_header]
scores = M.value(i, None)
try:
x = calc_association(
survival, dead, scores,
rank_cutoffs, zscore_cutoffs, args.best_cutoff,
expression_or_score, args.ignore_unscored_genesets)
except AssertionError, x:
# Provide a better error message.
type_, value, tb = sys.exc_info()
x = str(x)
if filestem:
fs = filestem
if fs.endswith("."):
fs = fs[:-1]
x = x + " (%s)" % fs
raise AssertionError, x, tb
if x is None:
continue
gene_outcome_scores[(time_header, dead_header, i)] = x
# Files generated:
# <filestem>.stats.txt Or to STDOUT if no <filestem> given.
# <filestem>.<outcome>.<gene_id>.km.png K-M plot.
# <filestem>.<outcome>.<gene_id>.km.txt Prism format for K-M analysis.
# <filestem>.<outcome>.<gene_id>.groups.png Group for each sample.
# <filestem>.<outcome>.<gene_id>.groups.txt Prism format.
# Write the output in a table with headers:
# <headers> # From the expression or gene set file.
# Outcome
# Groups # one for each group
# Num Samples # one for each group, separated by semicolon
# Average Expression # one for each group, separated by semicolon
# 90% Survival # one for each group, separated by semicolon
# 50% Survival # one for each group, separated by semicolon
# Relationship
# p-value
outhandle = sys.stdout
if filestem:
outhandle = open("%s.stats.txt" % filestem, 'w')
# Figure out the header for the table.
header = M.row_names() + [
"Outcome", "Groups", "Num Samples", "Average Expression",
"90% Survival", "50% Survival", "Relationship", "p-value"]
if filestem:
header = ["Data Set"] + header
print >>outhandle, "\t".join(header)
# Write out each row of the table.
for x in itertools.product(outcomes, range(M.nrow())):
(time_header, dead_header), gene_i = x
x = time_header, dead_header, gene_i
SURV = gene_outcome_scores.get(x)
if not SURV: # if couldn't be calculated, e.g. not enough groups
continue
gene_names = [M.row_names(x)[gene_i] for x in M.row_names()]
outcome = time_header
group_names = SURV["group_names"]
I = range(len(group_names))
num_samples = [SURV["num_samples"][x] for x in I]
mean_score = [SURV["mean_score"][x] for x in I]
surv90 = [SURV["surv90"][x] for x in I]
surv50 = [SURV["surv50"][x] for x in I]
relationship = SURV["relationship"]
p_value = SURV["p_value"]
_fmt = _format_list
x = gene_names + [
outcome, _fmt(group_names), _fmt(num_samples), _fmt(mean_score),
_fmt(surv90), _fmt(surv50), relationship, p_value]
if filestem:
fs = filestem
if fs.endswith("."):
fs = fs[:-1]
x = [fs] + x
assert len(x) == len(header)
print >>outhandle, "\t".join(map(str, x))
if args.no_plots:
continue
# Write out Prism, Kaplan-Meier curves, etc.
### Better way to pick gene ID.
##gene_id = get_gene_name(M, gene_i)
##gene_id_h = hashlib.hash_var(gene_id)
# Make the Kaplan-Meier plot.
#filename = "%s%s.%s.km.png" % (filestem, time_header, gene_id_h)
filename = _make_filename(
M, gene_i, filestem, time_header, args.gene_header, "km", "png")
#gene_id = get_gene_name(M, gene_i)
gene_id = format_gene_name(M, args.gene_header, gene_i)
plot_km(
filename, SURV["survival"], SURV["dead"], SURV["groups"],
SURV["p_value"], gene_id, SURV["group_names"],
args.km_mar_bottom, args.km_mar_left, args.km_mar_top,
args.km_title, args.km_title_size, args.km_mar_title,
args.km_subtitle_size, args.km_mar_subtitle,
args.km_xlab, args.km_ylab, args.km_legend_size)
# Write out a Prism file for the Kaplan-Meier plot.
#filename = "%s%s.%s.km.txt" % (filestem, time_header, gene_id_h)
filename = _make_filename(
M, gene_i, filestem, time_header, args.gene_header, "km", "txt")
write_km_prism_file(
filename, SURV["survival"], SURV["dead"], SURV["groups"],
SURV["group_names"])
# Make the group plot.
#filename = "%s%s.%s.groups.png" % (filestem, time_header, gene_id_h)
filename = _make_filename(
M, gene_i, filestem, time_header, args.gene_header,
"groups", "png")
plot_groups(
filename, SURV["scores"], SURV["group_names"], SURV["groups"],
args.unlog_group_plot)
# Write out a Prism file for the group plot.
#filename = "%s%s.%s.groups.txt" % (filestem, time_header, gene_id_h)
filename = _make_filename(
M, gene_i, filestem, time_header, args.gene_header,
"groups", "txt")
write_km_group_file(
filename, SURV["scores"], SURV["group_names"], SURV["groups"],
args.unlog_group_plot)
if __name__ == '__main__':
main()
```
#### File: changlab/scripts/analyze_phenotype.py
```python
def parse_phenotypes(phenotypes):
# list of phenotypes.
# e.g. ["STEM", "EMT"]
# Return (potentially empty) list of phenotypes.
return phenotypes
def parse_groups(center_by_groups):
# Return tuple of batch_header, list of group 1, list of group 2.
# Format: <BATCH_HEADER>;
# <GROUP 1 VALUE>[,<GROUP 1 VALUE>,...];
# <GROUP 2 VALUE>[,<GROUP 2 VALUE>,...]
# If not given, return a tuple of None's.
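    # Hypothetical example: "Batch;WT,Parental;KO,Mutant" returns
    # ("Batch", ["WT", "Parental"], ["KO", "Mutant"]).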
if not center_by_groups:
return None, None, None
x = center_by_groups.split(";")
assert len(x) == 3
batch_header, x1, x2 = x
group1 = x1.split(",")
group2 = x2.split(",")
return batch_header, group1, group2
def parse_ignore_samples(ignore_samples):
# Return a tuple of <annot>, <value>
# Format: <annot>,<value>
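    # Hypothetical example: "Treatment,untreated" yields the pair
    # ["Treatment", "untreated"].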
x = ignore_samples.split(",")
assert len(x) == 2
return x
def ignore_samples(M, clinical_annots, ignore):
x = parse_ignore_samples(ignore)
annot, value = x
assert annot in clinical_annots, "Missing annot: %s" % annot
values = clinical_annots[annot]
I = [] # indexes to keep
for i in range(len(values)):
if value != values[i]:
I.append(i)
assert len(I) < len(values), "I could not find any %s=%s" % (annot, value)
M_f = M.matrix(None, I)
annots_f = {}
for name, values in clinical_annots.iteritems():
values_f = [values[i] for i in I]
annots_f[name] = values_f
assert len(values_f) == M_f.ncol()
return M_f, annots_f
def calc_association(phenotypes, scores, ignore_insufficient_groups):
# Return a dictionary with keys:
# n Number of samples.
# m Number of groups.
# scores n-list of <float>
# delta None or <float>
# phenotypes n-list of <string>
# groups n-list of <int> [0, length(group_names)-1]
# group_names m-list of <string> (unique list of pheno)
# num_samples dict of <group (int)> : <int>
# mean_score dict of <group (int)> : <float>
# p_value <float>
# relationship <string>
#
# May return None if there is only 1 group, and
# ignore_insufficient_groups is a true value.
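    #
    # Hypothetical example of the group encoding: phenotypes=["WT", "KO",
    # "WT"] would give group_names=["KO", "WT"] and groups=[1, 0, 1],
    # assuming sortlib.sort_natural orders these labels alphabetically.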
from genomicode import jmath
from genomicode import sortlib
# Select only the samples with phenotype and score information.
I1 = [i for (i, x) in enumerate(phenotypes) if x]
I2 = [i for (i, x) in enumerate(scores) if x != ""]
I = sorted(set.intersection(set(I1), set(I2)))
assert I, "No valid samples."
phenotypes = [phenotypes[i] for i in I]
scores = [float(scores[i]) for i in I]
# Figure out the groupings.
#group_names = sorted({}.fromkeys(phenotypes))
group_names = sortlib.sort_natural({}.fromkeys(phenotypes))
if len(group_names) < 2 and ignore_insufficient_groups:
return None
assert len(group_names) >= 2, "Need at least 2 groups (%s)." % \
str(group_names)
groups = [None] * len(phenotypes)
for i in range(len(phenotypes)):
x = group_names.index(phenotypes[i])
groups[i] = x
# Calculate the association.
group2scores = {} # group -> list of scores
for i in range(len(scores)):
n = groups[i]
if n not in group2scores:
group2scores[n] = []
group2scores[n].append(scores[i])
y = scores
x = [[0]*len(group_names) for i in range(len(y))]
for i in range(len(groups)):
x[i][groups[i]] = 1
jmath.start_R()
jmath.R_equals(x, "x")
jmath.R_equals(y, "y")
jmath.R("m <- aov(y~x)")
p_value = jmath.R('summary(m)[[1]][["Pr(>F)"]][1]')[0]
# Count other things.
num_samples = {}
for n in group2scores:
num_samples[n] = len(group2scores[n])
mean_score = {}
for n in group2scores:
mean_score[n] = jmath.mean(group2scores[n])
# If there are exactly 2 groups, then find the difference between
# the two groups.
    delta = None  # difference between the two group means, or None
if len(group_names) == 2:
delta = mean_score[1] - mean_score[0]
# Figure out the relationship.
relationship = ""
assert len(group_names) >= 2
high_score = None
for n, score in mean_score.iteritems():
if high_score is not None and score <= high_score:
continue
high_score = score
x1 = "Higher"
if len(group_names) > 2:
x1 = "Highest"
relationship = "%s in %s" % (x1, group_names[n])
SCORE = {}
SCORE["n"] = len(scores)
SCORE["m"] = len(group_names)
SCORE["scores"] = scores
SCORE["phenotypes"] = phenotypes
SCORE["groups"] = groups
SCORE["group_names"] = group_names
SCORE["num_samples"] = num_samples
SCORE["mean_score"] = mean_score
SCORE["delta"] = delta
SCORE["p_value"] = p_value
SCORE["relationship"] = relationship
return SCORE
def center_scores(scores, batches, phenotypes, group1, group2):
from genomicode import jmath
assert len(scores) == len(phenotypes)
assert len(batches) == len(phenotypes)
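    # For each batch, shift the scores so that the midpoint between the
    # mean of group1 and the mean of group2 becomes 0.  Hypothetical
    # example: group1 mean 4.0 and group2 mean 6.0 in a batch means 5.0
    # is subtracted from every score in that batch.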
batches_all = sorted({}.fromkeys(batches))
scores_c = [None] * len(scores)
for batch in batches_all:
I = [i for i in range(len(batches)) if batches[i] == batch]
scores1, scores2 = [], []
for i in I:
pheno = phenotypes[i]
if pheno in group1:
scores1.append(scores[i])
elif pheno in group2:
scores2.append(scores[i])
else:
raise AssertionError, "%s not in groups" % pheno
assert scores1, "No samples from group1 in batch %s" % batch
assert scores2, "No samples from group2 in batch %s" % batch
mean1 = jmath.mean(scores1)
mean2 = jmath.mean(scores2)
n = (mean1 + mean2)/2.0
for i in I:
scores_c[i] = scores[i] - n
assert None not in scores_c
return scores_c
def write_prism_file(filename, scores, phenotypes, group_names):
for x in phenotypes:
assert x in group_names
pheno2scores = {}
for pheno, score in zip(phenotypes, scores):
if pheno not in pheno2scores:
pheno2scores[pheno] = []
pheno2scores[pheno].append(score)
matrix = []
matrix.append(group_names)
x = [[""]*len(group_names) for i in range(len(scores))]
matrix.extend(x)
for j in range(len(group_names)):
scores = pheno2scores.get(group_names[j], [])
for i in range(len(scores)):
matrix[i+1][j] = scores[i]
    # Delete all the empty rows at the bottom.
while matrix:
x = matrix[-1]
if x == [""]*len(x):
del matrix[-1]
else:
break
handle = open(filename, 'w')
for x in matrix:
print >>handle, "\t".join(map(str, x))
## def plot_boxplot(
## filename, scores, phenotypes, group_names, p_value, gene_id,
## mar_bottom, mar_left, mar_top):
## import os
## from genomicode import jmath
## from genomicode.jmath import R_fn, R_var, R_equals
## from genomicode import config
## xlabel_size = 1.0
## height = 1600
## width = 1600
## pheno2scores = {}
## for pheno, score in zip(phenotypes, scores):
## if pheno not in pheno2scores:
## pheno2scores[pheno] = []
## pheno2scores[pheno].append(score)
## R = jmath.start_R()
## path = config.changlab_Rlib
## plotlib = os.path.join(path, "plotlib.R")
## assert os.path.exists(plotlib), "I cannot find: %s" % plotlib
## R_fn("source", plotlib)
## #main = R_var("NA")
## main = gene_id
## #sub = ""
## sub = "%.2g" % p_value
## xlab = ""
## ylab = "Gene Expression"
## labels = group_names
## col = R_var("NULL")
## lwd = 2
## las = 3 # vertical labels
## at = R_var("NULL")
## if labels:
## at = range(1, len(labels)+1)
## cex_labels = 1.25*xlabel_size
## #cex_legend = 1
## #cex_lab = 1.5
## cex_xlab = 2.0
## cex_ylab = 2.0
## cex_sub = 1.5
## R_equals(labels, "labels")
## R_equals(at, "at")
## R("X <- list()")
## for i, n in enumerate(group_names):
## s = pheno2scores.get(n, [])
## R_equals(s, "s")
## R("X[[%d]] <- s" % (i+1))
## bm_type = "png16m"
## if filename.lower().endswith(".pdf"):
## bm_type = "pdfwrite"
## R_fn(
## "bitmap", filename, type=bm_type,
## height=height, width=width, units="px", res=300)
## # Set the margins.
## # default is 5.1, 4.1, 4.1, 2.1
## x = 10*mar_bottom, 5*mar_left, 4*mar_top, 2
## mar = [x+0.1 for x in x]
## R_fn("par", mar=mar, RETVAL="op")
## R_fn(
## "boxplot", R_var("X"), col=col, main="", xlab="", ylab="",
## axes=R_var("FALSE"), pch=19, cex=1, ylim=R_var("NULL"))
## # Make plot area solid white.
## jmath.R('usr <- par("usr")')
## jmath.R('rect(usr[1], usr[3], usr[2], usr[4], col="#FFFFFF")')
## R_fn(
## "boxplot", R_var("X"), col=col, main="", xlab="", ylab="",
## axes=R_var("FALSE"), pch=19, cex=1, ylim=R_var("NULL"),
## add=R_var("TRUE"))
## R_fn("box", lwd=lwd)
## R_fn(
## "axis", 1, lwd=lwd, labels=R_var("labels"),
## at=R_var("at"), las=las, **{ "cex.axis" : cex_labels })
## R_fn(
## "axis", 2, lwd=lwd, **{ "cex.axis" : 1.5 })
## R_fn(
## "title", main=main, sub=sub, xlab=xlab, ylab=ylab,
## **{ "cex.lab" : cex_xlab, "cex.main" : 2.0, "cex.sub" : cex_sub,
## "col.sub" : "#A60400" })
## R("par(op)")
## R_fn("dev.off")
def plot_waterfall(
filename, scores, phenotypes, group_names, sample_names, p_value, gene_id,
mar_bottom, mar_left, mar_top, xlabel_off):
import os
from genomicode import jmath
from genomicode.jmath import R_fn, R_var, R_equals
from genomicode import config
from genomicode import colorlib
import analyze_clinical_outcome as aco
# Sort by increasing score.
O = jmath.order(scores)
scores = [scores[i] for i in O]
phenotypes = [phenotypes[i] for i in O]
sample_names = [sample_names[i] for i in O]
# Plot the colors.
assert len(group_names) >= 2
colors = ['#1533AD', '#FFB300']
if len(group_names) > 2:
x = colorlib.bild_colors(len(group_names))
x = [aco.colortuple2hex(*x) for x in x]
colors = x
xlabel_size = 1.0
height = 1600
width = 1600
R = jmath.start_R()
path = config.changlab_Rlib
plotlib = os.path.join(path, "plotlib.R")
assert os.path.exists(plotlib), "I cannot find: %s" % plotlib
R_fn("source", plotlib)
#main = R_var("NA")
main = gene_id
sub = ""
#sub = "%.2g" % p_value
xlab = ""
ylab = "Gene Expression"
labels = sample_names
col = [colors[group_names.index(x)] for x in phenotypes]
x = range(1, len(scores)+1)
y = scores
r = (max(y)-min(y))*0.10
mn = min(y)-r
mx = max(y)+r
ylim = (mn, mx)
lwd = 2
las = 3 # vertical labels
cex_labels = 1.25*xlabel_size
cex_ytick = 1.5
#cex_legend = 1
cex_xlab = 2.0
cex_ylab = 2.0
cex_sub = 2.0
legend_x = "topleft"
R_equals(labels, "labels")
R_equals(y, "y")
bm_type = "png16m"
if filename.lower().endswith(".pdf"):
bm_type = "pdfwrite"
R_fn(
"bitmap", filename, type=bm_type,
height=height, width=width, units="px", res=300)
# Set the margins.
xlabel_bottom = 2.0
if xlabel_off:
R_equals(R_var("FALSE"), "labels")
xlabel_bottom = 0.5
x = 5*mar_bottom*xlabel_bottom, 5*mar_left, 4*mar_top, 2
mar = [x+0.1 for x in x]
R_fn("par", mar=mar, RETVAL="op")
R_fn(
"barplot", R_var("y"), xlab="", ylab="",
axes=R_var("FALSE"), ylim=ylim, xpd=R_var("FALSE"),
RETVAL="mp")
# Make plot area solid white.
jmath.R('usr <- par("usr")')
jmath.R('rect(usr[1], usr[3], usr[2], usr[4], col="#FFFFFF")')
R_fn("box", lwd=lwd)
mgp = 3, 1.5, 0
R_fn("par", mgp=mgp, RETVAL="op2")
R_fn(
"axis", 1, lwd=lwd, labels=R_var("labels"),
at=R_var("mp"), las=las, **{ "cex.axis" : cex_labels })
R("par(op2)")
R_fn("axis", 2, lwd=lwd, **{ "cex.axis" : cex_ytick })
R_fn(
"title", main=main, sub=sub, xlab=xlab, ylab="",
**{ "cex.lab" : cex_xlab, "cex.main" : 2.0, "cex.sub" : cex_sub,
"col.sub" : "#A60400" })
R_fn("title", ylab=ylab, **{ "cex.lab" : cex_ylab } )
R_fn(
"barplot", R_var("y"), col=col, xlab="", ylab="",
axes=R_var("FALSE"), ylim=ylim, add=R_var("TRUE"), xpd=R_var("FALSE"))
R_fn(
"legend", legend_x, legend=group_names, fill=colors, inset=0.05,
bg="#FFFFFF")
R("par(op)")
R_fn("dev.off")
def main():
import os
import sys
import itertools
import argparse
import arrayio
import analyze_clinical_outcome as aco
import boxplot
from genomicode import parallel
parser = argparse.ArgumentParser(
description="Associate gene expression patterns with a "
"categorical phenotype.")
parser.add_argument(
'expression_file',
help='Either a gene expression file (GCT,CDT,PCL format) or gene set '
'scores from score_geneset.py.')
parser.add_argument(
'phenotype_file', help="Table of phenotypes (tab-delimited text "
"file).")
parser.add_argument(
"--ignore_samples", help="Ignore the samples where an annotation "
"(a column in the phenotype file) matches a specific value. "
"Format:<header>,<value>")
parser.add_argument(
"-j", dest="num_procs", type=int, default=1,
help="Number of processors to use.")
group = parser.add_argument_group(title='Analysis')
group.add_argument(
'--phenotype', default=[], action='append',
help='Header in the phenotype file (MULTI). Format: <header>')
group.add_argument(
'--all_phenotypes', action="store_true",
help="Analyze all phenotypes in the file.")
parser.add_argument(
"--ignore_phenotype", default=[], action="append",
help="Ignore this column in the phenotype file. "
"Helpful to get rid of the sample column when using "
"--all_phenotypes. Format: <header> (MULTI)")
group.add_argument(
'--ignore_insufficient_groups', action="store_true",
help="If a phenotype only has one group, then ignore it rather "
"than raising an error.")
group.add_argument(
'--gene', default=[], action='append',
        help='Comma-separated name or ID of genes to analyze. '
'I will search for this gene in the annotations of the '
'expression_file. '
'You can use this parameter multiple times to search more genes.')
group.add_argument(
"--empty_vs_filled", action="store_true",
help="Instead of categorizing by the contents of the cells, "
"compare the ones that are empty against the ones that are filled.")
group.add_argument(
"--all_genes", action="store_true",
help="Run analysis on all genes in this file.")
group.add_argument(
'--geneset', default=[], action='append',
help='Name of the geneset to analyze. To specify multiple gene sets, '
'use this parameter multiple times.')
group.add_argument(
"--center_by_phenotype",
help="Center the scores or gene expression values seen for a "
"phenotype to 0. Only one --phenotype can be analyzed in this way "
"at a time. This phenotype should have two possible values. "
"If there are more values, they need to be merged into two groups. "
"Each phenotype must be seen in each BATCH. "
"Format: <BATCH_HEADER>;<PHENO 1 VALUE>[,<PHENO 1 VALUE>,...];"
"<PHENO 2 VALUE>[,<PHENO 2 VALUE>,...]")
group = parser.add_argument_group(title='Output')
group.add_argument(
'-o', dest='filestem', default=None,
help='Prefix used to name files. e.g. "myanalysis".')
group.add_argument(
"--gene_header", action="append", default=[],
help="When naming the output file, use the gene name(s) under this "
"Header (MULTI). If not given, will try to use a combination of the "
"probe ID and gene symbol.")
group = parser.add_argument_group(title='Formatting the boxplot')
group.add_argument(
"--box_mar_left", default=1.0, type=float,
help="Scale margin at left of plot. Default 1.0 (no scaling).")
group.add_argument(
"--box_mar_bottom", default=1.0, type=float,
help="Scale margin at bottom of plot. Default 1.0 (no scaling).")
group.add_argument(
"--box_mar_top", default=1.0, type=float,
help="Scale margin at top of plot. Default 1.0 (no scaling).")
group.add_argument(
"--water_mar_left", default=1.0, type=float,
help="Scale margin at left of plot. Default 1.0 (no scaling).")
group.add_argument(
"--water_mar_bottom", default=1.0, type=float,
help="Scale margin at bottom of plot. Default 1.0 (no scaling).")
group.add_argument(
"--water_mar_top", default=1.0, type=float,
help="Scale margin at top of plot. Default 1.0 (no scaling).")
group.add_argument(
"--water_xlabel_off", action="store_true",
help="Do not label the X axis on the waterfall plot.")
## group.add_argument(
## '--km_title', default=None, help='Title for the Kaplan-Meier plot.')
## group.add_argument(
## '--km_title_size', default=1.0, type=float,
## help='Scale the size of the title. Default 1.0 (no scaling).')
## group.add_argument(
## '--km_mar_title', default=1.0, type=float,
## help="Scale margin for the title. Default 1.0 (no scaling).")
## group.add_argument(
## '--km_subtitle_size', default=1.0, type=float,
## help='Scale the size of the subtitle. Default 1.0 (no scaling).')
## group.add_argument(
## '--km_mar_subtitle', default=1.0, type=float,
## help="Scale margin for the subtitle. Default 1.0 (no scaling).")
## group.add_argument(
## '--km_xlab', default=None,
## help='x-axis label for the Kaplan-Meier plot.')
## group.add_argument(
## '--km_ylab', default=None,
## help='y-axis label for the Kaplan-Meier plot.')
## group.add_argument(
## '--km_legend_size', default=1.0, type=float,
## help='Scale the size of the legend. Default 1.0 (no scaling).')
args = parser.parse_args()
# Check inputs.
assert args.expression_file, (
'Please specify a gene expression or gene set score file.')
assert os.path.exists(args.expression_file), "File not found: %s" % \
args.expression_file
assert args.phenotype_file, 'Please specify a phenotype file.'
assert os.path.exists(args.phenotype_file), "File not found: %s" % \
args.phenotype_file
assert args.num_procs >= 1 and args.num_procs < 100
assert args.phenotype or args.all_phenotypes, \
'Please specify the phenotype to analyze.'
assert not (args.phenotype and args.all_phenotypes)
assert args.gene or args.geneset or args.all_genes, \
'Please specify a gene or gene set.'
assert not (args.gene and args.all_genes)
has_gene = args.gene or args.all_genes
assert not (has_gene and args.geneset), \
'Please specify either a gene or a gene set, not both.'
assert args.box_mar_bottom > 0 and args.box_mar_bottom < 10
assert args.box_mar_left > 0 and args.box_mar_left < 10
assert args.box_mar_top > 0 and args.box_mar_top < 10
assert args.water_mar_bottom > 0 and args.water_mar_bottom < 10
assert args.water_mar_left > 0 and args.water_mar_left < 10
assert args.water_mar_top > 0 and args.water_mar_top < 10
## assert args.km_title_size > 0 and args.km_title_size < 10
## assert args.km_mar_title > 0 and args.km_mar_title < 10
## assert args.km_subtitle_size > 0 and args.km_subtitle_size < 10
## assert args.km_mar_subtitle > 0 and args.km_mar_subtitle < 10
## assert args.km_legend_size > 0 and args.km_legend_size < 10
# Clean up the input.
phenotypes = parse_phenotypes(args.phenotype)
genes = aco.parse_genes(args.gene)
gene_sets = aco.parse_gene_sets(args.geneset)
x = parse_groups(args.center_by_phenotype)
center_batch, center_group1, center_group2 = x
filestem = aco.parse_filestem(args.filestem)
if center_batch:
assert len(phenotypes) == 1, \
"Only 1 phenotype can be centered by groups."
# Read the input files.
M = aco.read_expression_or_geneset_scores(
genes, args.all_genes, gene_sets, args.expression_file)
x = aco.read_clinical_annotations(M, args.phenotype_file)
M, clinical_annots = x
# Filter the phenotype files.
if args.ignore_samples:
x = ignore_samples(M, clinical_annots, args.ignore_samples)
M, clinical_annots = x
if args.all_phenotypes:
phenotypes = sorted(clinical_annots)
phenotypes = [x for x in phenotypes if x not in args.ignore_phenotype]
    # Make sure at least one of the phenotypes is in the clinical
    # annotations.
x = [x for x in phenotypes if x in clinical_annots]
assert x, "Could not find phenotypes: %s" % ", ".join(phenotypes)
phenotypes = x
# Select the genes or gene sets of interest.
if not args.all_genes:
x = genes or gene_sets
M = M.matrix(row=x)
assert M.nrow(), "I could not find any of the genes or gene sets."
# Make sure the batch information is valid.
if center_batch:
assert center_batch in clinical_annots, "Missing annotation: %s" % \
center_batch
assert len(phenotypes) == 1
pheno = phenotypes[0]
values = clinical_annots[pheno]
for x in values:
assert x in center_group1 or x in center_group2, \
"Unknown phenotype: %s" % x
# Calculate the association of each gene and each phenotype.
#expression_or_score = "Expression"
#if gene_sets:
# expression_or_score = "Score"
jobs = [] # list of (function, args, keywds)
keys = []
for x in itertools.product(phenotypes, range(M.nrow())):
pheno_header, i = x
phenotype = clinical_annots[pheno_header]
if args.empty_vs_filled:
x = ["0"] * len(phenotype)
for j in range(len(phenotype)):
if phenotype[j].strip():
x[j] = "1"
phenotype = x
scores = M.value(i, None)
if center_batch:
batch = clinical_annots[center_batch]
scores = center_scores(
scores, batch, phenotype, center_group1, center_group2)
x = phenotype, scores, args.ignore_insufficient_groups
x = calc_association, x, {}
jobs.append(x)
keys.append((pheno_header, i))
retvals = parallel.pyfun(jobs, num_procs=args.num_procs)
assert len(retvals) == len(keys)
# (header, gene_index) -> returned from calc_association
gene_phenotype_scores = {}
for (pheno_header, i), x in zip(keys, retvals):
if x is None:
continue
gene_phenotype_scores[(pheno_header, i)] = x
# Files generated:
# <filestem>.stats.txt Or to STDOUT if no <filestem> given.
# <filestem>.<outcome>.<gene_id>.waterfall.png
# <filestem>.<outcome>.<gene_id>.boxplot.png
# <filestem>.<outcome>.<gene_id>.prism.txt Prism format.
# Write the output in a table with headers:
# <headers> # From the expression or gene set file.
# Phenotype
# Groups # one for each group
# Num Samples # one for each group, separated by semicolon
# Average Expression # one for each group, separated by semicolon
# Relationship
# p-value
outhandle = sys.stdout
if filestem:
outhandle = open("%s.stats.txt" % filestem, 'w')
# Figure out the header for the table.
header = M.row_names() + [
"Phenotype", "Groups", "Num Samples", "Average Expression",
"Delta", "Relationship", "p-value"]
print >>outhandle, "\t".join(header)
# Write out each row of the table.
for x in itertools.product(phenotypes, range(M.nrow())):
pheno_header, gene_i = x
SCORE = gene_phenotype_scores.get((pheno_header, gene_i))
if not SCORE: # couldn't calculate.
continue
gene_names = [M.row_names(x)[gene_i] for x in M.row_names()]
phenotype = pheno_header
group_names = SCORE["group_names"]
I = range(len(group_names))
num_samples = [SCORE["num_samples"][x] for x in I]
mean_score = [SCORE["mean_score"][x] for x in I]
delta = ""
if len(group_names) == 2:
delta = SCORE["delta"]
relationship = SCORE["relationship"]
p_value = SCORE["p_value"]
_fmt = aco._format_list
x = gene_names + [
phenotype, _fmt(group_names), _fmt(num_samples), _fmt(mean_score),
delta, relationship, p_value]
assert len(x) == len(header)
print >>outhandle, "\t".join(map(str, x))
if filestem:
outhandle.close()
# Write out other files.
if not filestem:
return
jobs = [] # list of (fn, args, keywds)
for x in itertools.product(phenotypes, range(M.nrow())):
pheno_header, gene_i = x
SCORE = gene_phenotype_scores.get((pheno_header, gene_i))
if not SCORE:
continue
# Write the PRISM file.
gene_id = aco.format_gene_name(M, None, gene_i)
sample_names = M.col_names(arrayio.COL_ID)
filename = aco._make_filename(
M, gene_i, filestem, pheno_header, args.gene_header,
"prism", "txt")
x1 = (filename,
SCORE["scores"], SCORE["phenotypes"], SCORE["group_names"])
x = write_prism_file, x1, {}
jobs.append(x)
# Make a boxplot.
filename = aco._make_filename(
M, gene_i, filestem, pheno_header, args.gene_header,
"boxplot", "png")
pretty_gene = aco.pretty_gene_name(M, args.gene_header, gene_i)
group_names = SCORE["group_names"]
pheno2scores = {}
for pheno, score in zip(SCORE["phenotypes"], SCORE["scores"]):
if pheno not in pheno2scores:
pheno2scores[pheno] = []
pheno2scores[pheno].append(score)
p_value = "p=%.2g" % SCORE["p_value"]
x1 = (filename, group_names, pheno2scores)
x2 = {
"height" : 1600,
"width" : 1600,
"title" : pretty_gene,
"subtitle" : p_value,
"subtitle_col" : "#A60400",
"subtitle_size" : 1.2,
"subtitle_line" : 0.5,
"ylab" : "Gene Expression",
"mar_bottom" : args.box_mar_bottom,
"mar_left" : args.box_mar_left,
"mar_top" : 1.25,
}
x = boxplot.plot_boxplot, x1, x2
jobs.append(x)
# Make a waterfall plot.
#filename = "%s%s.%s.waterfall.png" % (
# filestem, pheno_header, gene_id_h)
filename = aco._make_filename(
M, gene_i, filestem, pheno_header, args.gene_header,
"waterfall", "png")
pretty = aco.pretty_gene_name(M, args.gene_header, gene_i)
x1 = (
filename, SCORE["scores"], SCORE["phenotypes"],
SCORE["group_names"], sample_names, SCORE["p_value"], pretty,
args.water_mar_bottom, args.water_mar_left, args.water_mar_top,
args.water_xlabel_off)
x = plot_waterfall, x1, {}
jobs.append(x)
parallel.pyfun(jobs, num_procs=args.num_procs)
if __name__ == '__main__':
#import profile
#profile.run("main()")
main()
```
#### File: changlab/scripts/arrayplot.py
```python
import os, sys
## Detect if I'm being run from within GenePattern. If so, then add
## the current directory to the library path.
#if os.path.split(__file__)[0].endswith("gp_pybinreg"):
# sys.path.append(os.path.split(__file__)[0])
MIN_FONTSIZE = 6
MAX_MEGAPIXELS = 256 # No more than 256 megapixel plot.
class ClusterData:
def __init__(
self, gene_tree, array_tree, gene_tree_cluster, array_tree_cluster,
gene_cluster, array_cluster):
self.gene_tree = gene_tree
self.array_tree = array_tree
self.gene_tree_cluster = gene_tree_cluster
self.array_tree_cluster = array_tree_cluster
self.gene_cluster = gene_cluster
self.array_cluster = array_cluster
class PlotLayout:
def __init__(self, heatmap, colorbar, gene_dendrogram, array_dendrogram,
gene_cluster, array_cluster, gene_label, array_label):
self.heatmap = heatmap
self.colorbar = colorbar
self.gene_dendrogram = gene_dendrogram
self.array_dendrogram = array_dendrogram
self.gene_cluster = gene_cluster
self.array_cluster = array_cluster
self.gene_label = gene_label
self.array_label = array_label
class PlotCoords:
def __init__(self, hm_x, hm_y, cb_x, cb_y, gd_x, gd_y, ad_x, ad_y,
gc_x, gc_y, ac_x, ac_y, gl_x, gl_y, al_x, al_y):
self.hm_x, self.hm_y = hm_x, hm_y
self.cb_x, self.cb_y = cb_x, cb_y
self.gd_x, self.gd_y = gd_x, gd_y
self.ad_x, self.ad_y = ad_x, ad_y
self.gc_x, self.gc_y = gc_x, gc_y
self.ac_x, self.ac_y = ac_x, ac_y
self.gl_x, self.gl_y = gl_x, gl_y
self.al_x, self.al_y = al_x, al_y
class HeatmapLayout:
def __init__(
self, nrow, ncol, boxwidth, boxheight, scale_border, grid,
inverse_colors, black0, color_fn):
# Looks OK with even 1 pixel.
#MIN_GRID = 1
#if boxwidth < MIN_GRID or boxheight < MIN_GRID:
# grid = False
self.nrow = nrow
self.ncol = ncol
self.boxwidth = boxwidth
self.boxheight = boxheight
self.inverse_colors = inverse_colors
self.black0 = black0
self.color_fn = color_fn
self.BORDER = int(round(min(boxwidth, boxheight)*0.20) * scale_border)
self.GRID_SIZE = int(round(min(boxwidth, boxheight)*0.10))
if not grid:
self.GRID_SIZE = 0
assert self.GRID_SIZE <= self.BORDER
def width(self):
return self.size()[0]
def height(self):
return self.size()[1]
def size(self):
height = self.BORDER*2
width = self.BORDER*2
height += self.boxheight * self.nrow
width += self.boxwidth * self.ncol
height += (self.nrow-1) * self.GRID_SIZE
width += (self.ncol-1) * self.GRID_SIZE
return width, height
def coord(self, row, col):
x = self.BORDER
y = self.BORDER
x += col * (self.boxwidth + self.GRID_SIZE)
y += row * (self.boxheight + self.GRID_SIZE)
return x, y, self.boxwidth, self.boxheight
def color(self, x):
# x is from [0, 1]. find the nearest color.
import math
if x is None or math.isnan(x):
# Missing value. Return a white box.
#return _get_color(0.5, self.color_fn)
return (255, 255, 255)
assert x >= 0 and x <= 1, "x out of range: %g" % x
return _get_color(
x, self.color_fn, flip_colors=self.inverse_colors,
black0=self.black0)
class ColorbarLayout:
def __init__(
self, cb_width, cb_height, signal_0, signal_1,
ticks, tick_labels, label_sizes, fontsize, inverse_colors, color_fn):
TICK_SIZE = 0.15 # relative to BAR_SHORT
TICK_BUFFER = 0.15 # relative to BAR_SHORT
assert len(ticks) == len(tick_labels)
assert len(ticks) == len(label_sizes)
self.TICK_SIZE = TICK_SIZE
self.TICK_BUFFER = TICK_BUFFER
self._cb_width = cb_width # Width of just the bar.
self._cb_height = cb_height
self._signal_0 = signal_0
self._signal_1 = signal_1
self._ticks = ticks
self._tick_labels = tick_labels
self._label_sizes = label_sizes
self._fontsize = fontsize
self._inverse_colors = inverse_colors
#self._black0 = black0
self._color_fn = color_fn
def is_vertical(self):
return self._cb_height > self._cb_width
def width(self):
width = self._cb_width
if self.is_vertical():
# Vertical skinny colorbar.
# Tick mark.
width += self._cb_width * self.TICK_SIZE
# BUFFER between tick mark and label.
width += self._cb_width * self.TICK_BUFFER
# Text.
text_width = max([x[1] for x in self._label_sizes])
# PIL doesn't calculate text widths very accurately.
# Compensate with a fudge factor. 2 is not big enough.
text_width *= 2.5
width += text_width
width = int(width)
return width
def height(self):
height = self._cb_height
# Bug: For vertical colorbar, does not take into account
# height of labels. Might be cut off.
if not self.is_vertical():
# Horizontal colorbar.
# Tick mark.
height += self._cb_height * self.TICK_SIZE
# BUFFER between tick mark and label.
height += self._cb_height * self.TICK_BUFFER
# Text.
text_height = max([x[0] for x in self._label_sizes])
height += text_height
height = int(height)
return height
def size(self):
# Size taken by the entire color bar, including labels.
return self.width(), self.height()
def bar_width(self):
# Size of just the bar.
return self._cb_width
def bar_height(self):
return self._cb_height
def num_ticks(self):
return len(self._tick_labels)
def tick_coord(self, i):
assert i >= 0 and i < len(self._ticks)
tick = self._ticks[i]
perc = float(tick-self._signal_0)/(self._signal_1-self._signal_0)
if self.is_vertical():
width = self._cb_width * self.TICK_SIZE
height = 1
x = self._cb_width
#y = perc * self._cb_height # high numbers on bottom
y = (1.0-perc) * self._cb_height # high numbers on top
y = min(y, self._cb_height-height)
else:
width = 1
height = self._cb_height * self.TICK_SIZE
x = perc * self._cb_width
y = self._cb_height
x = min(x, self._cb_width-width)
x, y, width, height = int(x), int(y), int(width), int(height)
return x, y, width, height
def tick_label(self, i):
assert i >= 0 and i < len(self._tick_labels)
return self._tick_labels[i]
def label_coord(self, i):
x = self.tick_coord(i)
tick_x, tick_y, tick_width, tick_height = x
label_width, label_height = self._label_sizes[i]
if self.is_vertical():
x = tick_x + tick_width + self._cb_width*self.TICK_BUFFER
y = tick_y - label_height/2.0
else:
x = tick_x - label_width/2.0
y = tick_y + tick_height + self._cb_height*self.TICK_BUFFER
x, y = int(x), int(y)
return x, y
def label_size(self, i):
return self._label_sizes[i]
def fontsize(self):
return self._fontsize
def color(self, x):
# x is from [0, 1]. find the nearest color.
assert x >= 0 and x <= 1, "x out of range: %g" % x
return _get_color(x, self._color_fn, flip_colors=self._inverse_colors)
class DendrogramLayout:
def __init__(
self, num_items, num_other_items,
pixels_per_item, pixels_per_other_item,
size_scale, thickness_scale, tree, tree_cluster, color_fn):
# This dendrogram is measured in 2 dimensions: the dimension
# that spans across the branches, and the dimension that spans
# across the phylogenetic distance. color_fn is for the
# clusters.
import math
self.num_items = num_items
self.pixels_per_item = pixels_per_item
self.tree = tree
self.tree_cluster = tree_cluster
self.color_fn = color_fn
self.max_cluster = None
if self.tree_cluster:
self.max_cluster = max(self.tree_cluster.values())
self._item_size = num_items * pixels_per_item
# Should be the same height as the heatmap. The width should
# be 0.625x the height (perfect ratio).
RATIO = 1.0 / 1.6 / 2.0
# Both dendrograms should have symmetric sizes. So base the
# RATIO on the smaller of the two dimensions.
x1 = num_items * pixels_per_item
x2 = num_other_items * pixels_per_other_item
x = min(x1, x2)
#x = max(x1, x2)
self._dist_size = int(math.ceil(x * RATIO * size_scale))
# Convert the distances from clustering to percentages. The
# percentages indicate how far across the plot to place the
# node. 0% is the furthest distance, while 100% is the
# closest.
# These are actually similarity metrics, so put 1.0 at 100%,
# and the lowest value given in the tree (can be negative) at
# 0%.
lowest = highest = None
for node in tree:
left, right, distance = node
if lowest is None or distance < lowest:
lowest = distance
if highest is None or distance > highest:
highest = distance
assert highest <= 1.0
# Set the closest to always be 1.0.
highest = 1.0
# Set a small border at the end for the root.
self.ROOT_SIZE = 0.15 * (highest - lowest)
lowest -= self.ROOT_SIZE
self.lowest, self.highest = lowest, highest
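        # Hypothetical example: if the tree's similarities span -0.2 to
        # 0.9, highest is forced to 1.0, ROOT_SIZE = 0.15*(1.0-(-0.2)) =
        # 0.18, and lowest becomes -0.38, leaving a small border for the
        # root.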
min_ppi = min(pixels_per_item, pixels_per_other_item)
#min_ppi = pixels_per_item
x = int(math.ceil(min_ppi*0.20 * thickness_scale))
#x = min(max(x, 1), min_ppi)
x = min(max(x, 1), pixels_per_item)
self.LINEWIDTH = x
#print min_ppi, thickness_scale, min_ppi, self.LINEWIDTH
def vthicken(self, x, y, width, height):
import math
np = self.LINEWIDTH - width
if np <= 0:
return x, y, width, height
hnp = int(math.floor(np/2.0))
return x-hnp, y, width+np, height
def hthicken(self, x, y, width, height):
import math
np = self.LINEWIDTH - height
if np <= 0:
return x, y, width, height
hnp = int(math.floor(np/2.0))
return x, y-hnp, width, height+np
def item_size(self):
return self._item_size
def dist_size(self):
return self._dist_size
def color(self, id):
c = 0, 0, 0
n = None
if self.tree_cluster:
n = self.tree_cluster[id]
if n is not None and self.max_cluster:
# If requested, should I use the inverse of the color here?
p = float(n) / self.max_cluster
c = _get_color(p, self.color_fn)
return c
def item_coord(self, item):
x = int(item * self.pixels_per_item + self.pixels_per_item/2.0)
return x
def dist_coord(self, distance):
assert distance >= self.lowest and distance <= self.highest
# Convert the distance to a percentage.
perc = (distance - self.lowest) / (self.highest - self.lowest)
x = int(perc * self.dist_size())
return x
class GeneDendrogramLayout(DendrogramLayout):
def __init__(self, nrow, ncol, boxwidth, boxheight,
size_scale, thickness_scale, tree, tree_cluster, color_fn):
DendrogramLayout.__init__(
self, nrow, ncol, boxheight, boxwidth, size_scale, thickness_scale,
tree, tree_cluster, color_fn)
def width(self):
return self.size()[0]
def height(self):
return self.size()[1]
def size(self):
height = self.item_size() + self.LINEWIDTH
width = self.dist_size() + self.LINEWIDTH
return width, height
def coord(self, row, distance):
x = self.dist_coord(distance)
y = self.item_coord(row)
return x, y
def lines(self, node_num, node_dist, left_num, left_dist,
right_num, right_dist):
node_x, node_y = self.coord(node_num, node_dist)
left_x, left_y = self.coord(left_num, left_dist)
right_x, right_y = self.coord(right_num, right_dist)
#print "NODE", node_x, node_y
#print "LEFT", left_x, left_y
#print "RIGHT", right_x, right_y
# The right node is on top of the left node.
# 3-----4 right (2 lines: vertical and horizontal)
# |
# * node
# |
# 1-----2 left (2 lines: vertical and horizontal)
#
# Separate out lines 1 and 3 in case they are different
# colors.
#
# Add two left lines, then two right lines.
line1 = self.vthicken(node_x, node_y, 1, left_y-node_y+1)
line2 = self.hthicken(node_x, left_y, left_x-node_x+1, 1)
line3 = self.vthicken(node_x, right_y, 1, node_y-right_y+1)
line4 = self.hthicken(node_x, right_y, right_x-node_x+1, 1)
# For some reason, left and right are sometimes switched. If
# that's the case, then adjust the coordinates so there are no
# negative heights.
if line1[3] < 0:
line1 = self.vthicken(node_x, left_y, 1, node_y-left_y+1)
if line3[3] < 0:
line3 = self.vthicken(node_x, node_y, 1, right_y-node_y+1)
assert line1[3] >= 0, "%d %d %d" % (node_x, left_x, right_x)
assert line3[3] >= 0, "%d %d %d" % (node_x, left_x, right_x)
# Make sure the x-coordinates of 2,4 are aligned with 1,3.
# Also, make sure the width is at least the same as the line
# width.
if line1[0] < line2[0]:
delta = line2[0] - line1[0]
x, y, width, height = line2
line2 = x-delta, y, max(width+delta, self.LINEWIDTH), height
x, y, width, height = line4
line4 = x-delta, y, max(width+delta, self.LINEWIDTH), height
lines = [line1, line2, line3, line4]
return lines
def root(self, node_num, node_dist):
root_x, root_y = self.coord(node_num, self.lowest)
node_x, node_y = self.coord(node_num, node_dist)
x = root_x, root_y, node_x-root_x+1, 1
return self.hthicken(*x)
class ArrayDendrogramLayout(DendrogramLayout):
def __init__(self, nrow, ncol, boxwidth, boxheight,
size_scale, thickness_scale, tree, tree_cluster, color_fn):
DendrogramLayout.__init__(
self, ncol, nrow, boxwidth, boxheight, size_scale, thickness_scale,
tree, tree_cluster, color_fn)
def width(self):
return self.size()[0]
def height(self):
return self.size()[1]
def size(self):
height = self.dist_size() + self.LINEWIDTH
width = self.item_size() + self.LINEWIDTH
return width, height
def coord(self, row, distance):
x = self.item_coord(row)
y = self.dist_coord(distance)
return x, y
def lines(self, node_num, node_dist, left_num, left_dist,
right_num, right_dist):
node_x, node_y = self.coord(node_num, node_dist)
left_x, left_y = self.coord(left_num, left_dist)
right_x, right_y = self.coord(right_num, right_dist)
# 1--*--3
# | |
# 2 4
line1 = self.hthicken(left_x, node_y, node_x-left_x+1, 1)
line2 = self.vthicken(left_x, node_y, 1, left_y-node_y+1)
line3 = self.hthicken(node_x, node_y, right_x-node_x+1, 1)
line4 = self.vthicken(right_x, node_y, 1, right_y-node_y+1)
# For some reason, left and right are sometimes switched. If
# that's the case, then adjust the coordinates so there are no
# negative widths.
if line1[2] < 0:
line1 = self.hthicken(node_x, node_y, left_x-node_x+1, 1)
if line3[2] < 0:
line3 = self.hthicken(right_x, node_y, node_x-right_x+1, 1)
assert line1[2] >= 0, "%d %d %d" % (node_x, left_x, right_x)
assert line3[2] >= 0, "%d %d %d" % (node_x, left_x, right_x)
# Make sure the y-coordinates of 2,4 are aligned with 1,3.
# Also, make sure the height is at least the same as the line
# width.
if line1[1] < line2[1]:
delta = line2[1] - line1[1]
x, y, width, height = line2
line2 = x, y-delta, width, max(height+delta, self.LINEWIDTH)
x, y, width, height = line4
line4 = x, y-delta, width, max(height+delta, self.LINEWIDTH)
#print node_x, node_y
lines = [line1, line2, line3, line4]
return lines
def root(self, node_num, node_dist):
root_x, root_y = self.coord(node_num, self.lowest)
node_x, node_y = self.coord(node_num, node_dist)
x = root_x, root_y, 1, node_y-root_y+1
return self.vthicken(*x)
class GeneClusterLayout:
def __init__(self, num_items, item_width, item_height, grid):
array_layout = ArrayClusterLayout(
num_items, item_height, item_width, grid)
self.array_layout = array_layout
def width(self):
return self.size()[0]
def height(self):
return self.size()[1]
def size(self):
height, width = self.array_layout.size()
return width, height
def coord(self, num):
y, x, height, width = self.array_layout.coord(num)
return x, y, width, height
class ArrayClusterLayout:
def __init__(self, num_items, item_width, item_height, grid):
self.num_items = num_items
self.item_width = item_width
self.item_height = item_height
self.BORDER = 1
self.GRID_SIZE = 1
if not grid:
self.GRID_SIZE = 0
assert self.GRID_SIZE <= self.BORDER
def width(self):
return self.size()[0]
def height(self):
return self.size()[1]
def size(self):
height = self.BORDER*2
width = self.BORDER*2
height += self.item_height
width = self.item_width * self.num_items
width += (self.num_items-1) * self.GRID_SIZE
return width, height
def coord(self, num):
# Return a box that bounds the region.
assert num >= 0 and num < self.num_items
x = self.BORDER
y = self.BORDER
x += num * (self.item_width + self.GRID_SIZE)
return x, y, self.item_width, self.item_height
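    # Hedged worked example (hypothetical sizes): with item_width = 20,
    # item_height = 20, and grid enabled (GRID_SIZE = 1), coord(2)
    # returns (1 + 2*21, 1, 20, 20) = (43, 1, 20, 20): the third box,
    # offset by the 1-pixel BORDER plus two item-plus-grid strides.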
class GeneLabelLayout:
def __init__(self, item_height, item_widths, fontsize):
self._item_height = item_height
self._item_widths = item_widths
num_items = len(item_widths)
self._width = max(item_widths)
self._height = item_height * num_items
self._num_items = num_items
self._fontsize = fontsize
def item_height(self):
return self._item_height
def width(self):
return self._width
def height(self):
return self._height
def size(self):
return self.width(), self.height()
def fontsize(self):
return self._fontsize
def coord(self, num):
# Return a box that bounds the region.
assert num >= 0 and num < self._num_items
x = 0
y = num * self._item_height
return x, y, self._item_widths[num], self._item_height
class ArrayLabelLayout:
def __init__(self, item_height, item_widths, fontsize):
# item_height refers to the text, not rotated.
self._item_height = item_height
self._item_widths = item_widths
num_items = len(item_widths)
# _width and _height refer to the layout object.
self._width = item_height * num_items
self._height = max(item_widths)
self._num_items = num_items
self._fontsize = fontsize
def item_height(self):
return self._item_height
def width(self):
return self._width
def height(self):
return self._height
def size(self):
return self.width(), self.height()
def fontsize(self):
return self._fontsize
def coord(self, num):
# Return a box that bounds the region.
assert num >= 0 and num < self._num_items
x = num * self._item_height
y = self._height-self._item_widths[num]
return x, y, self._item_height, self._item_widths[num]
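    # Hedged worked example (hypothetical sizes): with item_height = 21
    # and item_widths = [30, 50], the layout is 42 wide by 50 high, and
    # coord(0) returns (0, 20, 21, 30), so shorter rotated labels are
    # bottom-aligned against the heatmap edge.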
def process_data_set(
MATRIX, cluster, cluster_data, jobname,
gene_indexes, gene_names, gene_file, num_genes_var,
array_indexes, array_file,
log_transform, gene_center, gene_normalize, array_center, array_normalize,
cluster_genes, cluster_arrays, cluster_alg, distance, method,
gene_k, array_k, kmeans_k, scale, gain, autoscale):
assert MATRIX.nrow() > 0, "Matrix has no genes."
MATRIX = filter_matrix(
MATRIX, gene_indexes, gene_names, gene_file, num_genes_var,
array_indexes, array_file)
assert MATRIX.nrow() > 0, "Filtered matrix has no genes."
MATRIX, cluster_data = normalize_matrix(
MATRIX, cluster, cluster_data, log_transform,
gene_center, gene_normalize, array_center, array_normalize)
MATRIX, cluster_data = cluster_matrix(
MATRIX, cluster, cluster_data, cluster_genes, cluster_arrays,
cluster_alg, distance, method, gene_k, array_k, kmeans_k)
# Scale after the clustering, so it doesn't affect the clustering
# results.
x = pretty_scale_matrix(MATRIX, scale, gain, autoscale)
MATRIX_scaled, orig_min, orig_max = x
if jobname:
write_data_set(MATRIX, MATRIX_scaled, cluster_data, jobname)
return MATRIX_scaled, cluster_data, orig_min, orig_max
def make_layout(
MATRIX, cluster_data, signal_0, signal_1, plotlib,
boxwidth, boxheight, scale_border, grid, color_scheme, flip_colors,
black0, colorbar,
cluster_genes, gene_tree_scale, gene_tree_thickness,
cluster_arrays, array_tree_scale, array_tree_thickness,
cluster_alg, label_genes, label_arrays):
from genomicode import colorlib
# Choose the color scheme.
scheme2fn = {
"red" : colorlib.red_shade,
"white" : colorlib.white_shade,
"red-green" : colorlib.rg_array_colors,
"blue-yellow" : colorlib.by_array_colors,
"matlab" : colorlib.matlab_colors,
"bild" : colorlib.bild_colors,
"genespring" : colorlib.genespring_colors,
"yahoo" : colorlib.yahoo_weather_colors,
}
assert color_scheme in scheme2fn, "Unknown color scheme: %s" % color_scheme
color_fn = scheme2fn[color_scheme]
# Make the layout for the heatmap.
hm_layout = HeatmapLayout(
MATRIX.nrow(), MATRIX.ncol(), boxwidth, boxheight,
scale_border, grid, flip_colors, black0, color_fn)
# Make the layout for the colorbar.
cb_layout = None
if colorbar:
x = _calc_colorbar_size(
hm_layout.width(), hm_layout.height(), hm_layout.GRID_SIZE,
boxwidth, boxheight)
width, height = x
x = _calc_colorbar_ticks(
width, height, signal_0, signal_1, plotlib)
ticks, tick_labels, label_sizes, fontsize = x
cb_layout = ColorbarLayout(
width, height, signal_0, signal_1,
ticks, tick_labels, label_sizes, fontsize, flip_colors, color_fn)
# Make layouts for the dendrograms.
gd_layout = ad_layout = None
#if(cluster_genes and cluster_data.gene_tree and gene_tree_scale > 0 and
# cluster_alg == "hierarchical" and MATRIX.ncol() > 1):
if(cluster_data.gene_tree and gene_tree_scale > 0 and
cluster_alg == "hierarchical" and MATRIX.ncol() > 1):
# Only add the dendrogram if hierarchical clustering was
# requested. If clustering not done, then the matrix file
# will not have the GID annotations, and there will be no way
# to match up the genes with the clusters.
# Also should add dendrogram if the clusters were supplied by
# the user in a gtr file.
#print "Making gd_layout."
assert gene_tree_scale > 0
assert gene_tree_thickness > 0
width, height = boxwidth, boxheight
width += hm_layout.GRID_SIZE
height += hm_layout.GRID_SIZE
gd_layout = GeneDendrogramLayout(
MATRIX.nrow(), MATRIX.ncol(), width, height,
gene_tree_scale, gene_tree_thickness,
cluster_data.gene_tree, cluster_data.gene_tree_cluster,
colorlib.matlab_colors)
#if(cluster_arrays and cluster_data.array_tree and array_tree_scale > 0 and
# cluster_alg == "hierarchical" and MATRIX.nrow() > 1):
if(cluster_data.array_tree and array_tree_scale > 0 and
cluster_alg == "hierarchical" and MATRIX.nrow() > 1):
#print "Making ad_layout."
assert array_tree_scale > 0
assert array_tree_thickness > 0
width, height = boxwidth, boxheight
width += hm_layout.GRID_SIZE
height += hm_layout.GRID_SIZE
#print "HERE", width, height
ad_layout = ArrayDendrogramLayout(
MATRIX.nrow(), MATRIX.ncol(), width, height,
array_tree_scale, array_tree_thickness,
cluster_data.array_tree, cluster_data.array_tree_cluster,
colorlib.matlab_colors)
# Make layouts for the clusters.
# Can plot these (k-means) clusters if either kmeans or
# hierarchical clustering was requested. Unlike hierarchical
# clustering, plotting this does not require any annotations in
# the matrix file.
gc_layout = ac_layout = None
if cluster_data.gene_cluster:
gc_layout = GeneClusterLayout(MATRIX.nrow(), boxwidth, boxheight, grid)
if cluster_data.array_cluster:
ac_layout = ArrayClusterLayout(
MATRIX.ncol(), boxwidth, boxheight, grid)
# Make the layout for the gene or array labels.
gl_layout = al_layout = None
gene_labels = array_labels = None
gl_fontsize = al_fontsize = None
# If plotting both gene and array labels, make sure they aren't
# wildly different sizes.
if label_genes:
gl_fontsize = plotlib.fit_fontsize_to_height(boxheight)
if gl_fontsize < MIN_FONTSIZE:
gl_fontsize = None
if label_arrays:
al_fontsize = plotlib.fit_fontsize_to_height(boxwidth)
if al_fontsize < MIN_FONTSIZE:
al_fontsize = None
if gl_fontsize and al_fontsize:
FONT_RATIO = 1.5
gl_fontsize = int(min(gl_fontsize, al_fontsize*FONT_RATIO))
al_fontsize = int(min(al_fontsize, gl_fontsize*FONT_RATIO))
if label_genes and gl_fontsize:
gene_labels = _get_gene_labels(MATRIX)
height = boxheight
height += hm_layout.GRID_SIZE
widths = [plotlib.get_text_size(x, gl_fontsize)[0]
for x in gene_labels]
gl_layout = GeneLabelLayout(height, widths, gl_fontsize)
if label_arrays and al_fontsize:
array_labels = _get_array_labels(MATRIX)
width = boxwidth
width += hm_layout.GRID_SIZE
widths = [plotlib.get_text_size(x, al_fontsize)[0]
for x in array_labels]
al_layout = ArrayLabelLayout(width, widths, al_fontsize)
x = PlotLayout(
hm_layout, cb_layout, gd_layout, ad_layout, gc_layout, ac_layout,
gl_layout, al_layout)
return x
def calc_coords_for_layout(layout):
x = y = 0
def _safe_size(layout):
if layout is None:
return 0, 0
return layout.size()
hm_width, hm_height = _safe_size(layout.heatmap)
cb_width, cb_height = _safe_size(layout.colorbar)
gd_width, gd_height = _safe_size(layout.gene_dendrogram)
ad_width, ad_height = _safe_size(layout.array_dendrogram)
gc_width, gc_height = _safe_size(layout.gene_cluster)
ac_width, ac_height = _safe_size(layout.array_cluster)
gl_width, gl_height = _safe_size(layout.gene_label)
al_width, al_height = _safe_size(layout.array_label)
# Now position the heatmap based on the dendrograms.
hm_x = x + gd_width + gc_width + gl_width
hm_y = y + ad_height + ac_height + al_height
# On X-axis: gene dendrogram, cluster, label, then heatmap.
gd_x, gd_y = x, hm_y+layout.heatmap.BORDER
gc_x, gc_y = gd_x+gd_width, gd_y
gl_x, gl_y = gc_x+gc_width, gd_y
# On Y-axis: array dendrogram, cluster, label, then heatmap.
ad_x, ad_y = hm_x+layout.heatmap.BORDER, y
ac_x, ac_y = ad_x, ad_y+ad_height
al_x, al_y = ad_x, ac_y+ac_height
# Add the colorbar.
cb_x = cb_y = None
if layout.colorbar:
CB_BUFFER = 0.75 # separation from heatmap, relative to BAR_SHORT
bar_width = layout.colorbar.bar_width()
bar_height = layout.colorbar.bar_height()
if layout.colorbar.is_vertical():
cb_x = hm_x + hm_width + CB_BUFFER*bar_width
cb_y = hm_y
# If there are no dendrograms or labels, then need to add
# a buffer so that the labels aren't cut off.
if not layout.array_dendrogram and not layout.array_label:
cb_y += layout.colorbar.fontsize()
else:
cb_x = hm_x
cb_y = hm_y + hm_height + CB_BUFFER*bar_height
if not layout.gene_dendrogram and not layout.gene_label:
cb_x += layout.colorbar.fontsize()
cb_x, cb_y = int(cb_x), int(cb_y)
x = PlotCoords(
hm_x, hm_y, cb_x, cb_y, gd_x, gd_y, ad_x, ad_y,
gc_x, gc_y, ac_x, ac_y, gl_x, gl_y, al_x, al_y)
return x
def _choose_gene_id(MATRIX):
# Given a user-specified matrix, try to pick a good unique ID for
# the genes.
import arrayio
headers = MATRIX.row_names()
# Prioritize some potential ones. Don't use the standard headers,
# e.g. arrayio.ROW_ID, so that we can preserve the user's header.
IDS = ["Probe.Set.ID"]
for id_ in IDS:
if id_ in headers:
return id_
# If no known headers are found, then choose a standard one.
IDS = [arrayio.AFFY_PROBESET_ID, arrayio.GENE_ID, arrayio.ROW_ID]
for id_ in IDS:
if id_ in headers:
return id_
# If no standard ones are found, then just arbitrarily use the
# first column that is not missing any values.
for header in headers:
names = MATRIX.row_names(header)
missing = [x for x in names if not x.strip()]
if not missing:
return header
raise AssertionError, "I could not find an ID for the matrix."
def _choose_gene_label(MATRIX):
import arrayio
names = MATRIX.row_names()
# Prioritize some potential ones.
IDS = [
arrayio.GENE_SYMBOL, "Gene.Symbol", "Gene Symbol", "Symbol",
#arrayio.GENE_DESCRIPTION, "Description",
"DESCRIPTION", # For GCT files. Use the pretty name.
"NAME",
arrayio.GENE_ID, "LocusLink",
arrayio.AFFY_PROBESET_ID, "Probe.Set.ID",
arrayio.ROW_ID
]
# Exception: If the GCT files have generic descriptions,
# e.g. DESC0001, then use the name field instead.
if "DESCRIPTION" in names:
desc = MATRIX.row_names("DESCRIPTION")
if desc[0].startswith("DESC"):
i = IDS.index("DESCRIPTION")
IDS.pop(i)
for id_ in IDS:
if id_ in names:
return id_
if names:
return names[0]
raise AssertionError, "I could not find an ID for the matrix."
def convert_to_pcl(MATRIX, label_name=None):
# Convert the matrix to PCL format.
# Row names <ID> NAME
# Col names
import arrayio
from genomicode import Matrix
# Select from the row names an ID and a NAME.
id_name = _choose_gene_id(MATRIX)
name_name = _choose_gene_label(MATRIX)
# Make sure there aren't any blank gene IDs, or cluster will
# complain. Also, make sure they are unique.
seen = {}
for id_ in MATRIX.row_names(id_name):
id_ = id_.strip()
assert id_, "Missing gene IDs (header %s)." % id_name
assert id_ not in seen, "Duplicate gene ID %s." % id_
seen[id_] = 1
# Should not use "GID" as column name for PCL file. When
# clustering, cluster will add another "GID" column, and then
# there will be two columns called "GID". Rename this to
# something else, if necessary.
pretty_id_name = id_name
if pretty_id_name == "GID":
pretty_id_name = "GID.OLD"
if pretty_id_name == "NAME":
# GCT files uses "NAME" for ID, which conflicts with PCL definition.
pretty_id_name = "ID.NAME"
pretty_name_name = "NAME"
SAMPLE_NAME = arrayio.tab_delimited_format.SAMPLE_NAME
row_order = [pretty_id_name, pretty_name_name]
col_order = [SAMPLE_NAME]
row_names = {}
col_names = {}
synonyms = {}
row_names[pretty_id_name] = MATRIX.row_names(id_name)
row_names[pretty_name_name] = MATRIX.row_names(name_name)
col_names[SAMPLE_NAME] = MATRIX.col_names(arrayio.COL_ID)
synonyms[arrayio.ROW_ID] = pretty_id_name
synonyms[arrayio.COL_ID] = SAMPLE_NAME
pcl_matrix = Matrix.InMemoryMatrix(
MATRIX.slice(), row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
#pcl_matrix = Matrix.add_synonyms(x, synonyms)
assert arrayio.pcl_format.is_matrix(pcl_matrix)
return pcl_matrix
def read_filecol(filecol):
from genomicode import iolib
    # filecol is either <filename> or <filename>,<col>.  Commas are not
    # allowed in the filenames.  <col> should be a 1-based column
    # index.
filename, colnum = filecol, 1
if filecol.find(",") >= 0:
x = filecol.split(",")
assert len(x) == 2, "File should be specified: #,<col>"
filename, colnum = x
colnum = int(colnum)
assert colnum >= 1
assert os.path.exists(filename), "could not find file %s" % filename
data = iolib.split_tdf(open(filename).read())
# Make sure colnum is correct.
for x in data:
assert colnum <= len(x)
names = [x[colnum-1].strip() for x in data]
names = [x for x in names if x]
return names
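# Hedged usage sketch (hypothetical file name): read_filecol("genes.txt")
# returns the non-blank entries of column 1 of genes.txt, while
# read_filecol("genes.txt,2") reads the second tab-delimited column.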
def _parse_gene_names(gene_name_list):
    # This can be a list of comma-separated genes, e.g.
# ["E2F1", "E2F2,E2F3"]
# Need to separate them out.
gene_names = []
for x in gene_name_list:
x = x.split(",")
gene_names.extend(x)
return gene_names
def _parse_color(color_str):
# color_str is <R>,<G>,<B> where each number is an integer from
# 0-255. Return tuple of (<R>, <G>, <B>).
x = color_str.split(",")
assert len(x) == 3, "color should be <R>,<G>,<B>"
x = [int(x) for x in x]
for i in range(len(x)):
assert x[i] >= 0 and x[i] < 256, "color should be 0-255"
return tuple(x)
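# Hedged worked example: _parse_color("128,128,128") returns
# (128, 128, 128), while out-of-range channels such as "0,0,256" trigger
# the assertion.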
def filter_matrix(
MATRIX, gene_indexes, gene_names, gene_filecol, num_genes_var,
array_indexes, array_filecol):
# Filter the genes, maintaining the order specified in the input.
from genomicode import pcalib
from genomicode import parselib
# User provides indexes as 1-based inclusive. Convert to 0-based
# exclusive.
if gene_indexes is not None:
I = []
for s, e in parselib.parse_ranges(gene_indexes):
assert s >= 1
s = s-1
if e > MATRIX.nrow():
e = MATRIX.nrow()
I.extend(range(s, e))
MATRIX = MATRIX.matrix(I, None)
# Use the names specified by the user.
gene_names = _parse_gene_names(gene_names)
# Read gene names from a file.
if gene_filecol:
x = read_filecol(gene_filecol)
gene_names.extend(x)
if array_indexes is not None:
I = []
for s, e in parselib.parse_ranges(array_indexes):
assert s >= 1
s = s-1
if e > MATRIX.ncol():
e = MATRIX.ncol()
I.extend(range(s, e))
MATRIX = MATRIX.matrix(None, I)
# Read array names from a file.
array_names = []
if array_filecol:
x = read_filecol(array_filecol)
array_names.extend(x)
#print "GENES", gene_names
#print "ARRAYS", array_names
#print MATRIX.row_names()
# Specify genes by the annotations. Microarray formats do not
# name genes, but give them annotations. Specify arrays by their
# name.
if gene_names and array_names:
MATRIX = MATRIX.matrix(row=gene_names, col_name=array_names)
elif gene_names:
MATRIX = MATRIX.matrix(row=gene_names)
elif array_names:
MATRIX = MATRIX.matrix(col_name=array_names)
# Now select the genes based on variance.
if num_genes_var and MATRIX.nrow() > num_genes_var:
I = pcalib.select_genes_var(MATRIX._X, num_genes_var)
MATRIX = MATRIX.matrix(I, None)
return MATRIX
def normalize_matrix(
MATRIX, cluster, cluster_data, log_transform, gene_center, gene_normalize,
array_center, array_normalize):
# log_transform boolean
# gene_center None, "mean", or "median"
# gene_normalize None, "ss", or "var"
# array_center None, "mean", or "median"
# array_normalize None, "ss", or "var"
from genomicode import jmath
# If no normalization requested, then just return the matrix.
if (not log_transform and
not gene_center and not gene_normalize and
not array_center and not array_normalize):
return MATRIX, cluster_data
# Log transform data before normalizing the variance.
if log_transform:
MATRIX._X = jmath.log(MATRIX._X, base=2, safe=1)
# Normalize the variance before normalizing the median. If you
# normalize the median first, then the final median may be far
# from 0. Example: Some data points a lot less than 0, a few
# close to 0. Median is 0. After normalizing variance, all data
# points will be less than 0.
#arrayio.tdf.write(MATRIX, sys.stdout)
if gene_normalize == "var":
normalize_genes_var(MATRIX)
if array_normalize == "var":
normalize_arrays_var(MATRIX)
#arrayio.tdf.write(MATRIX, sys.stdout)
args = []
#if log_transform:
# args.append("-l")
if gene_center == "mean":
args.append("-cg a")
elif gene_center == "median":
args.append("-cg m")
if gene_normalize == "ss":
args.append("-ng")
if array_center == "mean":
args.append("-ca a")
elif array_center == "median":
args.append("-ca m")
if array_normalize == "ss":
args.append("-na")
# No clustering.
args.append("-g 0")
args.append("-e 0")
filestem = _cluster(MATRIX, cluster=cluster, *args)
files = find_data_files(filestem)
assert "nrm" in files, "No normalization file produced."
MATRIX, cluster_data = read_data_set(filestem, cluster_data)
_cleanup_cluster(filestem)
return MATRIX, cluster_data
def normalize_genes_var(MATRIX):
from genomicode import jmath
# Normalize the genes in place.
X = MATRIX._X
for i in range(len(X)):
X_i = X[i]
m = jmath.mean(X_i)
# Subtract the mean.
X_i = [x-m for x in X_i]
# Normalize to stddev of 1.
s = jmath.stddev(X_i)
if s != 0:
X_i = [x/s for x in X_i]
# Add the mean back.
X_i = [x+m for x in X_i]
X[i] = X_i
def normalize_arrays_var(MATRIX):
from genomicode import jmath
# Normalize the arrays in place.
X = MATRIX._X
if not X or not X[0]:
return
for i in range(len(X[0])):
X_i = [x[i] for x in X]
m = jmath.mean(X_i)
# Subtract the mean.
X_i = [x-m for x in X_i]
# Normalize to stddev of 1.
s = jmath.stddev(X_i)
if s != 0:
X_i = [x/s for x in X_i]
# Add the mean back.
X_i = [x+m for x in X_i]
for j in range(len(X)):
X[j][i] = X_i[j]
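# Hedged illustration of the two in-place normalizers above: a row or
# column such as [1, 3, 5] is centered to [-2, 0, 2], divided by its
# standard deviation (whatever convention jmath.stddev uses), and shifted
# back, so the original mean of 3 is preserved while the spread becomes 1.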
def cluster_matrix(
MATRIX, cluster, cluster_data,
cluster_genes, cluster_arrays, algorithm, distance, method,
gene_k, array_k, kmeans_k):
from genomicode import clusterio
assert algorithm in ["hierarchical", "kmeans"]
dist2id = {
"uncent-cor" : 1, "pearson" : 2, "abs-uncent-cor" : 3,
"abs-pearson" : 4, "spearman" : 5, "kendall" : 6,
"euclidean" : 7, "city-block" : 8,
}
method2id = {
"complete" : "m", "single" : "s", "centroid" : "c", "average" : "a",
}
# Skip if all conditions are true:
# - not clustering genes
# - not clustering arrays
# - not cutting gene tree (and gene tree already exists)
# - not cutting array tree (and array tree already exists)
if (not cluster_genes and not cluster_arrays and
not (gene_k and cluster_data.gene_tree) and
not (array_k and cluster_data.array_tree)):
return MATRIX, cluster_data
# If not clustering and just re-cutting the tree, then don't
# bother regenerating the clusters.
if cluster_genes or cluster_arrays:
args = []
id_ = dist2id[distance]
if cluster_genes:
args.append("-g %s" % id_)
else:
args.append("-g 0")
if cluster_arrays:
args.append("-e %s" % id_)
else:
args.append("-e 0")
id_ = method2id[method]
args.append("-m %s" % id_)
if algorithm == "kmeans":
args.append("-k %d" % kmeans_k)
filestem = _cluster(MATRIX, cluster=cluster, *args)
files = find_data_files(filestem)
#print filestem, files
assert "cdt" in files, "No cdt file produced."
MATRIX, cluster_data = read_data_set(filestem, cluster_data)
_cleanup_cluster(filestem)
# Cluster the hierarchical trees, if necessary.
gene_tree_cluster = array_tree_cluster = None
# If I haven't reclustered the data, then the old tree is still
# valid.
if not cluster_genes:
gene_tree_cluster = cluster_data.gene_tree_cluster
if not cluster_arrays:
array_tree_cluster = cluster_data.array_tree_cluster
if cluster_data.gene_tree and gene_k:
assert gene_k <= MATRIX.nrow(), "more gene clusters than genes"
gene_tree_cluster = clusterio.cut_dendrogram(
cluster_data.gene_tree, gene_k)
if cluster_data.array_tree and array_k:
assert array_k <= MATRIX.ncol(), "more array clusters than arrays"
array_tree_cluster = clusterio.cut_dendrogram(
cluster_data.array_tree, array_k)
cluster_data.gene_tree_cluster = gene_tree_cluster
cluster_data.array_tree_cluster = array_tree_cluster
return MATRIX, cluster_data
def pretty_scale_matrix(MATRIX, scale, gain, autoscale):
# Find a good default gain value. After scaling, values should
# range from [-1, 1]. Then, for convenience, I will re-scale that
# matrix to [0, 1].
# Will change the MATRIX variable.
import math
from genomicode import jmath
MATRIX = MATRIX.matrix()
nrow, ncol = MATRIX.dim()
X = MATRIX._X
# Choose a default scale so that the average expression level is
# 0.
defscale = 0.0
if autoscale:
x_all = []
for x in X:
x_all.extend(x)
# Use safe_mean to handle missing values.
defscale = -jmath.safe_mean(x_all)
# Apply the scale specified by the user.
for i in range(nrow):
for j in range(ncol):
# Ignore missing values.
if X[i][j] is None:
continue
X[i][j] = X[i][j] + defscale + scale
# Choose a default gain so that the maximum expression level is 1.
defgain = 1.0
if autoscale:
x_max = None
for i in range(nrow):
for j in range(ncol):
# Ignore missing values.
if X[i][j] is None or math.isnan(X[i][j]):
continue
if x_max is None or abs(X[i][j]) > x_max:
x_max = abs(X[i][j])
if x_max is not None:
defgain = 1.0/x_max
# By default, automatically multiply by 2.0, or else
# everything is too dark (empirically).
defgain = defgain * 2.0
# Apply the gain specified by the user.
for i in range(nrow):
for j in range(ncol):
if X[i][j] is None:
continue
# The gain is scaled from the default gain.
X[i][j] = X[i][j] * defgain * gain
#x_min, x_max = 1E9, -1E9
#for i in range(nrow):
# for j in range(ncol):
# x_min = min(x_min, X[i][j])
# x_max = max(x_max, X[i][j])
#print x_min, x_max, defgain, gain, defscale, scale
#for x in X:
# print "\t".join(map(str, x))
# Finally, rescale to [0, 1].
for i in range(nrow):
for j in range(ncol):
if X[i][j] is None:
continue
x = X[i][j]
x = (x + 1) * 0.5
x = max(min(x, 1), 0)
X[i][j] = x
#print defgain, gain, defscale, scale
assert not math.isnan(defgain) and not math.isnan(defscale)
ORIG_min = (0.0*2.0 - 1.0)/(defgain*gain) - (defscale+scale)
ORIG_max = (1.0*2.0 - 1.0)/(defgain*gain) - (defscale+scale)
return MATRIX, ORIG_min, ORIG_max
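# Hedged worked example for pretty_scale_matrix (assumed inputs): with
# autoscale disabled, scale=0, and gain=1, defscale is 0 and defgain is 1,
# so an expression value of 0.5 is plotted as (0.5 + 1) * 0.5 = 0.75 on
# the [0, 1] scale, and the reported original range is [-1, 1].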
def _guess_filestem(file_or_job):
# Examples:
# file_or_job stem
# test test
# GSE5451.l2.mas5.gtr GSE5451.l2.mas5
# GSE1456.mas5.gz GSE1456
# GSE1456.mas5 GSE1456
# out.dat out
# out.pcl out
# out.txt out
# /home/jchang/out.txt /home/jchang/out
#
# Rule:
# - If the file doesn't exist, then use the whole thing as the
# stem.
# - If there's a .gz, then chop it off.
# - If there's an extension, then chop it off.
if not _exists_nz(file_or_job):
return file_or_job
stem = file_or_job
# Chop off the .gz at the end.
COMPRESSION_EXTS = [".gz", ".bz2", ".zip"]
s, e = os.path.splitext(stem)
for ext in COMPRESSION_EXTS:
if e.lower() == ext:
stem = s
break
# Chop off one more extension, if it exists.
stem, e = os.path.splitext(stem)
return stem
def find_data_files(file_or_stem):
# Return a dictionary of extension -> filename.
# Needs to take a stem, because it can be hard to find the file
# names based on the CDT file, because the stem is different.
# Example files:
# <stem>.gtc
# <stem>_K_A5.kag
# <stem>_K_G5.kgg
# <stem>_K_G5_A5.cdt Hard to cut into a stem.
fullstem = _guess_filestem(file_or_stem)
# Bug: should do a case insensitive search.
path, stem = os.path.split(fullstem)
if not path:
path = "."
EXTENSIONS = [
"nrm", "pcl", "cdt", "gtr", "atr", "gtc", "atc", "kgg", "kag"]
ext2file = {}
for file_ in os.listdir(path):
if not file_.startswith(stem):
continue
f, e = os.path.splitext(file_)
if e.startswith("."):
e = e[1:]
if e not in EXTENSIONS:
continue
# Do some checking to make sure file looks reasonable.
recognize_file = False
if f == stem:
recognize_file = True
elif f.startswith("%s_K_A" % stem):
recognize_file = True
elif f.startswith("%s_K_G" % stem):
recognize_file = True
if not recognize_file:
continue
ext2file[e] = os.path.join(path, file_)
return ext2file
def read_data_set(file_or_stem, default=None):
import arrayio
from genomicode import parselib
from genomicode import Matrix
from genomicode import clusterio
files = find_data_files(file_or_stem)
#print "FOUND", files; sys.exit(0)
filename = file_or_stem
if not os.path.exists(filename):
# If this file does not exist, then look for a CDT, NRM, or
# PCL file (in that order).
DATA_EXTS = ["cdt", "nrm", "pcl"]
DATA_EXTS = [x for x in DATA_EXTS if x in files]
assert DATA_EXTS, "I could not find the expression data file."
ext = DATA_EXTS[0]
filename = files[ext]
MATRIX = arrayio.read(filename)
# If no gene IDs were provided, then just make some up.
if not MATRIX.row_names():
header = "GENE.ID"
MATRIX._row_order.append(header)
x = ["R%s" % x for x in parselib.pretty_range(0, MATRIX.nrow())]
MATRIX._row_names[header] = x
synonyms = {}
synonyms[arrayio.ROW_ID] = header
#MATRIX = Matrix.add_synonyms(MATRIX, synonyms)
MATRIX._synonyms.update(synonyms)
if not MATRIX.col_names():
header = arrayio.tdf.SAMPLE_NAME
MATRIX._col_order.append(header)
x = ["C%s" % x for x in parselib.pretty_range(0, MATRIX.ncol())]
MATRIX._col_names[header] = x
synonyms = {}
synonyms[arrayio.COL_ID] = header
#MATRIX = Matrix.add_synonyms(MATRIX, synonyms)
MATRIX._synonyms.update(synonyms)
# Read the clustering files.
formats = [
("gtr", clusterio.read_gtr_file),
("atr", clusterio.read_atr_file),
("gtc", clusterio.read_gtc_file),
("atc", clusterio.read_atc_file),
("kgg", clusterio.read_kgg_file),
("kag", clusterio.read_kag_file),
]
data = {} # ext -> output
for ext, read_fn in formats:
if ext not in files:
continue
data[ext] = read_fn(files[ext])
if default is None:
default = ClusterData(None, None, None, None, None, None)
cluster_data = ClusterData(
data.get("gtr", default.gene_tree),
data.get("atr", default.array_tree),
data.get("gtc", default.gene_tree_cluster),
data.get("atc", default.array_tree_cluster),
data.get("kgg", default.gene_cluster),
data.get("kag", default.array_cluster),
)
return MATRIX, cluster_data
def write_data_set(MATRIX, SCALED, cluster_data, jobname):
from arrayio import tab_delimited_format
from genomicode import clusterio
matrix_file = "%s.cdt" % jobname
tab_delimited_format.write(MATRIX, open(matrix_file, 'w'))
scaled_file = "%s_s.cdt" % jobname
tab_delimited_format.write(SCALED, open(scaled_file, 'w'))
cd = cluster_data
formats = [
("gtr", clusterio.write_gtr_file, cd.gene_tree),
("atr", clusterio.write_atr_file, cd.array_tree),
("gtc", clusterio.write_gtc_file, cd.gene_tree_cluster),
("atc", clusterio.write_atc_file, cd.array_tree_cluster),
("kgg", clusterio.write_kgg_file, cd.gene_cluster),
("kag", clusterio.write_kag_file, cd.array_cluster),
]
for ext, write_fn, data in formats:
if not data:
continue
outfile = "%s.%s" % (jobname, ext)
write_fn(data, open(outfile, 'w'))
def plot(
filename, MATRIX, cluster_data, plotlib, layout, coords,
border_color, grid_color):
# Calculate the plot width and height.
plot_width = coords.hm_x + layout.heatmap.width()
plot_height = coords.hm_y + layout.heatmap.height()
if layout.colorbar:
x = coords.cb_x + layout.colorbar.width()
plot_width = max(plot_width, x)
x = coords.cb_y + layout.colorbar.height()
plot_height = max(plot_height, x)
# Plot each element of the figure.
image = plotlib.image(plot_width, plot_height)
if layout.gene_dendrogram:
plot_dendrogram(
plotlib, image, MATRIX, coords.gd_x, coords.gd_y,
layout.gene_dendrogram, "GENE", cluster_data.gene_tree)
if layout.array_dendrogram:
plot_dendrogram(
plotlib, image, MATRIX, coords.ad_x, coords.ad_y,
layout.array_dendrogram, "ARRAY", cluster_data.array_tree)
if layout.gene_cluster:
plot_gene_clusters(
plotlib, image, MATRIX, coords.gc_x, coords.gc_y,
layout.gene_cluster, cluster_data.gene_cluster,
border_color, grid_color)
if layout.array_cluster:
plot_array_clusters(
plotlib, image, MATRIX, coords.ac_x, coords.ac_y,
layout.array_cluster, cluster_data.array_cluster,
border_color, grid_color)
if layout.gene_label:
gene_labels = _get_gene_labels(MATRIX)
plot_gene_labels(
plotlib, image, MATRIX, coords.gl_x, coords.gl_y,
layout.gene_label, gene_labels)
if layout.array_label:
array_labels = _get_array_labels(MATRIX)
plot_array_labels(
plotlib, image, MATRIX, coords.al_x, coords.al_y,
layout.array_label, array_labels)
plot_matrix(
plotlib, image, MATRIX, coords.hm_x, coords.hm_y, layout.heatmap,
border_color, grid_color)
if layout.colorbar:
plot_colorbar(
plotlib, image, coords.cb_x, coords.cb_y, layout.colorbar)
plotlib.write(image, open(filename, 'w'))
def plot_matrix(
plotlib, image, MATRIX, xoff, yoff, layout, border_color, grid_color):
# (0, 0, 0) is too dark for small box sizes. 100 looks too washed
# out. 50-75 is about right.
#GRID_COLOR = (0, 0, 0)
#GRID_COLOR = (75, 75, 75)
#BORDER_COLOR = (0, 0, 0)
width, height = layout.size()
# Draw the underlying grid.
plotlib.rectangle(image, xoff, yoff, width, height, grid_color)
# Draw a border around the heatmap.
# Draw top, right, bottom, and left borders.
#plotlib.rectangle(image, xoff, yoff, width, height, None, border_color)
plotlib.rectangle(image, xoff, yoff, width, layout.BORDER, border_color)
plotlib.rectangle(
image, xoff+width-layout.BORDER, yoff, layout.BORDER, height,
border_color)
plotlib.rectangle(
image, xoff, yoff+height-layout.BORDER, width, layout.BORDER,
border_color)
plotlib.rectangle(image, xoff, yoff, layout.BORDER, height, border_color)
# Draw the actual matrix.
X = MATRIX._X
for i in range(MATRIX.nrow()):
for j in range(MATRIX.ncol()):
x = X[i][j]
c = layout.color(x)
# Find the coordinates and plot it.
x, y, width, height = layout.coord(i, j)
plotlib.rectangle(image, x+xoff, y+yoff, width, height, c)
def plot_colorbar(plotlib, image, xoff, yoff, layout):
#yoff += 100
#xoff += 100
BLACK = (0, 0, 0)
OUTLINE_COLOR = (0, 0, 0)
TICK_COLOR = (50, 50, 50)
# Draw the colorbar.
cb_width, cb_height = layout.bar_width(), layout.bar_height()
if layout.is_vertical():
for i in range(cb_height):
#color = layout.color(float(i)/cb_height) # big on bottom
color = layout.color(1.0-(float(i)/cb_height)) # big on top
plotlib.line(image, xoff, yoff+i, cb_width, 1, color)
else:
for i in range(cb_width):
color = layout.color(float(i)/cb_width)
plotlib.line(image, xoff+i, yoff, 1, cb_height, color)
plotlib.rectangle(
image, xoff, yoff, cb_width, cb_height, None, outline=OUTLINE_COLOR)
# Draw tickmarks.
for i in range(layout.num_ticks()):
x = layout.tick_coord(i)
x, y, width, height = x
plotlib.line(image, xoff+x, yoff+y, width, height, TICK_COLOR)
# Label the tickmarks.
fontsize = layout.fontsize()
if fontsize < MIN_FONTSIZE:
return
labels = [layout.tick_label(i) for i in range(layout.num_ticks())]
label_sizes = [layout.label_size(i) for i in range(layout.num_ticks())]
max_width = max([x[0] for x in label_sizes])
#max_height = max([x[1] for x in label_sizes])
for i, label in enumerate(labels):
x, y = layout.label_coord(i)
# Right align the vertical colorbar.
if cb_height > cb_width:
width, height = label_sizes[i]
x += max_width - width
plotlib.text(image, xoff+x, yoff+y, label, fontsize, BLACK)
def plot_dendrogram(plotlib, image, MATRIX, xoff, yoff, layout, dim, tree):
import arrayio
from genomicode import clusterio
if dim == "GENE":
n = "GID" # Use the gene ID if available.
assert n in MATRIX.row_names(), "Gene dendrogram not available."
ids = MATRIX.row_names(n)
elif dim == "ARRAY":
n = "AID"
assert n in MATRIX.col_names(), "Array dendrogram not available."
ids = MATRIX.col_names(n)
else:
raise AssertionError, "Unknown dim: %s" % dim
# num is the row or column of the node.
id2num = {} # gene or node id -> num
id2distance = {} # gene or node id -> distance
# Find id2num and id2distance for each of the leaves.
for i, x in enumerate(ids):
id_ = clusterio.parse_node(x)
id2num[id_] = i
id2distance[id_] = 1
#print tree
# Set id2num and id2distance the internal nodes.
for i, node in enumerate(tree):
id_ = -(i+1)
left, right, distance = node
left_num = id2num[left]
right_num = id2num[right]
id2num[id_] = (left_num + right_num)/2.0
id2distance[id_] = distance
#print id2num
# Draw the nodes of the tree.
for i, node in enumerate(tree):
node_id = -(i+1)
left_id, right_id, node_dist = node
node_num = id2num[node_id]
left_num = id2num[left_id]
right_num = id2num[right_id]
left_dist = id2distance[left_id]
right_dist = id2distance[right_id]
# Two left lines, then two right lines.
lines = layout.lines(
node_num, node_dist, left_num, left_dist, right_num, right_dist)
ll1, ll2, lr1, lr2 = lines
#print node
#print lines
# Line 1 is the top joining line, and line 2 is the line that
# leads to the node.
cl1 = cl2 = layout.color(left_id)
cr1 = cr2 = layout.color(right_id)
        # Color the node and everything beneath it.  Don't color the
        # lines on top of the node.  The exception is when a child is a
        # single leaf; then the bottom-most line leading to that leaf
        # keeps the leaf's cluster color.
if cl1 != cr1:
cl1 = cr1 = (0, 0, 0)
if left_id < 0:
cl2 = (0, 0, 0)
if right_id < 0:
cr2 = (0, 0, 0)
data = [(ll1, cl1), (ll2, cl2), (lr1, cr1), (lr2, cr2)]
#data = [(ll1, cl1), (lr1, cr1)]
#data = [(ll2, cl2), (lr2, cr2)]
for line, color in data:
x, y, width, height = line
#print x, y, width, height
assert width >= 0 and height >= 0, "%d %d" % (width, height)
plotlib.rectangle(image, x+xoff, y+yoff, width, height, color)
c = layout.color(node_id)
x, y, width, height = layout.root(node_num, node_dist)
plotlib.rectangle(image, x+xoff, y+yoff, width, height, c)
def plot_gene_clusters(
plotlib, image, X, xoff, yoff, layout, clusters, border_color, grid_color):
import arrayio
from genomicode import colorlib
assert X.nrow() == len(clusters), "%d %d" % (X.nrow(), len(clusters))
#GRID_COLOR = (75, 75, 75)
#BORDER_COLOR = (0, 0, 0)
# Figure out what kind of IDs to use.
ID_NAMES = ["GID", "NAME", arrayio.ROW_ID]
ID_NAMES = [x for x in ID_NAMES if x in X.row_names() or x in X._synonyms]
ids = [x[0] for x in clusters]
for ID_NAME in ID_NAMES:
ID = X.row_names(ID_NAME)
num_found = 0
for id_ in ids:
if id_ in ID:
num_found += 1
#print ID_NAME, num_found, len(ids), ids[:3]
if num_found == len(ids):
break
else:
raise AssertionError, "I could not find the cluster IDs: %s" % \
str(X.row_names())
GID = X.row_names(ID_NAME)
gid2i = {}
for i, gid in enumerate(GID):
gid2i[gid] = i
# Draw the underlying grid, and a border around the whole thing.
width, height = layout.size()
plotlib.rectangle(image, xoff, yoff, width, height, grid_color)
plotlib.rectangle(image, xoff, yoff, width, height, None, border_color)
max_cluster = max([x[1] for x in clusters])
for gid, n in clusters:
i = gid2i[gid]
x, y, width, height = layout.coord(i)
c = 255, 255, 255
if n is not None:
p = 0.5
if max_cluster > 0:
p = float(n) / max_cluster
# Bug: This should be set in the layout.
c = _get_color(p, colorlib.matlab_colors)
plotlib.rectangle(image, x+xoff, y+yoff, width, height, c)
def plot_array_clusters(
plotlib, image, X, xoff, yoff, layout, clusters, border_color, grid_color):
import arrayio
from genomicode import colorlib
assert X.ncol() == len(clusters)
#GRID_COLOR = (75, 75, 75)
#BORDER_COLOR = (0, 0, 0)
# Figure out what kind of IDs to use.
ID_NAMES = [
"AID", arrayio.COL_ID, arrayio.tab_delimited_format.SAMPLE_NAME]
ID_NAMES = [x for x in ID_NAMES if x in X.col_names() or x in X._synonyms]
#ID_NAMES = [x for x in ID_NAMES if x in X.col_names()]
ids = [x[0] for x in clusters]
for ID_NAME in ID_NAMES:
ID = X.col_names(ID_NAME)
num_found = 0
for id_ in ids:
if id_ in ID:
num_found += 1
if num_found == len(ids):
break
else:
raise AssertionError, "I could not find the array IDs."
AID = X.col_names(ID_NAME)
aid2i = {}
for i, aid in enumerate(AID):
aid2i[aid] = i
# Draw the underlying grid, and a border around the whole thing.
width, height = layout.size()
plotlib.rectangle(image, xoff, yoff, width, height, grid_color)
plotlib.rectangle(image, xoff, yoff, width, height, None, border_color)
max_cluster = max([x[1] for x in clusters])
for aid, n in clusters:
i = aid2i[aid]
x, y, width, height = layout.coord(i)
c = 255, 255, 255
if n is not None:
p = 0.5
if max_cluster > 0:
p = float(n) / max_cluster
# Bug: This should be set in the layout.
c = _get_color(p, colorlib.matlab_colors)
plotlib.rectangle(image, x+xoff, y+yoff, width, height, c)
def plot_gene_labels(plotlib, image, X, xoff, yoff, layout, labels):
fontsize = layout.fontsize()
if fontsize < MIN_FONTSIZE:
return
#print layout.__class__.__name__
for i in range(X.nrow()):
x, y, width, height = layout.coord(i)
#print x, y, width, height, fontsize
w, h = plotlib.get_text_size(labels[i], fontsize)
# Right-align the text. Need layout width, not width of item.
x += max(layout.width()-w, 0)
# Vertical align the text.
y += (height - h)/2
plotlib.text(image, xoff+x, yoff+y, labels[i], fontsize, (0, 0, 0))
def plot_array_labels(plotlib, image, X, xoff, yoff, layout, labels):
fontsize = layout.fontsize()
if fontsize < MIN_FONTSIZE:
return
for i in range(X.ncol()):
x, y, width, height = layout.coord(i)
w, h = plotlib.get_text_size(labels[i], fontsize)
#print "HERE1", xoff, yoff, x, y, width, height, fontsize
# Center the text.
x += (width-h)/2
plotlib.text90(image, xoff+x, yoff+y, labels[i], fontsize, (0, 0, 0))
def _cluster(MATRIX, *args, **params):
import tempfile
import subprocess
import arrayio
from genomicode import filelib
from genomicode import config
cluster = params.get("cluster") or config.cluster or "cluster"
path = "."
x, filestem = tempfile.mkstemp(dir=path); os.close(x)
filelib.safe_unlink(filestem)
# Write the data set in PCL format.
# This implementation requires a matrix in PCL format.
MATRIX = convert_to_pcl(MATRIX)
pcl_file = filestem + ".pcl"
arrayio.pcl_format.write(MATRIX, open(pcl_file, 'w'))
args = list(args)
args.append('-f "%s"' % pcl_file)
cmd = "%s %s" % (cluster, " ".join(args))
#print cmd
#w, r = os.popen4(cmd)
p = subprocess.Popen(
cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=True)
w, r = p.stdin, p.stdout
w.close()
output = r.read()
#print output
if output.find("cluster: command not found") >= 0:
raise AssertionError, "cluster: command not found"
elif output.find("cluster: No such file or directory") >= 0:
raise AssertionError, output.strip()
elif output.find("command not found") >= 0:
raise AssertionError, "%s: command not found" % cluster
elif output.find("Error reading file") >= 0:
raise AssertionError, "%s\n%s" % (cmd, output)
elif output.find("cluster <options> graphfile") >= 0:
raise AssertionError, "ran Graphviz cluster, not Cluster 3.0"
return filestem
def _cleanup_cluster(filestem):
# Just remove all the files with the filestem.
from genomicode import filelib
path, filestem = os.path.split(filestem)
for file_ in os.listdir(path):
if not file_.startswith(filestem):
continue
filename = os.path.join(path, file_)
filelib.safe_unlink(filename)
def _get_gene_ids(MATRIX):
names = MATRIX.row_names()
KNOWN_NAMES = ["GID", "NAME", "GWEIGHT"]
UNKNOWN = [x for x in names if x not in KNOWN_NAMES]
# If only one column other than KNOWN_NAMES, then it must be the
# one supplied by the user. Use that as the ID.
if len(UNKNOWN) == 1:
return MATRIX.row_names(UNKNOWN[0])
# If multiple columns other than KNOWN_NAMES, then look for
# possible ones. If none are available, then just arbitrarily
# choose the first one, alphabetically.
if len(UNKNOWN) > 1:
POSSIBLE_IDS = ["Probe.Set.ID", "LocusLink", "Gene.Symbol"]
for n in POSSIBLE_IDS:
if n in names:
return MATRIX.row_names(n)
x = sorted(UNKNOWN)
return MATRIX.row_names(x[0])
# If no columns other than KNOWN_NAMES, then select the ones from
# KNOWN_NAMES.
for n in KNOWN_NAMES:
if n in names:
return MATRIX.row_names(n)
raise AssertionError, "I could not find any possible gene IDs."
def _get_array_ids(MATRIX):
import arrayio
return MATRIX.col_names(arrayio.COL_ID)
def _get_gene_labels(MATRIX):
name = _choose_gene_label(MATRIX)
labels = MATRIX.row_names(name)[:]
# Gene Symbols can contain "///". If this exists, then truncate
# it for readability.
for i in range(len(labels)):
j = labels[i].find("///")
if j >= 0:
labels[i] = labels[i][:j].strip()
return labels
def _get_array_labels(MATRIX):
import arrayio
labels = MATRIX.col_names(arrayio.COL_ID)[:]
# Array labels might be:
# 122.CEL.gz
# Cut off the .gz and .CEL for readability.
for i in range(len(labels)):
x = labels[i]
x = x.replace(".gz", "")
x = x.replace(".CEL", "")
labels[i] = x
return labels
def _calc_colorbar_size(hm_width, hm_height, grid_size, box_width, box_height):
# Calculate the dimensions of the colorbar. The size of the
# bar should be calculated based on the size of the heatmap,
# and also the size of the boxes in the heatmap.
#
# MAX_BOXES of 100 is too big for signature heatmap from pybinreg.
BAR_LONG = 0.50 # the long dimension, relative to heatmap
BAR_SHORT = 0.075 # short dimension, relative to long_ratio
MAX_BOXES = 50 # Maximum boxes in the long dimension.
MIN_BOXES = 1 # Minimum boxes in the short dimension.
vertical = hm_height > hm_width
if vertical:
# x1 is the upper limit. Do not make bigger than this.
x1 = hm_height * BAR_LONG
# These are both lower and upper limits. Make the bigger one
# of these.
x2 = (box_height+grid_size) * MAX_BOXES
x3 = ((box_width+grid_size)*MIN_BOXES)/BAR_SHORT
x4 = max(x2, x3)
height = max(min(x1, x4), 1)
width = max(height * BAR_SHORT, 1)
else:
x1 = hm_width * BAR_LONG
x2 = (box_width+grid_size) * MAX_BOXES
x3 = ((box_height+grid_size)*MIN_BOXES)/BAR_SHORT
x4 = max(x2, x3)
width = max(min(x1, x4), 1)
height = max(width * BAR_SHORT, 1)
width, height = int(width), int(height)
return width, height
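# Hedged worked example for _calc_colorbar_size (hypothetical sizes): for
# a 400x800 heatmap with 20x20 boxes and a 1-pixel grid, the bar is
# vertical; x1 = 800*0.50 = 400, x2 = 21*50 = 1050, x3 = 21/0.075 = 280,
# so the bar ends up min(400, max(1050, 280)) = 400 high and
# int(400*0.075) = 30 wide.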
def _calc_colorbar_ticks(
cb_width, cb_height, signal_0, signal_1, plotlib):
import math
from genomicode import graphlib
TEXT_SIZE = 0.75
MAX_TICKS = 20
vertical = cb_height > cb_width
# Calculate the minimum and maximum number to label.
assert not math.isnan(signal_0) and not math.isnan(signal_1)
assert signal_0 < signal_1, "%g %g" % (signal_0, signal_1)
delta = signal_1 - signal_0
num_decimals = max(-int(math.floor(math.log(delta, 10))), 0)+1
# Where to start labels. Give a larger range than might fit
# and shrink it later based on where the tick marks are.
label_min = math.floor(signal_0 * 10**num_decimals)/10**num_decimals
label_max = math.ceil(signal_1 * 10**num_decimals)/10**num_decimals
#print signal_0, signal_1, label_min, label_max, num_decimals
assert label_min < label_max
# Calculate the size of the font for the labels.
text_height = int(min(cb_width, cb_height) * TEXT_SIZE)
fontsize = plotlib.fit_fontsize_to_height(text_height)
# Try different number of tick marks until the labels fit in the
# colorbar.
# Can't have more ticks than the size of the colorbar.
num_ticks = min(MAX_TICKS, max(cb_width, cb_height)/2)
while num_ticks > 0:
# Calculate the ticks and remove any that are off the scale.
ticks = graphlib.place_ticks(label_min, label_max, num_ticks=num_ticks)
ticks = [x for x in ticks if x >= signal_0 and x <= signal_1]
assert ticks, "I couldn't place any tick marks."
# Format the tick labels.
x = [max(len(str(abs(x)%1))-2, 0) for x in ticks]
digits = max(x)
tick_labels = ["%.*f" % (digits, x) for x in ticks]
# Calculate the sizes of the tick labels.
label_sizes = [plotlib.get_text_size(x, fontsize) for x in tick_labels]
# See if this fits.
if vertical:
total = sum([x[1] for x in label_sizes])
else:
total = sum([x[0] for x in label_sizes])
if total < max(cb_width, cb_height)/2:
break
num_ticks = min(num_ticks, len(ticks))-1
assert num_ticks, "I couldn't place any tick marks."
return ticks, tick_labels, label_sizes, fontsize
_COLOR_CACHE = {} # (fn, num) -> list
def _get_color(perc, color_fn, num_colors=256, black0=False,
flip_colors=False):
# Convert a percentage into a (r, g, b) color.
# r, g, b are numbers from 0 to 255.
global _COLOR_CACHE
import math
assert perc >= 0.0 and perc <= 1.0
if black0 and perc < 1.0/num_colors:
return 0, 0, 0
if flip_colors:
perc = 1.0 - perc
x = color_fn, num_colors
if x not in _COLOR_CACHE:
_COLOR_CACHE[x] = color_fn(num_colors)
colors = _COLOR_CACHE[x]
i = min(int(math.floor(perc*num_colors)), num_colors-1)
r, g, b = colors[i]
r = min(int(math.floor(r*256)), 255)
g = min(int(math.floor(g*256)), 255)
b = min(int(math.floor(b*256)), 255)
#print perc, r, g, b
return r, g, b
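# Hedged note on _get_color: for example, perc = 0.25 with the default
# num_colors = 256 selects palette entry min(int(0.25*256), 255) = 64
# from color_fn's output and scales each 0-1 channel to an integer in
# 0-255; flip_colors would map the same perc to entry 192 instead.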
def _exists_nz(filename):
import stat
if not os.path.exists(filename):
return None
if os.stat(filename)[stat.ST_SIZE] > 0:
return filename
return None
def main():
from optparse import OptionParser, OptionGroup
# Smallest boxes in which the labels can be clearly printed.
DEF_WIDTH = 20
DEF_HEIGHT = 20
usage = "usage: %prog [options] filename\n" + \
" Values should range from -1 to 1."
parser = OptionParser(usage=usage, version="%prog 02")
parser.add_option(
"--cluster", dest="autoclust", default=False, action="store_true",
help="Will automatically use the options: --gc mean --gn var "
"-g -a --gl --al")
parser.add_option(
"", "--libpath", dest="libpath", action="append", default=[],
help="Add to the Python library search path.")
parser.add_option(
"", "--cluster_app", dest="cluster", default=None,
help="Path to cluster program.")
group = OptionGroup(
parser, "Input/Output",
"If not specified, I will search the current directory for "
"reasonable defaults.")
group.add_option(
"-i", "--gene_indexes", dest="gene_indexes", type="string",
default=None,
help="Indexes of genes to show, e.g. 1-50,75 (1-based, inclusive).")
group.add_option(
"-n", "--gene_names", dest="gene_names", type="string",
action="append", default=[],
help="Comma-separated list of genes to show.")
group.add_option(
"", "--gene_file", dest="gene_file", type="string", default=None,
help="<file>[,<1-based column num>] containing the names of genes.")
group.add_option(
"", "--array_indexes", dest="array_indexes", type="string",
default=None,
help="Indexes of arrays to show, e.g. 1-50,75 (1-based, inclusive).")
group.add_option(
"", "--array_file", dest="array_file", type="string", default=None,
help="<file>[,<1-based column num>] containing the names of arrays.")
group.add_option(
"-j", "--jobname", dest="jobname", type="string", default=None,
help="Save the processed matrix to a file.")
group.add_option(
"-o", "--outfile", dest="outfile", type="string", default=None,
help="Save the image to this file.")
group.add_option(
"--format", dest="image_format", type="choice",
choices=["png", "svg"], default="png",
help="Image format: png (default) or svg.")
parser.add_option_group(group)
group = OptionGroup(parser, "Normalization")
group.add_option(
"-l", "--log_transform", dest="log_transform", default=False,
action="store_true",
help="Log transform the data first.")
group.add_option(
"--select_genes_var", dest="select_genes_var", type="int",
default=None,
help="Select this number of genes based on variance.")
group.add_option(
"--gc", "--gene_center", dest="gene_center", type="choice",
choices=["mean", "median"], default=None,
help="Center each gene by: mean, median.")
group.add_option(
"--gn", "--gene_normalize", dest="gene_normalize", default=None,
choices=["ss", "var"],
help="Normalize each gene by: ss (sum of squares), var (variance).")
group.add_option(
"--ac", "--array_center", dest="array_center", type="choice",
choices=["mean", "median"], default=None,
help="Center each array by: mean, median.")
group.add_option(
"--an", "--array_normalize", dest="array_normalize", default=None,
choices=["ss", "var"],
help="Normalize each array by: ss (sum of squares), var (variance).")
parser.add_option_group(group)
group = OptionGroup(parser, "Clustering")
group.add_option(
"-g", "--cluster_genes", dest="cluster_genes", default=False,
action="store_true", help="Cluster the genes.")
group.add_option(
"-a", "--cluster_arrays", dest="cluster_arrays", default=False,
action="store_true", help="Cluster the arrays.")
group.add_option(
"--algorithm", dest="cluster_alg", type="choice",
choices=["hierarchical", "kmeans"], default="hierarchical",
help="Choose a clustering algorithm: hierarchical (default), "
"kmeans.")
group.add_option(
"--distance", dest="distance", type="choice",
choices=["uncent-cor", "pearson", "abs-uncent-cor", "abs-pearson",
"spearman", "kendall", "euclidean", "city-block"],
default="uncent-cor",
help="Choose a distance metric: uncent-cor (default), pearson, "
"abs-uncent-cor, abs-pearson, spearman, kendall, euclidean, "
"or city-block.")
group.add_option(
"--method", dest="method", type="choice",
choices=["complete", "single", "centroid", "average"],
default="complete",
help="Choose clustering method: complete (default), single, "
"centroid, or average linkage.")
group.add_option(
"--gk", "--gene_k", dest="gene_k", type="int", default=None,
help="For hierarchical clustering, cut genes into K clusters "
"(default 0).")
group.add_option(
"--ak", "--array_k", dest="array_k", type="int", default=None,
help="For hierarchical clustering, cut arrays into K clusters "
"(default 0).")
group.add_option(
"-k", "--kmeans_k", dest="kmeans_k", type="int", default=5,
help="For K-means clustering, choose K (default 5).")
parser.add_option_group(group)
group = OptionGroup(parser, "Dendrogram")
group.add_option(
"--gl", "--label_genes", dest="label_genes", action="store_true",
default=False, help="Label the genes on the plot.")
group.add_option(
"--al", "--label_arrays", dest="label_arrays", action="store_true",
default=False, help="Label the arrays on the plot.")
group.add_option(
"--no_dendrogram", dest="no_dendrogram", default=False,
action="store_true",
help="Don't draw the dendrograms.")
group.add_option(
"--gene_tree_scale", dest="gene_tree_scale", type="float", default=1.0,
help="Scale the width of the gene tree by this factor. "
"Set to 0 to disable dendrogram.")
group.add_option(
"--array_tree_scale", dest="array_tree_scale", type="float",
default=1.0,
help="Scale the height of the array tree by this factor. "
"Set to 0 to disable dendrogram.")
group.add_option(
"--gene_tree_thickness", dest="gene_tree_thickness", type="float",
default=1.0,
help="Scale the thickness of the lines in the gene tree by this "
"factor.")
group.add_option(
"--array_tree_thickness", dest="array_tree_thickness", type="float",
default=1.0,
help="Scale the thickness of the lines in the array tree by this "
"factor.")
parser.add_option_group(group)
group = OptionGroup(parser, "Border and Grid")
group.add_option(
"--scale_border", default=1.0, type="float",
help="Scale the border thicker or thinner.")
group.add_option(
"--border_color",
help="Specify the color of the border. "
"Format: <R>,<G>,<B> (e.g. 128,128,128)")
group.add_option(
"--grid", action="store_true", default=False,
help="Add a grid around the boxes in the heatmap.")
group.add_option(
"--grid_color",
help="Specify the color of the grid. "
"Format: <R>,<G>,<B> (e.g. 128,128,128)")
parser.add_option_group(group)
group = OptionGroup(parser, "Graphics")
group.add_option(
"-x", "--width", dest="width", type="int", default=DEF_WIDTH,
help="Width of boxes (default=%d)." % DEF_WIDTH)
group.add_option(
"-y", "--height", dest="height", type="int", default=DEF_HEIGHT,
help="Height of boxes (default=%d)." % DEF_HEIGHT)
group.add_option(
"-s", "--scale", dest="scale", default=0, type="float",
help="Add this to each expression value before plotting (default 0)."
" Scale applied, then gain applied.")
group.add_option(
"-m", "--gain", dest="gain", default=1, type="float",
help="Multiply each expression value by this value before plotting "
"(default 1).")
group.add_option(
"", "--no_autoscale", dest="autoscale", action="store_false",
default=True, help="Disable autoscaling.")
group.add_option(
"--color", dest="color_scheme", type="choice", default="bild",
choices=["red", "white", "red-green", "blue-yellow", "matlab", "bild",
"genespring", "yahoo"],
help="Choose the color scheme to use: red, white, red-green, "
"blue-yellow, matlab, bild (default), genespring, or yahoo.")
group.add_option(
"--black0", dest="black0", action="store_true", default=False,
help="Color 0 values black (no matter the color scheme).")
group.add_option(
"--inverse", dest="inverse", action="store_true", default=False,
help="Flip the colors for the heatmap.")
group.add_option(
"--colorbar", dest="colorbar", default=False, action="store_true",
help="Add a colorbar to the plot.")
parser.add_option_group(group)
# Parse the input arguments.
options, args = parser.parse_args()
if not args:
parser.error("Please specify an infile.")
if len(args) > 1:
parser.error(
"Please specify a single infile. Found %d: %s." %
(len(args), ", ".join(args)))
infile, = args
if not os.path.exists(infile):
parser.error("I could not find file %s." % infile)
if options.libpath:
sys.path = options.libpath + sys.path
if options.autoclust:
options.gene_center = "mean"
options.gene_normalize = "var"
options.cluster_genes = True
options.cluster_arrays = True
options.label_genes = True
options.label_arrays = True
if options.no_dendrogram:
options.gene_tree_scale = 0
options.array_tree_scale = 0
if not options.jobname:
x = _guess_filestem(infile)
x = os.path.split(x)[1] # Save results in local directory.
options.jobname = x
# Check the options.
# Not completely implemented yet.
assert options.scale_border > 0 and options.scale_border < 5.0
border_color = 0, 0, 0
# (0, 0, 0) is too dark for small box sizes. 100 looks too washed
# out. 50-75 is about right.
grid_color = 75, 75, 75
if options.border_color:
border_color = _parse_color(options.border_color)
if options.grid_color:
grid_color = _parse_color(options.grid_color)
outfile = options.outfile
# Choose a plotting library.
if options.image_format == "svg":
plotlib = __import__(
"genomicode.svgplot", globals(), locals(), ["svgplot"])
if not outfile:
outfile = "%s.svg" % options.jobname
else:
plotlib = __import__(
"genomicode.pilplot", globals(), locals(), ["pilplot"])
if not outfile:
outfile = "%s.png" % options.jobname
MATRIX, cluster_data = read_data_set(infile)
# Do the normalization, clustering, etc. before plotting the
# results.
x = process_data_set(
MATRIX, options.cluster, cluster_data, options.jobname,
options.gene_indexes, options.gene_names, options.gene_file,
options.select_genes_var,
options.array_indexes, options.array_file,
options.log_transform, options.gene_center, options.gene_normalize,
options.array_center, options.array_normalize,
options.cluster_genes, options.cluster_arrays,
options.cluster_alg, options.distance, options.method,
options.gene_k, options.array_k, options.kmeans_k,
options.scale, options.gain, options.autoscale)
MATRIX, cluster_data, signal_0, signal_1 = x
layout = make_layout(
MATRIX, cluster_data, signal_0, signal_1, plotlib,
options.width, options.height, options.scale_border, options.grid,
options.color_scheme, options.inverse, options.black0,
options.colorbar,
options.cluster_genes, options.gene_tree_scale,
options.gene_tree_thickness,
options.cluster_arrays, options.array_tree_scale,
options.array_tree_thickness,
options.cluster_alg,
options.label_genes, options.label_arrays)
megapixels = layout.heatmap.width() * layout.heatmap.height() / 1024 / 1024
assert megapixels <= MAX_MEGAPIXELS, "%dx%d plot too big [%d:%d]." % (
layout.heatmap.width(), layout.heatmap.height(),
options.width, options.height)
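    # Illustrative sanity check of the size limit: a 4096 x 3072 pixel heatmap
    # is 4096*3072/1024/1024 = 12 megapixels, so it passes only if
    # MAX_MEGAPIXELS is at least 12.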
coords = calc_coords_for_layout(layout)
plot(
outfile, MATRIX, cluster_data, plotlib, layout, coords,
border_color, grid_color)
if __name__ == '__main__':
#import sys
#print "Running python: %s" % sys.executable
main()
#import profile; profile.run("main()")
```
#### File: changlab/scripts/bfrmproject.py
```python
import os, sys
# make_file_layout
# init_paths
#
# write_dataset
# write_model
# write_bfrm_dataset
# write_sample_probe_ids
# write_bfrm_files
# check_model
#
# run_bfrm_project
# log_matrix
#
# summarize_factor_scores
def make_file_layout(outpath):
from genomicode import filelayout
outpath = outpath or "."
if not os.path.exists(outpath):
os.mkdir(outpath)
outpath = os.path.realpath(outpath)
Path, File = filelayout.Path, filelayout.File
    # Have only one set of these files for the whole analysis.
FILES = [
Path.ATTIC("attic"),
File.DATASET("dataset.gct"),
File.FACTOR_SCORES("factors.pcl"),
File.FACTOR_CDT("factors.cdt"), # created by cluster
File.FACTOR_ATR("factors.atr"), # created by cluster
File.FACTOR_GTR("factors.gtr"), # created by cluster
File.FACTOR_SCORES_PNG("factors.png"),
Path.BFRM("bfrm",
File.BFRM_DATASET("dataset.txt"),
File.BFRM_SPROBE_IDS("probeidsSmp.txt"),
File.BFRM_MA("mA.txt"),
File.BFRM_MPOSTPIB("mPostPib.txt"),
File.BFRM_MPSI("mPsi.txt"),
File.BFRM_MVARIABLESIN("mVariablesIn.txt"), # optional
File.BFRM_PROBE_IDS("probeids.txt"),
File.BFRM_AF("af.txt"), # sample x factor
File.BFRM_Y("Y.txt"), # gene x sample
),
File.BFRM_MODEL("bfrm_model.zip"),
]
file_layout = Path.OUTPATH(outpath, *FILES)
return file_layout
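# For reference, the layout above places everything under the output path,
# roughly (illustrative sketch, actual names come from the FILES list):
#   <outpath>/attic/            archived cluster files (cdt/atr/gtr)
#   <outpath>/dataset.gct       the (possibly log-transformed) data set
#   <outpath>/factors.pcl       projected factor scores (+ factors.png heatmap)
#   <outpath>/bfrm/             BFRM model files and projection output
#   <outpath>/bfrm_model.zip    copy of the input BFRM model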
def init_paths(file_layout):
from genomicode import filelayout
for x in filelayout.walk(file_layout):
dirpath, dirnames, filenames = x
if os.path.exists(dirpath):
continue
os.mkdir(dirpath)
def write_dataset(filename, MATRIX):
import arrayio
arrayio.gct_format.write(MATRIX, open(filename, 'w'))
def write_model(filename, file_layout):
check_model(filename)
x = open(filename).read()
open(file_layout.BFRM_MODEL, 'w').write(x)
def write_bfrm_dataset(filename, DATA):
data = DATA.value()
handle = open(filename, 'w')
for x in data:
print >>handle, "\t".join(map(str, x))
handle.close()
def write_sample_probe_ids(filename, DATA):
from genomicode import bfrm
name = bfrm.get_affy_row_name(DATA)
probeset_ids = DATA.row_names(name)
x = ["%s\n" % x for x in probeset_ids]
open(filename, 'w').writelines(x)
def write_bfrm_files(path, model_file):
import zipfile
from genomicode import archive
opj = os.path.join
zfile = zipfile.ZipFile(model_file)
s2f = archive.unzip_dict(model_file)
# list of filename, required
files = [
("mA.txt", 1),
("mPostPib.txt", 1),
("mPsi.txt", 1),
("mVariablesIn.txt", 0),
("probeids.txt", 1),
]
for name, required in files:
if name not in s2f and not required:
continue
assert name in s2f, "I could not find '%s' in the model." % name
x = zfile.open(s2f[name]).read()
open(opj(path, name), 'w').write(x)
def check_model(filename):
from genomicode import archive
s2f = archive.unzip_dict(filename)
assert "mA.txt" in s2f
assert "mPostPib.txt" in s2f
assert "mPsi.txt" in s2f
assert "probeids.txt" in s2f
def run_bfrm_project(file_layout, bfrm_path, matlab_bin):
import arrayio
from genomicode import bfrm
from genomicode import matlab
param_file = "parameters.txt"
model = bfrm.read_clean_model(
file_layout.BFRM_MODEL, param_file=param_file)
num_factors = len(model["FACTOR_O"])
assert num_factors, "No latent factors in the BFRM model."
x = "Projecting %d latent factors onto data set." % num_factors
if num_factors == 1:
x = x.replace("factors", "factor")
print x
DATA = arrayio.read(file_layout.DATASET)
bfrm_path = bfrm.find_bfrm_project(bfrm_path)
assert bfrm_path is not None, "I could not find BFRM_project."
bfrm_path = os.path.realpath(bfrm_path)
# Write out the dataset and probe IDs.
write_bfrm_dataset(file_layout.BFRM_DATASET, DATA)
write_sample_probe_ids(file_layout.BFRM_SPROBE_IDS, DATA)
# Write the BFRM model files.
write_bfrm_files(file_layout.BFRM, file_layout.BFRM_MODEL)
# Make sure some of the probes are the same.
pid = [x.strip() for x in open(file_layout.BFRM_PROBE_IDS)]
pid = [pid[i] for i in model["VariablesIn"]]
spid = [x.strip() for x in open(file_layout.BFRM_SPROBE_IDS)]
pid = [x.lower() for x in pid]
spid = [x.lower() for x in spid]
intersect = [x for x in pid if x in spid]
assert intersect, "No common probes between model and data set."
if len(intersect) < len(pid):
x = "Warning: model contains %d probe IDs, but only matched " + \
"%d in data set."
print x % (len(pid), len(intersect))
# Run the matlab script.
lines = []
w = lines.append
w("addpath '%s';\n" % bfrm_path)
w("addpath '%s/bfrm';\n" % bfrm_path)
w("y = load('%s');\n" % file_layout.BFRM_DATASET)
w("probeidsSmp = readWordlist('%s');\n" % file_layout.BFRM_SPROBE_IDS)
w("[af Y sampleids] = getFacScores('%s/', y, probeidsSmp);" %
file_layout.BFRM)
w("save('%s', 'af', '-ASCII', '-TABS');\n" % file_layout.BFRM_AF)
w("save('%s', 'Y', '-ASCII', '-TABS');\n" % file_layout.BFRM_Y)
script = "".join(lines)
x = matlab.run(
script, matlab_bin=matlab_bin, working_path=file_layout.OUTPATH)
print x
sys.stdout.flush()
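# For reference, the generated MATLAB script has this shape (paths are
# illustrative; the real ones come from file_layout):
#   addpath '/opt/BFRM_project';
#   addpath '/opt/BFRM_project/bfrm';
#   y = load('<outpath>/bfrm/dataset.txt');
#   probeidsSmp = readWordlist('<outpath>/bfrm/probeidsSmp.txt');
#   [af Y sampleids] = getFacScores('<outpath>/bfrm/', y, probeidsSmp);
#   save('<outpath>/bfrm/af.txt', 'af', '-ASCII', '-TABS');
#   save('<outpath>/bfrm/Y.txt', 'Y', '-ASCII', '-TABS');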
def log_matrix(MATRIX):
# Log the matrix if necessary. Will log in place. Return a
# boolean indicating whether anything was logged.
from genomicode import jmath
from genomicode import binreg
if binreg.is_logged_array_data(MATRIX):
return False
print "I will log the matrix."
MATRIX._X = jmath.log(MATRIX._X, base=2, safe=1)
return True
def summarize_factor_scores(file_layout, python, arrayplot, cluster, libpath):
import zipfile
import arrayio
from genomicode import Matrix
from genomicode import jmath
from genomicode import archive
from genomicode import graphlib
from genomicode import bfrm
DATA = arrayio.read(file_layout.DATASET)
param_file = "parameters.txt"
model = bfrm.read_clean_model(
file_layout.BFRM_MODEL, param_file=param_file)
num_factors = model["F"].nrow()
# Load the factor names.
assert zipfile.is_zipfile(file_layout.BFRM_MODEL)
s2f = archive.unzip_dict(file_layout.BFRM_MODEL)
assert "factorids.txt" in s2f, "Missing: factorids.txt"
zfile = zipfile.ZipFile(file_layout.BFRM_MODEL)
factor_names = [x.strip() for x in zfile.open(s2f["factorids.txt"])]
assert len(factor_names) == num_factors
# sample x factor matrix
F = arrayio.read(file_layout.BFRM_AF)
assert F.nrow() == DATA.ncol()
F_X = jmath.transpose(F._X)
# F_X contains all factors, including intercept and design.
# Remove all but the latent factors.
F_X = F_X[-num_factors:]
# Sort the factors so they'll be in the same order as the clean
# model.
assert len(F_X) == len(model["FACTOR_O"])
F_X = [F_X[i] for i in model["FACTOR_O"]]
factor_names = [factor_names[i] for i in model["FACTOR_O"]]
# Write out the projected factor scores.
SAMPLE_NAME = arrayio.tdf.SAMPLE_NAME
row_names = {}
col_names = {}
row_names["xID"] = factor_names
col_names[SAMPLE_NAME] = DATA.col_names(SAMPLE_NAME)
M = Matrix.InMemoryMatrix(F_X, row_names, col_names)
arrayio.pcl_format.write(M, file_layout.FACTOR_SCORES)
# Make the heatmap.
x = graphlib.find_wide_heatmap_size(
M.nrow(), M.ncol(), min_box_height=10, min_box_width=10,
max_total_height=768, max_total_width=1024)
xpix, ypix = x
ypix = min(ypix, xpix*4)
x = graphlib.plot_heatmap(
file_layout.FACTOR_SCORES, file_layout.FACTOR_SCORES_PNG,
xpix, ypix,
color="bild", show_colorbar=True, show_grid=True,
gene_center="mean", gene_normalize="var",
gene_label=True, cluster_genes=True,
array_label=True, cluster_arrays=True,
python=python, arrayplot=arrayplot, cluster=cluster, libpath=libpath)
# Clean up the cluster files.
files = [
file_layout.FACTOR_CDT, file_layout.FACTOR_ATR, file_layout.FACTOR_GTR
]
for filename in files:
if not os.path.exists(filename):
continue
src = filename
x = os.path.split(filename)[1]
dst = os.path.join(file_layout.ATTIC, x)
os.rename(src, dst)
def main():
from optparse import OptionParser, OptionGroup
usage = "usage: %prog [options] <bfrm_model> <dataset>"
parser = OptionParser(usage=usage, version="%prog 01")
parser.add_option(
"", "--bfrm_path", dest="bfrm_path", default=None,
help="Specify the path to BFRM_project.")
parser.add_option(
"", "--matlab", dest="matlab", default="matlab",
help="Specify the command to run matlab.")
parser.add_option(
"", "--python", dest="python", default=None,
help="Specify the command to run python (optional).")
parser.add_option(
"", "--arrayplot", dest="arrayplot", default=None,
help="Specify the command to run arrayplot.")
parser.add_option(
"", "--cluster", dest="cluster", default=None,
help="Specify the command to run cluster.")
parser.add_option(
"", "--libpath", dest="libpath", action="append", default=[],
help="Add to the Python library search path.")
parser.add_option(
"-o", "--outpath", dest="outpath", type="string", default=None,
help="Save files in this path.")
parser.add_option(
"-z", "--archive", dest="archive", action="store_true", default=None,
help="Archive the raw output. Helpful for GenePattern.")
# Parse the arguments.
options, args = parser.parse_args()
if options.libpath:
sys.path = options.libpath + sys.path
# Import this after the library path is set.
import arrayio
from genomicode import archive
from genomicode import genepattern
genepattern.fix_environ_path()
if len(args) != 2:
parser.error("Please specify files.")
model_file, filename = args
assert os.path.exists(model_file), "File not found: %s" % model_file
assert os.path.exists(filename), "File not found: %s" % filename
# Set up the files.
file_layout = make_file_layout(options.outpath)
init_paths(file_layout)
# Read the matrix and convert to GCT format.
x = arrayio.read(filename)
MATRIX = arrayio.convert(x, to_format=arrayio.gct_format)
print "Read data set with %d genes and %d samples." % (
MATRIX.nrow(), MATRIX.ncol())
log_matrix(MATRIX)
# Write out the data sets.
write_dataset(file_layout.DATASET, MATRIX)
# Save the BFRM model.
write_model(model_file, file_layout)
# Run BFRM projection.
run_bfrm_project(
file_layout, options.bfrm_path, options.matlab)
# Generate output files.
summarize_factor_scores(
file_layout, options.python, options.arrayplot, options.cluster,
options.libpath)
if options.archive:
print "Archiving results."
archive.zip_path(file_layout.ATTIC, noclobber=False)
archive.zip_path(file_layout.BFRM, noclobber=False)
print "Done."
if __name__ == '__main__':
main()
```
#### File: changlab/scripts/convert_cel_cc1_to_v3.py
```python
import os
def main():
from optparse import OptionParser, OptionGroup
from genomicode import affyio
usage = "usage: %prog [options] <cel_file>"
parser = OptionParser(usage=usage, version="%prog 01")
parser.add_option(
"--no-clobber", dest="clobber", action="store_false", default=True)
options, args = parser.parse_args()
if len(args) != 1:
parser.error("Please specify an infile.")
filename, = args
assert os.path.exists(filename), "I could not find file: %s" % filename
version = affyio.guess_cel_version(filename)
assert version == "cc1", "File does not appear to be cc1: %s" % filename
outfile = "%s.v3" % filename
assert options.clobber or not os.path.exists(outfile), "noclobber"
affyio.convert_cel_cc1_to_3(filename, open(outfile, 'w'))
if __name__ == '__main__':
main()
```
#### File: changlab/scripts/dwdnorm.py
```python
def parse_indexes(MATRIX, indexes, indexes_include_headers):
from genomicode import parselib
max_index = MATRIX.ncol()
num_headers = len(MATRIX._row_names)
assert max_index, "empty matrix"
I = []
for s, e in parselib.parse_ranges(indexes):
if indexes_include_headers:
s, e = s-num_headers, e-num_headers
assert s >= 1, "Index out of range: %s" % s
assert e <= max_index, "Index out of range: %s" % e
s, e = s - 1, min(e, max_index)
I.extend(range(s, e))
return I
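# Worked example (assuming parselib.parse_ranges("1-3,5") yields the inclusive
# pairs [(1, 3), (5, 5)]): for a 10-column matrix with
# indexes_include_headers=False, parse_indexes returns [0, 1, 2, 4].
# With indexes_include_headers=True and 2 header columns, "3-6" maps to data
# columns [0, 1, 2, 3].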
def main():
import os
import sys
import argparse
import arrayio
from genomicode import dwdnorm
usage = "dwdnorm.py [options] expression_file"
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument("expression_file", help="Matrix to normalize.")
parser.add_argument(
"--indexes1",
help="Which columns in batch 1, E.g. 1-5,8 (1-based, "
"inclusive). Columns leftover will be in the remaining batches.")
parser.add_argument(
"--indexes_include_headers", "--iih", action="store_true",
help="If not given (default), then column 1 is the first column "
"with data. If given, then column 1 is the very first column in "
"the file, including the headers.")
args = parser.parse_args()
assert os.path.exists(args.expression_file), \
"File not found: %s" % args.expression_file
assert args.indexes1
MATRIX = arrayio.read(args.expression_file)
# Figure out the batch for each sample.
assert MATRIX.ncol(), "empty matrix"
# Should be -1 or 1.
batches = [-1] * MATRIX.ncol()
I = parse_indexes(MATRIX, args.indexes1, args.indexes_include_headers)
for i in I:
batches[i] = 1
# Make sure there are two batches.
count = {}
for b in batches:
count[b] = count.get(b, 0) + 1
assert len(count) == 2
MATRIX_norm = dwdnorm.normalize(MATRIX, batches)
arrayio.tab_delimited_format.write(MATRIX_norm, sys.stdout)
if __name__=='__main__':
main()
```
#### File: changlab/scripts/gmt2matrix.py
```python
def main():
import os
import argparse
import math
from genomicode import genesetlib
parser = argparse.ArgumentParser()
parser.add_argument("gmt_file")
args = parser.parse_args()
assert os.path.exists(args.gmt_file)
genesets = []
for x in genesetlib.read_gmt(args.gmt_file):
genesets.append(x)
if not genesets:
return
# Figure out the number of columns needed for the file.
num_cols = None
for x in genesets:
name, desc, genes = x
nc = 1 + 1 + len(genes)
if num_cols is None or nc > num_cols:
num_cols = nc
num_digits = int(math.floor(math.log(num_cols, 10))) + 1
header = ["COL%0*d" % (num_digits, i+1) for i in range(num_cols)]
print "\t".join(header)
for x in genesets:
name, desc, genes = x
row = [name, desc] + genes
row = row + [""]*(num_cols-len(row))
print "\t".join(row)
if __name__ == '__main__':
main()
```
#### File: changlab/scripts/pcaplot.py
```python
import os, sys
def read_matrix(filename, num_header_cols=None):
import arrayio
return arrayio.read(filename, hcols=num_header_cols)
def _parse_cluster(options_cluster, indexes_include_headers, MATRIX):
# Return a vector of clusters, where each cluster is an integer
# from 0 to K-1. K is the total number of clusters. The length
# of the vector should be the same as the number of samples in the
# matrix.
from genomicode import parselib
index2cluster = {}
for clust_i, s in enumerate(options_cluster):
ranges = parselib.parse_ranges(s)
for s, e in ranges:
for i in range(s-1, e):
if indexes_include_headers:
i -= len(MATRIX._row_names)
assert i < MATRIX.ncol(), "Index %d out of range" % i
assert i not in index2cluster, \
"Index %d in multiple clusters" % i
index2cluster[i] = clust_i
#cluster = [len(options_cluster)] * MATRIX.ncol()
cluster = [None] * MATRIX.ncol()
for i, g in index2cluster.iteritems():
cluster[i] = g
return cluster
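# Worked example (assuming 1-based inclusive ranges from parse_ranges): for a
# 6-sample matrix, _parse_cluster(["1-3", "4,6"], False, MATRIX) returns
# [0, 0, 0, 1, None, 1] -- samples 1-3 in cluster 0, samples 4 and 6 in
# cluster 1, and sample 5 unassigned.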
def _parse_cluster_file(cluster_file, MATRIX):
# Return a vector of clusters, where each cluster is an integer
# from 0 to K-1. K is the total number of clusters. The length
# of the vector should be the same as the number of samples in the
# matrix.
from genomicode import clusterio
id2cluster = {}
for (id, cluster) in clusterio.read_kgg_file(cluster_file):
id2cluster[id] = cluster
# Figure out which row header matches the IDs.
header2numids = {} # header -> number of IDs matched.
header = num_ids = None
for cn in MATRIX.col_names():
x = MATRIX.col_names(cn)
col_ids = {}.fromkeys(x)
x = [x for x in id2cluster if x in col_ids]
if header is None or len(x) > num_ids:
header, num_ids = cn, len(x)
index2cluster = {}
if header is not None:
for i, name in enumerate(MATRIX.col_names(header)):
cluster = id2cluster.get(name)
index2cluster[i] = cluster
cluster = [None] * MATRIX.ncol()
for i, g in index2cluster.iteritems():
cluster[i] = g
# Make sure the clusters are 0-based.
clean = [x for x in cluster if x is not None]
if clean and min(clean) > 0:
for i in range(len(cluster)):
if cluster[i] is None:
continue
cluster[i] = cluster[i] - min(clean)
return cluster
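# Illustrative example: if the KGG file assigns the matched sample IDs to
# clusters 1, 2, 2, 3 (1-based), the returned vector is rebased to
# 0, 1, 1, 2; samples whose IDs are not in the KGG file stay None.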
def main():
from optparse import OptionParser, OptionGroup
import numpy
import arrayio
from genomicode import jmath
from genomicode import pcalib
from genomicode import colorlib
from genomicode import prismlib
# Does a PCA on the columns.
usage = "usage: %prog [options] filename outfile.png"
parser = OptionParser(usage=usage, version="%prog 01")
#parser.add_option(
# "-l", "--log_transform", default=False,
# action="store_true",
# help="Log transform the data first.")
parser.add_option(
"--num_header_cols", type=int,
help="This number of columns are headers. If not given, will guess.")
parser.add_option(
"-g", "--genes", default=None, type="int",
help="Number of genes to use.")
parser.add_option(
"--prism_file",
help="Write the column principal components to a prism-formatted "
"file.")
parser.add_option(
"--row_pc_file",
help="Write the principal components of the rows to this file.")
parser.add_option(
"--col_pc_file",
help="Write the principal components of the cols to this file.")
#parser.add_option(
# "-v", "--verbose", default=False, action="store_true",
# help="")
group = OptionGroup(parser, "Clustering")
parser.add_option_group(group)
group.add_option(
"-c", "--cluster", default=[], action="append",
help="Group samples into a cluster (e.g. -c 1-5); 1-based.")
group.add_option(
"--indexes_include_headers", "--iih", action="store_true",
help="If not given (default), then index 1 is the first column "
"with data. If given, then index 1 is the very first column "
"in the file, including the headers.")
group.add_option(
"--cluster_file",
help="A KGG format file of the clusters for the samples. "
"Clusters in this file can be 0-based or 1-based.")
group = OptionGroup(parser, "Visualization")
parser.add_option_group(group)
group.add_option(
"--title", help="Put a title on the plot.")
group.add_option(
"--width", default=None, type="int",
help="Width (in pixels) of the plot.")
group.add_option(
"--label", default=False, action="store_true",
help="Label the samples.")
group.add_option(
"--label_axes", default=False, action="store_true",
help="Label the axes.")
group.add_option(
"--scale_label", type=float, default=1.0,
help="Scale the size of the labels.")
# Parse the input arguments.
options, args = parser.parse_args()
if len(args) < 2:
parser.error("Please specify an infile and an outfile.")
elif len(args) > 2:
parser.error("Too many input parameters (%d)." % len(args))
filename, outfile = args
if not os.path.exists(filename):
parser.error("I could not find file %s." % filename)
if options.num_header_cols is not None:
assert options.num_header_cols > 0 and options.num_header_cols < 100
if options.width is not None:
assert options.width > 10, "too small"
assert options.width < 4096*16, "width too big"
assert options.scale_label > 0.01 and options.scale_label < 100
options.log_transform = False
num_genes = options.genes
#K = 10 # number of dimensions
MATRIX = read_matrix(filename, options.num_header_cols)
if options.log_transform:
MATRIX._X = jmath.log(MATRIX._X, base=2, safe=1)
assert MATRIX.nrow() and MATRIX.ncol(), "Empty matrix."
cluster = None
if options.cluster and options.cluster_file:
parser.error("Cannot specify clusters and a cluster file.")
if options.cluster:
cluster = _parse_cluster(
options.cluster, options.indexes_include_headers, MATRIX)
if options.cluster_file:
if not os.path.exists(options.cluster_file):
parser.error(
"I could not find cluster file: %s" % options.cluster_file)
cluster = _parse_cluster_file(options.cluster_file, MATRIX)
# Select a subset of the genes.
if num_genes:
assert MATRIX.ncol() > 1, "Not enough samples to select genes."
I = pcalib.select_genes_var(MATRIX._X, num_genes)
MATRIX = MATRIX.matrix(I, None)
# Calculate the principal components and plot them.
K = min(MATRIX.nrow(), MATRIX.ncol())
principal_components, perc_var = pcalib.svd_project_cols(MATRIX._X, K)
X = [x[0] for x in principal_components]
Y = [x[1] for x in principal_components]
color = None
if cluster is not None:
color = pcalib.choose_colors(cluster)
LABEL = None
if options.label:
LABEL = MATRIX.col_names(arrayio.COL_ID)
assert not LABEL or len(LABEL) == len(X), "%d %d" % (len(X), len(LABEL))
height = width = None
if options.width is not None:
height, width = int(options.width*0.75), options.width
pcalib.plot_scatter(
X, Y, outfile, group=cluster, color=color, title=options.title,
label=LABEL, xlabel=options.label_axes, ylabel=options.label_axes,
scale_label=options.scale_label, height=height, width=width)
# Write out the scatter plot in Prism format.
if options.prism_file:
# Write out as prism format.
num_series = 1
if cluster:
num_series = max(cluster) + 1
names = ["CLUSTER-%d" % (i+1) for i in range(num_series)]
DATA = {}
rownames = {}
for i in range(num_series):
xy = []
n = []
for j in range(len(principal_components)):
if cluster and cluster[j] != i:
continue
x = principal_components[j][0]
y = principal_components[j][1]
xy.append([x, y])
n.append(MATRIX.col_names(arrayio.COL_ID)[j])
if xy:
DATA[names[i]] = xy
rownames[names[i]] = n
prismlib.write_scatterplot(options.prism_file, DATA, rownames)
if options.col_pc_file:
# Write out the principal components.
handle = open(options.col_pc_file, 'w')
assert cluster is None or len(cluster) == len(principal_components)
x = ["PC%02d (%.2f%%)" % (i, 100*perc_var[i]) for i in range(K)]
header = ["Index", "Sample", "Cluster", "Color"] + x
print >>handle, "\t".join(header)
for i in range(len(principal_components)):
x = MATRIX.col_names(arrayio.COL_ID)[i]
c = ""
if color and color[i] is not None:
c = colorlib.rgb2hex(color[i])
clust = ""
if cluster is not None and cluster[i] is not None:
clust = cluster[i]
x = [i+1, x, clust, c] + principal_components[i]
assert len(x) == len(header)
print >>handle, "\t".join(map(str, x))
handle.close()
# Look at the principal components on the rows.
if options.row_pc_file:
handle = open(options.row_pc_file, 'w')
row_names = MATRIX.row_names()
x = ["PC%02d (%.2f%%)" % (i, 100*perc_var[i]) for i in range(K)]
header = ["Index"] + row_names + x
print >>handle, "\t".join(header)
# U nrow x k columns are principal components
# V k x ncol rows are principal components
U, s, V = numpy.linalg.svd(MATRIX._X, full_matrices=False)
for i in range(len(U)):
            assert len(U[i]) == K, "%d %d %d" % (len(U), len(U[i]), K)
n = [MATRIX.row_names(x)[i] for x in row_names]
x = [i+1] + n + list(U[i])
assert len(x) == len(header)
print >>handle, "\t".join(map(str, x))
handle.close()
if __name__ == '__main__':
main()
```
#### File: changlab/scripts/preprocess.py
```python
import os
def find_annotation_file(chipname):
from genomicode import arrayplatformlib
chipname = chipname.replace('-','_')
filename = arrayplatformlib.chipname2filename(chipname)
assert filename, "I could not find a file for chip: %s" % chipname
assert os.path.exists(filename), "I could not find annotation file %s." % \
filename
filename = os.path.realpath(filename)
return filename
def find_normscript():
from genomicode import config
file_ = "normscript.R"
filename = os.path.join(config.changlab_Rlib, file_)
assert os.path.exists(filename), "I could not find %s." % file_
filename = os.path.realpath(filename)
return filename
def main():
import tempfile
from optparse import OptionParser
from genomicode import affyio
usage = (
"usage: %prog [options] algorithm path_to_cel_files\n\n"
'algorithm should be "RMA" or "MAS5".')
parser = OptionParser(usage=usage, version="%prog 01")
parser.add_option(
"--platform",
help="Normalize only arrays from this platform.")
parser.add_option(
"-s", "--filestem", dest="filestem",
help="Use this as the filestem for the normalized matrix.")
parser.add_option(
"-n", "--noclobber", action="store_true", dest="noclobber")
options, args = parser.parse_args()
if len(args) != 2:
parser.error("I expected 2 arguments.")
algorithm, path = args
algorithm = algorithm.upper()
assert os.path.exists(path), "Path %s does not exist." % path
assert algorithm.upper() in ["RMA", "MAS5"], \
"Algorithm must be RMA or MAS5."
if options.filestem:
filestem = options.filestem
else:
# Guess the filestem based on the name of the path.
x = os.path.split(path)[1]
x = os.path.splitext(x)[0]
assert x, "File name is missing from: %s" % path
x = x.replace("_cel", "") # For ArrayExpress paths
x = x.replace(".CEL", "") # For ArrayExpress paths
filestem = x
assert filestem is not None and type(filestem) is type("")
assert filestem.strip()
outfile = "%s.%s" % (filestem, algorithm.lower())
if options.noclobber and os.path.exists(outfile):
print "Outfile %s exists. Will not overwrite." % outfile
return
#if algorithm == "MAS5":
# log_signal, filter_25, filter_50 = 0, 0, 0
#elif algorithm == "RMA":
# log_signal, filter_25, filter_50 = 0, 0, 0
#else:
# raise AssertionError, "Unknown algorithm: %s" % algorithm
assert algorithm in ["RMA", "MAS5"]
# Figure out the chip for each CEL file.
file2chipname = {}
for i, file_ in enumerate(os.listdir(path)):
filename = os.path.join(path, file_)
cn = affyio.extract_chip_name(filename)
assert cn, "Unknown chip type: %s" % filename
file2chipname[file_] = cn
# If the user specified a specific platform, then ignore all files
# not of that platform.
for (file_, cn) in file2chipname.items():
if options.platform and cn != options.platform:
del file2chipname[file_]
# Make sure files exist.
assert file2chipname, "no files in path: %s" % path
# Make sure all the files are the same type.
# Should normalize all the chip files separately.
chipnames = sorted({}.fromkeys(file2chipname.values()).keys())
if len(chipnames) != 1:
# Count the number of times each chip appears.
counts = {}
for x in file2chipname.values():
counts[x] = counts.get(x, 0) + 1
x = ["%s (%d)" % x for x in counts.iteritems()]
x = ", ".join(x)
assert len(chipnames) == 1, "multiple platforms: %s" % x
chipname = chipnames[0]
# Choose the annotation file for the chip name.
annotfile = find_annotation_file(chipname)
# Should allow the user to generate a file that's not annotated.
assert annotfile, "I don't know the annotation file for %s" % chipname
assert os.path.exists(annotfile), "Missing %s [%s]" % (annotfile, chipname)
oligo = 0
# Hack.
IS_OLIGO = ["HTA-2_0"]
if chipname in IS_OLIGO:
oligo = 1
if oligo:
raise NotImplementedError, "Can't handle oligo arrays yet."
temppath = None
try:
# Make a directory with only the CEL files with this type of annotation
temppath = tempfile.mkdtemp(dir=".")
for file_, cn in file2chipname.iteritems():
if cn != chipname:
continue
file1 = os.path.join(os.path.realpath(path), file_)
file2 = os.path.join(temppath, file_)
os.symlink(file1, file2)
#cmd = 'ln -s "%s" "%s"' % (file1, file2)
#os.system(cmd)
# Format the arguments and call the normalize script.
normscript = find_normscript()
x = temppath, annotfile, filestem, algorithm, oligo
x = " ".join(map(str, x))
cmd = "cat %s | R --vanilla %s" % (normscript, x)
print "NORM: %s\n" % cmd
os.system(cmd)
finally:
# Clear out stuff from the temppath.
if temppath and os.path.exists(temppath):
for file_ in os.listdir(temppath):
filename = os.path.join(temppath, file_)
os.unlink(filename)
os.rmdir(temppath)
print "Done"
if __name__ == '__main__':
main()
```
#### File: changlab/scripts/pypeakseq.py
```python
def make_peakseq_preproc_command(bam_file, out_path):
from genomicode import config
from genomicode import filelib
from genomicode import parallel
# samtools view bam11.bam | PeakSeq -preprocess SAM stdin bam12
samtools = filelib.which_assert(config.samtools)
peakseq = filelib.which_assert(config.peakseq)
sq = parallel.quote
cmd = [
sq(samtools),
"view", sq(bam_file),
"|",
sq(peakseq),
"-preprocess",
"SAM",
"stdin",
sq(out_path),
]
return " ".join(cmd)
def make_peakseq_run_command(config_file):
import os
from genomicode import config
from genomicode import filelib
from genomicode import parallel
assert os.path.exists(config_file)
config_file = os.path.realpath(config_file)
# PeakSeq -peak_select <config_file>
peakseq = filelib.which_assert(config.peakseq)
sq = parallel.quote
cmd = [
sq(peakseq),
"-peak_select", config_file,
]
return " ".join(cmd)
def make_config_file(
filename, treatment_path, control_path, mapability_file,
experiment_name=None, fragment_length=None):
import os
experiment_name = experiment_name or "experiment"
fragment_length = fragment_length or 146
assert os.path.exists(treatment_path)
assert os.path.isdir(treatment_path)
assert os.path.exists(control_path)
assert os.path.isdir(control_path)
assert os.path.exists(mapability_file)
assert fragment_length > 0 and fragment_length < 10000
# Make full paths in case running directory changes.
treatment_path = os.path.realpath(treatment_path)
control_path = os.path.realpath(control_path)
mapability_file = os.path.realpath(mapability_file)
handle = open(filename, 'w')
w = handle.write
w("Experiment_id %s\n" % experiment_name)
    # Enrichment mapped fragment length: for tag extension, this is the
    # average fragment length.
w("Enrichment_mapped_fragment_length %d\n" % fragment_length)
# Target FDR in the simulations.
w("target_FDR 0.05\n")
# Number of simulations performed while estimating the putative
# peaks.
w("N_Simulations 10\n")
# Minimum distance between consecutive peaks
w("Minimum_interpeak_distance 200\n")
# Mappability file that includes the uniquely mappable number of
# nucleotides per window for each chromosome.
w("Mappability_map_file %s\n" % mapability_file)
    # The directory that contains the preprocessed ChIP-Seq reads. Multiple
    # directories can be given to pool reads from multiple sources
    # (e.g. replicates).
w("ChIP_Seq_reads_data_dirs %s\n" % treatment_path)
# The directory that contains the preprocessed Input (control)
# experiment reads. (Multiple directories allowed)
w("Input_reads_data_dirs %s\n" % control_path)
# Seed for pseudo-random number generator. This is necessary for
# simulated background option (specified below).
#Simulation_seed 1234567
w("max_Qvalue 0.05\n")
# Background_model Poisson
w("Background_model Simulated\n")
def main():
import os
import shutil
import argparse
from genomicode import filelib
from genomicode import parallel
p = filelib.tswrite
parser = argparse.ArgumentParser(description="")
parser.add_argument("mapability_file", help="PeakSeq mapability file.")
parser.add_argument("treatment_bam", help="BAM file of treated sample.")
parser.add_argument("control_bam", help="BAM file of background sample.")
parser.add_argument("outpath", help="Directory to store the results.")
parser.add_argument("--experiment_name", help="Name of experiment.")
parser.add_argument("--fragment_length", type=int, help="")
#group.add_argument(
# "--noclobber", action="store_true",
# help="Don't overwrite files if they already exist.")
args = parser.parse_args()
filelib.assert_exists_nz(args.mapability_file)
filelib.assert_exists_nz(args.treatment_bam)
filelib.assert_exists_nz(args.control_bam)
if args.fragment_length:
assert args.fragment_length > 0 and args.fragment_length < 10000
# Set up directories to run it on.
p("Setting up directories.\n")
if not os.path.exists(args.outpath):
os.mkdir(args.outpath)
# Copy the mapability file to the outpath.
shutil.copy2(args.mapability_file, args.outpath)
# Do preprocessing for PeakSeq.
p("Preprocessing.\n")
treatment_preproc_path = os.path.join(args.outpath, "preprocess.treatment")
control_preproc_path = os.path.join(args.outpath, "preprocess.control")
if not os.path.exists(treatment_preproc_path):
os.mkdir(treatment_preproc_path)
if not os.path.exists(control_preproc_path):
os.mkdir(control_preproc_path)
x1 = make_peakseq_preproc_command(
args.treatment_bam, treatment_preproc_path)
x2 = make_peakseq_preproc_command(args.control_bam, control_preproc_path)
x = parallel.pshell([x1, x2])
print x
# Make sure expected files exist.
x1 = os.path.join(treatment_preproc_path, "chr_ids.txt")
x2 = os.path.join(control_preproc_path, "chr_ids.txt")
filelib.assert_exists_nz(x1)
filelib.assert_exists_nz(x2)
# Make configuration file.
p("Making configuration file.\n")
config_file = os.path.join(args.outpath, "config.dat")
make_config_file(
config_file, treatment_preproc_path, control_preproc_path,
args.mapability_file, experiment_name=args.experiment_name,
fragment_length=args.fragment_length)
# Run PeakSeq.
p("Running PeakSeq in %s.\n" % args.outpath)
cmd = make_peakseq_run_command(config_file)
x = parallel.sshell(cmd, path=args.outpath)
print x
p("Done.\n")
if __name__ == '__main__':
main()
```
#### File: Betsy/modules/common_utils.py
```python
from gluon import *
processing_info='/home/xchen/chencode/tmp/processing_info/'
def hash_command(time, command_line):
from hashlib import md5
hashstring=time+command_line
hash = md5()
hash.update(hashstring)
hash_result = hash.hexdigest()
return hash_result
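# Hypothetical usage sketch (not part of the original module): the md5 of the
# submission time concatenated with the command line gives a stable job id,
# e.g.
#   hash_command("2016-03-01 10:15", "preprocess.py RMA ./cel_files")
#   -> a 32-character hex digest that could key a record under processing_info.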
```
|
{
"source": "JefftheCloudDog/astropy",
"score": 2
}
|
#### File: funcs/tests/test_comparison.py
```python
import re
import numpy as np
import pytest
from astropy.cosmology import Cosmology, FlatCosmologyMixin, Planck18, cosmology_equal
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.funcs.comparison import (
_cosmology_not_equal, _CosmologyWrapper, _parse_format, _parse_formats)
from astropy.cosmology.io.tests.base import ToFromTestMixinBase
class ComparisonFunctionTestBase(ToFromTestMixinBase):
"""Tests for cosmology comparison functions.
This class inherits from
`astropy.cosmology.io.tests.base.ToFromTestMixinBase` because the cosmology
    comparison functions all have a kwarg ``format`` that allows the arguments to
be converted to a |Cosmology| using the ``to_format`` architecture.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must be
inherited in a subclass.
"""
@pytest.fixture(scope="class")
def cosmo(self):
return Planck18
@pytest.fixture(scope="class")
def cosmo_eqvxflat(self, cosmo):
if isinstance(cosmo, FlatCosmologyMixin):
return cosmo.nonflat
pytest.skip("cosmology is not flat, "
"so does not have an equivalent non-flat cosmology.")
@pytest.fixture(scope="class",
params={k for k, _ in convert_registry._readers.keys()} - {"astropy.cosmology"})
def format(self, request):
return request.param
@pytest.fixture(scope="class")
def xfail_cant_autoidentify(self, format):
"""`pytest.fixture` form of method ``can_autoidentify`."""
if not self.can_autodentify(format):
pytest.xfail("cannot autoidentify")
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
return to_format(format)
@pytest.fixture(scope="class")
def pert_cosmo(self, cosmo):
# change one parameter
p = cosmo.__parameters__[0]
v = getattr(cosmo, p)
cosmo2 = cosmo.clone(**{p: v * 1.0001 if v != 0 else 0.001 * getattr(v, "unit", 1)})
return cosmo2
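    # Illustrative example (assuming the first declared parameter of Planck18
    # is H0 = 67.66 km/s/Mpc): the clone scales it by 1.0001 to ~67.667, giving
    # a cosmology that is close to, but not equal to, the original.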
@pytest.fixture(scope="class")
def pert_cosmo_eqvxflat(self, pert_cosmo):
if isinstance(pert_cosmo, FlatCosmologyMixin):
return pert_cosmo.nonflat
pytest.skip("cosmology is not flat, "
"so does not have an equivalent non-flat cosmology.")
@pytest.fixture(scope="class")
def pert_converted(self, pert_cosmo, format):
if format == "astropy.model": # special case Model
return pert_cosmo.to_format(format, method="comoving_distance")
return pert_cosmo.to_format(format)
class Test_parse_format(ComparisonFunctionTestBase):
"""Test functions ``_parse_format``."""
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
converted = to_format(format)
# Some raise a segfault! TODO: figure out why
if isinstance(converted, _CosmologyWrapper._cantbroadcast):
converted = _CosmologyWrapper(converted)
return converted
# ========================================================================
def test_shortcut(self, cosmo):
"""Test the already-a-cosmology shortcut."""
# A Cosmology
for fmt in {None, True, False, "astropy.cosmology"}:
assert _parse_format(cosmo, fmt) is cosmo, f"{fmt} failed"
# A Cosmology, but improperly formatted
# see ``test_parse_format_error_wrong_format``.
def test_convert(self, converted, format, cosmo):
"""Test converting a cosmology-like object"""
out = _parse_format(converted, format)
assert isinstance(out, Cosmology)
assert out == cosmo
def test_parse_format_error_wrong_format(self, cosmo):
"""
Test ``_parse_format`` errors when given a Cosmology object and format
is not compatible.
"""
with pytest.raises(ValueError, match=re.escape("for parsing a Cosmology, 'format'")):
_parse_format(cosmo, "mapping")
def test_parse_format_error_noncosmology_cant_convert(self):
"""
Test ``_parse_format`` errors when given a non-Cosmology object
and format is `False`.
"""
notacosmo = object()
with pytest.raises(TypeError, match=re.escape("if 'format' is False")):
_parse_format(notacosmo, False)
def test_parse_format_vectorized(self, cosmo, format, converted):
# vectorized on cosmos
out = _parse_format([cosmo, cosmo], None)
assert len(out) == 2
assert np.all(out == cosmo)
# vectorized on formats
out = _parse_format(cosmo, [None, None])
assert len(out) == 2
assert np.all(out == cosmo)
# more complex broadcast
out = _parse_format([[cosmo, converted], [converted, cosmo]],
[[None, format], [format, None]])
assert out.shape == (2, 2)
assert np.all(out == cosmo)
def test_parse_formats_vectorized(self, cosmo):
# vectorized on cosmos
out = _parse_formats(cosmo, cosmo, format=None)
assert len(out) == 2
assert np.all(out == cosmo)
# does NOT vectorize on formats
with pytest.raises(ValueError, match="operands could not be broadcast"):
_parse_formats(cosmo, format=[None, None])
class Test_cosmology_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison.cosmology_equal`"""
def test_cosmology_equal_simple(self, cosmo, pert_cosmo):
# equality
assert cosmology_equal(cosmo, cosmo) is True
# not equal to perturbed cosmology
assert cosmology_equal(cosmo, pert_cosmo) is False
def test_cosmology_equal_equivalent(self, cosmo, cosmo_eqvxflat,
pert_cosmo, pert_cosmo_eqvxflat):
# now need to check equivalent, but not equal, cosmologies.
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is True
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is False
assert cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True) is True
assert cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False) is False
def test_cosmology_equal_too_many_cosmo(self, cosmo):
with pytest.raises(TypeError, match="cosmology_equal takes 2 positional arguments"):
cosmology_equal(cosmo, cosmo, cosmo)
def test_cosmology_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted)
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted, format=False)
def test_cosmology_equal_format_auto(self, cosmo, converted, xfail_cant_autoidentify):
# These tests only run if the format can autoidentify.
assert cosmology_equal(cosmo, converted, format=None) is True
assert cosmology_equal(cosmo, converted, format=True) is True
def test_cosmology_equal_format_specify(self, cosmo, format, converted, pert_converted):
# equality
assert cosmology_equal(cosmo, converted, format=[None, format]) is True
assert cosmology_equal(converted, cosmo, format=[format, None]) is True
# non-equality
assert cosmology_equal(cosmo, pert_converted, format=[None, format]) is False
def test_cosmology_equal_equivalent_format_specify(self, cosmo, format, converted, cosmo_eqvxflat):
# specifying the format
assert cosmology_equal(cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True) is True
assert cosmology_equal(converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True) is True
class Test_cosmology_not_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison._cosmology_not_equal`"""
def test_cosmology_not_equal_simple(self, cosmo, pert_cosmo):
# equality
assert _cosmology_not_equal(cosmo, cosmo) is False
# not equal to perturbed cosmology
assert _cosmology_not_equal(cosmo, pert_cosmo) is True
def test_cosmology_not_equal_too_many_cosmo(self, cosmo):
with pytest.raises(TypeError, match="_cosmology_not_equal takes 2 positional"):
_cosmology_not_equal(cosmo, cosmo, cosmo)
def test_cosmology_not_equal_equivalent(self, cosmo, cosmo_eqvxflat,
pert_cosmo, pert_cosmo_eqvxflat):
# now need to check equivalent, but not equal, cosmologies.
assert _cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is True
assert _cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is False
assert _cosmology_not_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False) is True
assert _cosmology_not_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True) is False
def test_cosmology_not_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted)
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted, format=False)
def test_cosmology_not_equal_format_auto(self, cosmo, pert_converted, xfail_cant_autoidentify):
assert _cosmology_not_equal(cosmo, pert_converted, format=None) is True
assert _cosmology_not_equal(cosmo, pert_converted, format=True) is True
def test_cosmology_not_equal_format_specify(self, cosmo, format, converted, pert_converted):
# specifying the format
assert _cosmology_not_equal(cosmo, pert_converted, format=[None, format]) is True
assert _cosmology_not_equal(pert_converted, cosmo, format=[format, None]) is True
# equality
assert _cosmology_not_equal(cosmo, converted, format=[None, format]) is False
def test_cosmology_not_equal_equivalent_format_specify(self, cosmo, format, converted, cosmo_eqvxflat):
# specifying the format
assert _cosmology_not_equal(cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=False) is True
assert _cosmology_not_equal(cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True) is False
assert _cosmology_not_equal(converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True) is False
```
|
{
"source": "JeffTheK/Jeff-OS",
"score": 2
}
|
#### File: JeffTheK/Jeff-OS/install.py
```python
import os
import shutil
import sys
import click
from datetime import datetime
from shutil import ignore_patterns
# message colors
try:
from colorama import Fore, Back, Style
from termcolor import colored
OK = '['+colored("OK", "green")+']'
WARN = '['+colored("WARN", "yellow")+']'
ERR = '['+colored("ERR", "red")+']'
except ImportError:
def colored(string, col1, col2=""):
return string
OK = "[OK]"
WARN = "[WARN]"
ERR = "[ERR]"
def install_default_apps():
print(OK+"installing default programs")
if os.path.isdir("Jeff-OS/sys/bin/"):
shutil.rmtree("Jeff-OS/sys/bin/")
shutil.copytree("src/bin", "Jeff-OS/sys/bin", ignore=ignore_patterns('*.c', '*.h'))
def install_default_libraries():
print(OK+"installing libraries")
if os.path.isdir("Jeff-OS/sys/lib/"):
shutil.rmtree("Jeff-OS/sys/lib/")
if os.path.isfile("src/lib/jeffos/jeffos.cfg.py"):
os.remove("src/lib/jeffos/jeffos.cfg.py")
cfg = open("src/lib/jeffos/jeffos.cfg.py", "x")
cfg.write(os.getcwd() + "/Jeff-OS")
cfg.close()
shutil.copytree("src/lib", "Jeff-OS/sys/lib")
os.system("cd Jeff-OS/sys/lib/ && pip install .")
os.remove("src/lib/jeffos/jeffos.cfg.py")
def install():
if os.path.isdir("Jeff-OS"):
print(OK+"Jeff-OS is already installed. Removing old installation")
shutil.rmtree("Jeff-OS")
print(OK+"finished")
print(OK+"starting installation")
os.mkdir("Jeff-OS")
os.mkdir("Jeff-OS/sys")
os.mkdir("Jeff-OS/usr")
install_default_apps()
install_default_libraries()
print(OK+"installing vars")
os.mkdir("Jeff-OS/sys/var")
# sys.cfg
print(OK+"setting sys.cfg")
os_name = "Jeff-OS"
build_time = datetime.now()
build_time = build_time.strftime("%Y-%m-%d %H:%M")
actual_os_dir = os.getcwd() + "/Jeff-OS"
sys_cfg = open("Jeff-OS/sys/var/sys.cfg", 'w')
sys_cfg.write(actual_os_dir + "\n")
sys_cfg.write(os_name + "\n")
sys_cfg.write(build_time + "\n")
sys_cfg.close()
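    # The resulting sys.cfg holds three lines, e.g. (illustrative values):
    #   /home/pi/Jeff-OS
    #   Jeff-OS
    #   2021-06-01 12:30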
# cmd.cfg
shutil.copy("src/var/cmd.cfg", "Jeff-OS/sys/var/cmd.cfg")
cmd_cfg = open("Jeff-OS/sys/var/cmd.cfg", 'r')
lines = cmd_cfg.readlines()
lines.insert(0, actual_os_dir+"\n")
cmd_cfg.close()
cmd_cfg = open("Jeff-OS/sys/var/cmd.cfg", 'w')
cmd_cfg.writelines(lines)
cmd_cfg.close()
# /dat/
shutil.copytree("src/dat/", "Jeff-OS/sys/dat/")
# booting and restarting
print(OK+"copying booting files")
shutil.copy("src/boot.py", "Jeff-OS/boot.py")
# docs
os.mkdir("Jeff-OS/sys/docs/")
shutil.copy("readme.md", "Jeff-OS/sys/docs/readme.md")
@click.command()
@click.option("-bin", is_flag=True)
@click.option("-lib", is_flag=True)
def main(bin, lib):
if len(sys.argv) <= 1:
install()
else:
if bin:
install_default_apps()
if lib:
install_default_libraries()
if __name__ == "__main__":
main()
```
|
{
"source": "JeffTheK/quiz",
"score": 3
}
|
#### File: quiz/tests/test_main.py
```python
import quiz.main
def test_load_questions():
out = quiz.main.load_questions()
assert(len(out) == 2)
assert(out[0]["answer"] == "Amsterdam")
```
|
{
"source": "jeffthemaximum/jeffPD",
"score": 2
}
|
#### File: migrations/versions/5a7ad54d9297_added_school_class_to_db.py
```python
revision = '<KEY>'
down_revision = '4<PASSWORD>b<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('schools',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.add_column(u'coaches', sa.Column('school_id', sa.Integer(), nullable=True))
op.add_column(u'teachers', sa.Column('school_id', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'teachers', 'school_id')
op.drop_column(u'coaches', 'school_id')
op.drop_table('schools')
### end Alembic commands ###
```
|
{
"source": "jeffthenet/airquality",
"score": 3
}
|
#### File: jeffthenet/airquality/zen.py
```python
import os
import time
from ISStreamer.Streamer import Streamer
import atexit
import datetime
# graphical user interface libraries
#import Tkinter
#from tkinter import ttk
from tkinter import *
#from tkinter.ttk import *
import tkFont
#dexter industries open source libraries
import grovepi
import grove_co2_lib
# project libraries
import grove_sensor_oo_lib
#def ledON():
# print("LED button pressed")
# leds
#led_red = 5 # led on digital x
#grovepi.pinMode(led_red,"OUTPUT")
#- ONLINE STORAGE
# Initial State settings
BUCKET_NAME_AQ = "Air Quality Monitoring"
BUCKET_KEY_AQ = "20070308-EV"
# Initial State access key for <EMAIL>
ACCESS_KEY = "<KEY>"
# global variables to the application
DEBUG = False
# stream online to Initial State or not
stream_online = True
#################################
# AIR QUALITY APPLICATION CLASS
#################################
class AirQualityApp(Frame):
# global constants
# Set the time between sensor reads
SECONDS_BETWEEN_READS = 30
# -----------------------------------
def __init__(self):
# monitoring stopped
self.sensorMonitoring = False
# SENSORS OBJECT CREATION AND INITIALIZATION
self.air_quality_sensor = grove_sensor_oo_lib.AirQualitySensor(0) # air quality on analog port 0
self.gas_sensor_MQ2 = grove_sensor_oo_lib.GasSensor(2) # MQ2 on Analog port 2
self.co2_sensor = grove_sensor_oo_lib.CO2SensorSerial()
self.dust_sensor = grove_sensor_oo_lib.DustSensor(2, AirQualityApp.SECONDS_BETWEEN_READS)
# Temperatures and humidity to add
# DATA STREAM CREATION FOR SENDING DATA TO INITIAL STATE
# open the streamer
if (stream_online):
self.streamer_aq = Streamer(bucket_name=BUCKET_NAME_AQ, bucket_key=BUCKET_KEY_AQ, access_key=ACCESS_KEY)
Frame.__init__(self)
self.createGUI()
# -----------------------------------
    # SUBSET OF SENSORS DATA READING & GUI UPDATE (no streaming)
# -----------------------------------
def readSubSetSensorsAndUpdateGUI(self):
# Harmful gas
air_quality_sensor_value = self.air_quality_sensor.readAirQuality()
air_type_string = self.air_quality_sensor.getAirQualityStringValue(air_quality_sensor_value)
airquality_text = str(air_quality_sensor_value)+" ("+ str(air_type_string) +")"
self.airQualityLabelValue.set(airquality_text)
if (DEBUG):
print("Harmful gas: "+airquality_text)
#--------------------------------------
        # reading and display of gas density
gas_MQ2_density = self.gas_sensor_MQ2.readGasDensity()
self.gasMQ2Value.set(str(gas_MQ2_density))
if (DEBUG):
print("Combustible gases (H2, LPG, CH4, CO, Alcohol, Propane & smoke (MQ2), 200-10000, lower better %d" %(gas_MQ2_density))
#--------------------------------------
# reading CO2
co2_concentration = self.co2_sensor.readConcentration()
self.co2Value.set(str(co2_concentration))
if (DEBUG):
print("Carbon dioxide (CO2) PPM around 400: %d ppm" %(co2_concentration))
# -----------------------------------
# RESET VARIABLES
# -----------------------------------
def reset(self):
self.last_reading_time_seconds = 0
# self.air_quality_sensor
self.co2_sensor.reset()
# self.gasMQ2Value
# self.dust_sensor
# -----------------------------------
# ALL SENSORS DATA READING & Streaming
# -----------------------------------
def readSensorsAndUpdateGUIAndStream(self):
# Harmful gas
air_quality_sensor_value = self.air_quality_sensor.readAirQuality()
air_type_string = self.air_quality_sensor.getAirQualityStringValue(air_quality_sensor_value)
airquality_text = str(air_quality_sensor_value)+" ("+ str(air_type_string) +")"
self.airQualityLabelValue.set(airquality_text)
print("Harmful gas: "+airquality_text)
#--------------------------------------
        # reading and display of gas density
gas_MQ2_density = self.gas_sensor_MQ2.readGasDensity()
self.gasMQ2Value.set(str(gas_MQ2_density))
print("Combustible gases (H2, LPG, CH4, CO, Alcohol, Propane & smoke (MQ2), 200-10000, lower better %d" %(gas_MQ2_density))
#--------------------------------------
# reading CO2
co2_concentration = self.co2_sensor.readConcentration()
self.co2Value.set(str(co2_concentration))
print("Carbon dioxide (CO2) PPM around 400: %d ppm" %(co2_concentration))
#--------------------------------------
# dust particules
dust_concentration = self.dust_sensor.readConcentration()
if (dust_concentration>0):
print("Dust particule concentration: %d" %(dust_concentration))
self.dustValue.set(str(dust_concentration))
else:
print("Dust particule: no reading")
if self.dust_sensor.getNbConsecutiveNoReading() > 10:
print ("WARNING: no reading dustsensor %d" %(self.dust_sensor.getNbConsecutiveNoReading()))
self.dustValue.set("Problème de lecture")
# reinit
self.dust_sensor = grove_sensor_oo_lib.DustSensor(2, AirQualityApp.SECONDS_BETWEEN_READS)
#------------------------------------------------------
# stream data points
# ----- Gases ----
if (stream_online):
self.streamer_aq.log("Harmfull Gas (1-900)",air_quality_sensor_value)
self.streamer_aq.log("Combustibles Gas (200-10,000)",gas_MQ2_density)
self.streamer_aq.log("CO2 (0-20,000)", co2_concentration)
# ---------- PARTICULE ------------
# stream dust particule information
if (dust_concentration>0):
self.streamer_aq.log("Dust particule (0-8,000)", dust_concentration)
self.streamer_aq.flush()
# -----------------------------------
# DISPLAY HOUR AND DATE
# -----------------------------------
def displayDateaAdnTime(self, timenow):
strtimenow = "{:02d}:{:02d}:{:02d}".format(timenow.hour, timenow.minute, timenow.second)
self.informationLabelValue1.set(strtimenow)
# -----------------------------------
# CREATION OF THE DESKTOP GUI
# -----------------------------------
def createGUI(self):
#main window
#self.main_window = Tk()
self.myFont = tkFont.Font(family = 'Verdana', size = 16, weight = 'bold')
self.mySmallFont = tkFont.Font(family = 'Verdana', size = 12, weight = 'bold')
self.myLargeFont = tkFont.Font(family = 'Verdana', size = 18, weight = 'bold')
self.master.title("Système de mesure de la qualité de l'air") #----
#self.master.title("Program window")
self.master.geometry("800x480+0+0")
#ledButton = Button(main_window, text = "LED ON", font = myFont, command = ledON, height = 2, width =8 )
#ledButton.pack()
#self.startMonitoringButton = Tkinter.Button(self.main_window, text = "Démarrer", font = self.myFont, command = self.startMonitoringCallback, height = 2, width =8 )
#create the grid
self.master.rowconfigure( 0, weight = 2 )
self.master.columnconfigure( 0, weight = 2 )
self.grid(stick=W+E+N+S)
#set up the widgets
row_value=0
self.informationLabelValue1=StringVar()
Label(self, textvariable=self.informationLabelValue1, font = self.myFont).grid(row=row_value,sticky=W)
self.informationLabelValue1.set("Appuyer sur <Démarrer>")
self.informationLabelValue3=StringVar()
Label(self, textvariable=self.informationLabelValue3, font = self.myFont).grid(row=row_value, column=1, sticky=W)
self.informationLabelValue3.set(" ")
row_value = row_value +1
Label(self, text="Date & heure de la dernière mesure:", font = self.myFont).grid(row=row_value, column=0, sticky=W)
self.informationLabelValue2=StringVar()
Label(self, textvariable=self.informationLabelValue2, font = self.myFont).grid(row=row_value, column=1, sticky=W)
self.informationLabelValue2.set(" aucune")
row_value = row_value +1
Label(self, text="(c) <NAME> & <NAME>", font = self.mySmallFont).grid(row=row_value, column=0, sticky=W)
row_value = row_value +1
Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
# row_value = row_value +1
# Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
# Sensors 1: air quality
row_value = row_value +1
Label(self, text="Gaz dangereux (1-900):", font = self.myLargeFont).grid(row=row_value, column=0, sticky=W)
self.airQualityLabelValue=StringVar()
airQualityLabel=Label(self, textvariable=self.airQualityLabelValue, font = self.myLargeFont).grid(row=row_value, column=1, sticky=W)
self.airQualityLabelValue.set(" - ")
# Sensors 2: MQ2
row_value = row_value +1
Label(self, text="Gaz inflammables (200-10,000):", font = self.myLargeFont).grid(row=row_value, column=0, sticky=W)
self.gasMQ2Value=StringVar()
gasMQ2Label=Label(self, textvariable=self.gasMQ2Value, font = self.myLargeFont).grid(row=row_value, column=1, sticky=W)
self.gasMQ2Value.set(" - ")
# Sensors 3: CO2
row_value = row_value +1
Label(self, text="CO2 (0-20,000):", font = self.myLargeFont).grid(row=row_value, column=0, sticky=W)
self.co2Value=StringVar()
gasMQ2Label=Label(self, textvariable=self.co2Value, font = self.myLargeFont).grid(row=row_value, column=1, sticky=W)
self.co2Value.set(" - ")
# Sensors 4: DUST
row_value = row_value +1
Label(self, text="Particules fines (0-8,000):", font = self.myLargeFont).grid(row=row_value, column=0, sticky=W)
self.dustValue=StringVar()
gasMQ2Label=Label(self, textvariable=self.dustValue, font = self.myLargeFont).grid(row=row_value, column=1, sticky=W)
self.dustValue.set(" - ")
row_value = row_value +1
Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
row_value = row_value +1
Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
row_value = row_value +1
Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
# row_value = row_value +1
# Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
# row_value = row_value +1
# Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
# row_value = row_value +1
# Label(self, text=" ", font = self.myFont).grid(row=row_value, column=0, sticky=W)
# buttons
row_value = row_value +1
self.startButtonLabel=StringVar()
Button(self, textvariable =self.startButtonLabel, font = self.myFont, command = lambda: self.startMonitoringCallback(), height = 2, width =8 ).grid(row=row_value, column=0, sticky=W+S)
self.startButtonLabel.set("Démarrer")
Button(self, text = "Quitter", font = self.myFont, command = lambda: self.quitCallback(), height = 2, width =8 ).grid(row=row_value, column=1, sticky=W+S)
'''
#main input box
self.inputBox=Text(self)
self.inputBox.grid(row=1, sticky=W+E+S+N)
self.inputBox.insert(END, 'Example content\ngoes here')
#other buttons
Button(self, text="Run", command=self.startMonitoringCallback).grid(row=3, sticky=W)
self.rowconfigure(1, weight=1)
self.columnconfigure(0, weight=1)
self.startMonitoringButton = Button(self, text = "Démarrer", font = self.myFont, command = lambda: self.startMonitoringCallback(), height = 2, width =8 )
# self.startMonitoringButton.pack()
self.optionsButton = Button(self, text = "Options", font = self.myFont, command = lambda: self.optionsCallback(), height = 2, width =8 )
# self.optionsButton.pack()
self.quitButton = Button(self, text = "Quit", font = self.myFont, command = lambda: self.quitCallback(), height = 2, width =8 )
# self.quitButton.pack()
'''
#style=ttk.Style()
#style.configure("BW.TLabel", foreground="black", background="white")
'''
self.airQualityLabelValue=StringVar()
airQualityLabel=Label(self, textvariable=self.airQualityLabelValue, font = self.myFont)
# airQualityLabel.pack()
self.airQualityLabelValue.set("Air Quality value: ")
'''
#END createGUI
# -----------------------------------
# CALLBACK
# -----------------------------------
def startMonitoringCallback(self):
if (self.sensorMonitoring == True):
# stop the capture
# self.sensorMonitoring = False
self.startButtonLabel.set("Démarrer")
self.informationLabelValue1.set("Appuyer sur <Démarrer>")
#self.informationLabelValue2.set(" ")
self.informationLabelValue3.set(" ")
else:
# reset
self.reset()
# start the capture
# self.sensorMonitoring = True
self.startButtonLabel.set("Arrêter")
self.informationLabelValue1.set("Mesure de la Qualité de l'air")
# -----------------------------------
# CALLBACK
# -----------------------------------
def quitCallback(self):
self.infiniteLoop=False
self.master.quit()
# -----------------------------------
# MAIN LOOP
# -----------------------------------
def mainLoop(self):
# start
self.sensorMonitoring = True
# Init last time the reading was done
last_reading_time_seconds = 0 #time.mktime(datetime.datetime.utcnow().timetuple())
self.infiniteLoop=True
# infinite loop
while (self.infiniteLoop):
try:
#------------------------------------------------------
#------------------------------------------------------
# LED OFF
# grovepi.digitalWrite(led_red,0)
#------------------------------------------------------
# DATE HOURS TIME
now = datetime.datetime.utcnow()
# update time and date
self.displayDateaAdnTime(now)
# main loop for TKinter
self.master.update_idletasks()
self.master.update()
now_seconds = time.mktime(now.timetuple())
if (DEBUG):
print("now_seconds:"+str(now_seconds))
print("last_reading_time_seconds:"+str(last_reading_time_seconds))
print("SECONDS_BETWEEN_READS:"+str(AirQualityApp.SECONDS_BETWEEN_READS))
#------------------------------------------------------
# MEASUREMENT
# data acquisition
# do we read data this loop?
if (self.sensorMonitoring) and (now_seconds >= last_reading_time_seconds + AirQualityApp.SECONDS_BETWEEN_READS):
print ("---------------------")
print("Now (utc): "+str(now))
print ("---------------------")
                    # --- read the sensors, update the GUI, send to the Internet and print to the terminal ---
self.readSensorsAndUpdateGUIAndStream()
self.informationLabelValue2.set(str(now))
last_reading_time_seconds = now_seconds
# endif data reading
elif (self.sensorMonitoring):
                    # Update all sensor values besides dust
self.readSubSetSensorsAndUpdateGUI()
# next capture
self.informationLabelValue3.set("Prochaine mesure dans %d s" %(round(last_reading_time_seconds + AirQualityApp.SECONDS_BETWEEN_READS - now_seconds)))
'''
#------------------------------------------------------
# wait until next acquisition
for i in range (1, SECONDS_BETWEEN_READS):
grovepi.digitalWrite(led_red,1)
time.sleep(.5)
grovepi.digitalWrite(led_red,0)
time.sleep(.5)
'''
# main loop for TKinter
self.master.update_idletasks()
self.master.update()
# free some processor time
time.sleep(.5)
# endtry
except KeyboardInterrupt: # Turn LED off before stopping
grovepi.digitalWrite(led_red,0)
self.master.quit()
break
except IOError:
print ("Error")
# end while
#----------------------------------
# MAIN PROGRAM
#----------------------------------
if __name__ == '__main__':
# main_window = Tk()
# airQualityApp = AirQualityApp(main_window)
AirQualityApp().mainLoop() # custom mainloop for data acquisition
```
|
{
"source": "jefftheprogrammer/sbot",
"score": 3
}
|
#### File: sbot/tests/test_metadata.py
```python
from os import environ
from pathlib import Path
from pytest import raises
from sbot.metadata import METADATA_ENV_VAR, MetadataKeyError, load
def test_metadata_env_var() -> None:
"""Check that the env_var is as expected."""
assert METADATA_ENV_VAR == "SBOT_METADATA_PATH"
def test_metadata_key_error() -> None:
"""Test the key error exception."""
e = MetadataKeyError("beans")
assert str(e) == "Key \'beans\' not present in metadata, or no metadata was available"
def test_load_no_env_var() -> None:
"""Test the behaviour when the environment variable is not set."""
data = load()
assert data == {}
data = load(fallback={"beans": 5})
assert data == {"beans": 5}
def test_load_file() -> None:
"""Test that we can load a file."""
data_path = Path(__file__).parent
environ[METADATA_ENV_VAR] = str(data_path.absolute())
data = load()
assert data == {"is_competition": True, "zone": 1}
data = load(fallback={"beans": 5})
assert data == {"is_competition": True, "zone": 1}
def test_load_file_not_found() -> None:
"""Test that the fallback data is loaded if no file is found."""
data_path = Path(__file__).parent.joinpath("test_data", "empty")
environ[METADATA_ENV_VAR] = str(data_path.absolute())
data = load()
assert data == {}
data = load(fallback={"beans": 5})
assert data == {"beans": 5}
def test_load_bad_file() -> None:
"""Test that an exception is thrown when the JSON file is bad."""
data_path = Path(__file__).parent.joinpath("test_data", "bad")
environ[METADATA_ENV_VAR] = str(data_path.absolute())
with raises(RuntimeError):
load()
def test_load_bad_data() -> None:
"""Test that an exception is thrown when the JSON is not an object."""
data_path = Path(__file__).parent.joinpath("test_data", "not_object")
environ[METADATA_ENV_VAR] = str(data_path.absolute())
with raises(TypeError):
load()
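# Hedged usage sketch (added for illustration; not part of the original test
# suite). Based only on the tests above: sbot.metadata.load() reads a JSON
# metadata file from the directory named by SBOT_METADATA_PATH and otherwise
# returns the supplied fallback dict. The directory path below is a placeholder.
#
#   environ[METADATA_ENV_VAR] = "/path/to/metadata/dir"
#   zone = load(fallback={"zone": 0}).get("zone")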
```
|
{
"source": "Jeff-Tian/mybnb",
"score": 3
}
|
#### File: Lib/test/test_functools.py
```python
import functools
import sys
import unittest
from test import test_support
from weakref import proxy
import pickle
@staticmethod
def PythonPartial(func, *args, **keywords):
'Pure Python approximation of partial()'
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class TestPartial(unittest.TestCase):
thetype = functools.partial
def test_basic_examples(self):
p = self.thetype(capture, 1, 2, a=10, b=20)
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.thetype(map, lambda x: x*10)
self.assertEqual(p([1,2,3,4]), [10, 20, 30, 40])
def test_attributes(self):
p = self.thetype(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
# attributes should not be writable
self.assertRaises(TypeError, setattr, p, 'func', map)
self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.thetype(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_argument_checking(self):
self.assertRaises(TypeError, self.thetype) # need at least a func arg
try:
self.thetype(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.thetype(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.thetype(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.thetype(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.thetype(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.thetype(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.thetype(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.thetype(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.thetype(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x // y
self.assertRaises(ZeroDivisionError, self.thetype(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.thetype(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.thetype(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.thetype(f, y=0), 1)
def test_weakref(self):
f = self.thetype(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = map(str, range(10))
join = self.thetype(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.thetype(''.join)
self.assertEqual(join(data), '0123456789')
def test_pickle(self):
f = self.thetype(signature, 'asdf', bar=True)
f.add_something_to__dict__ = True
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f), signature(f_copy))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.thetype(object)
self.assertRaises(SystemError, f.__setstate__, BadSequence())
class PartialSubclass(functools.partial):
pass
class TestPartialSubclass(TestPartial):
thetype = PartialSubclass
class TestPythonPartial(TestPartial):
thetype = PythonPartial
# the python version isn't picklable
test_pickle = test_setstate_refcount = None
# the python version isn't a type
test_attributes = None
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertTrue(getattr(wrapper, name) is getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
self.assertTrue(wrapped_attr[key] is wrapper_attr[key])
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
@test_support.requires_docstrings
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f)
def wrapper():
pass
self.check_wrapper(wrapper, f)
return wrapper
def test_default_update(self):
wrapper = self._default_update()
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce(unittest.TestCase):
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
reduce = functools.reduce
self.assertEqual(reduce(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
reduce(lambda x, y: x*y, range(2,21), 1L),
2432902008176640000L
)
self.assertEqual(reduce(lambda x, y: x+y, Squares(10)), 285)
self.assertEqual(reduce(lambda x, y: x+y, Squares(10), 0), 285)
self.assertEqual(reduce(lambda x, y: x+y, Squares(0), 0), 0)
self.assertRaises(TypeError, reduce)
self.assertRaises(TypeError, reduce, 42, 42)
self.assertRaises(TypeError, reduce, 42, 42, 42)
self.assertEqual(reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, reduce, 42, (42, 42))
class TestCmpToKey(unittest.TestCase):
def test_cmp_to_key(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=functools.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_hash(self):
def mycmp(x, y):
return y - x
key = functools.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash(k))
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(str):
pass
self.assertTrue(A("a") < A("b"))
self.assertTrue(A("b") > A("a"))
self.assertTrue(A("a") <= A("b"))
self.assertTrue(A("b") >= A("a"))
self.assertTrue(A("b") <= A("b"))
self.assertTrue(A("b") >= A("b"))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_bug_10042(self):
@functools.total_ordering
class TestTO:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, TestTO):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, TestTO):
return self.value < other.value
raise TypeError
with self.assertRaises(TypeError):
TestTO(8) <= ()
def test_main(verbose=None):
test_classes = (
TestPartial,
TestPartialSubclass,
TestPythonPartial,
TestUpdateWrapper,
TestTotalOrdering,
TestWraps,
TestReduce,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == '__main__':
test_main(verbose=True)
```
|
{
"source": "Jeff-Tian/pycom",
"score": 2
}
|
#### File: Jeff-Tian/pycom/configure.py
```python
import tkinter as tk
import yaml
def is_stimulate(command):
if 'stimulate' in command:
if command['stimulate']:
return 1
else:
return 0
else:
return 0
def is_module_checked(command, module_index):
if 'module' in command and \
type(command['module']) is dict and \
module_index in command['module'] and \
command['module'][module_index] is True:
return 1
else:
return 0
class ConfigureDialog:
def __init__(self, parent):
self.modal_window = tk.Toplevel(parent)
# self.label = tk.Label(self.modal_window, text='实验配置')
# self.label.pack()
self.commands = {}
self.read_config()
self.modal_window.grab_set()
def save(self):
self.modal_window.destroy()
def save_config(self, config_file_path='./config.yaml'):
with open(config_file_path, 'w', encoding='utf8') as outfile:
print('config = ', self.config)
yaml.dump(self.config, outfile, default_flow_style=False, allow_unicode=True)
def read_config(self, config_file_path='./config.yaml'):
with open(config_file_path, 'r') as stream:
            self.config = yaml.safe_load(stream)
self.update_ui()
def update_ui(self):
row = 0
row = self.make_config(row)
row = self.make_button_row(row)
def make_config(self, row):
for config_item, value in self.config.items():
if type(value) is not list:
row = self.make_config_item(config_item, row, value)
else:
row = self.make_list_name(config_item, row)
row = self.make_list_items(config_item, row)
return row
def make_config_item(self, config_item, row, value):
label = tk.Label(self.modal_window, text=config_item)
label.grid(row=row, column=0, sticky=tk.W)
the_value = tk.StringVar()
the_value.set(value)
input_box = tk.Entry(self.modal_window, textvariable=the_value)
input_box.grid(row=row, column=1, sticky=tk.W)
row += 1
return row
def make_list_name(self, key, row):
label = tk.Label(self.modal_window, text=key)
label.grid(row=row, column=0, sticky=tk.W)
row += 1
return row
def make_list_items(self, config_item_key, row):
for item in self.config[config_item_key]:
row = self.make_list_item(item, row)
return row
def make_list_item(self, o, row):
self.make_command_name_label(o, row)
row += 1
self.make_is_stimulate_checkbox(o, row)
self.make_at_field(o, row)
self.make_modules_checkboxes(o, row)
row += 1
return row
def make_command_name_label(self, o, row):
label = tk.Label(self.modal_window, text=o['command'])
label.grid(row=row, column=0, sticky=tk.W)
def make_at_field(self, o, row):
label = tk.Label(self.modal_window, text='触发时间')
label.grid(row=row, column=2, sticky=tk.W)
at_text = tk.StringVar(value=o['at'])
text_box = tk.Entry(self.modal_window, width=20, textvariable=at_text)
text_box.grid(row=row, column=3, sticky=tk.W)
def make_is_stimulate_checkbox(self, command, row):
self.commands[row] = {'is_stimulate': tk.IntVar(value=is_stimulate(command))}
chk = tk.Checkbutton(self.modal_window, text='刺激', variable=self.commands[row]['is_stimulate'], onvalue=1,
offvalue=0)
chk.grid(row=row, column=1, sticky=tk.W)
def make_modules_checkboxes(self, command, row):
self.make_module_checkbox(command, 1, row, 5)
self.make_module_checkbox(command, 2, row, 6)
self.make_module_checkbox(command, 3, row, 7)
self.make_module_checkbox(command, 4, row, 8)
def make_module_checkbox(self, command, module_index, row, col):
command['module_' + str(module_index) + '_check'] = tk.IntVar(value=is_module_checked(command, module_index))
chk = tk.Checkbutton(self.modal_window, text='模块 ' + str(module_index),
variable=command['module_' + str(module_index) + '_check'],
onvalue=1, offvalue=0)
chk.grid(row=row, column=col, sticky=tk.W)
def make_button_row(self, row):
save_button = tk.Button(self.modal_window, text='保存', command=self.sync_config_and_save)
save_button.grid(row=row, column=0, sticky=tk.W)
def sync_config_and_save(self):
self.save_config()
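# Hedged sketch (added for illustration; not part of the original module).
# The config.yaml shape implied by is_stimulate(), is_module_checked() and
# make_config(): scalar options plus one list-valued option whose entries are
# command dicts. All key names and values below are assumptions, not taken
# from the repository.
#
# serial_port: COM3            # example scalar option (assumed name)
# commands:                    # assumed name for the list-valued option
#   - command: start           # label shown by make_command_name_label()
#     at: '00:00:01'           # trigger time shown by make_at_field()
#     stimulate: true          # read by is_stimulate()
#     module:                  # per-module flags read by is_module_checked()
#       1: true
#       2: false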
```
#### File: Jeff-Tian/pycom/helper.py
```python
__all__ = ['hex_decode', 'try_hex_encode']
def hex_decode(bytes):
return [try_hex_encode(c) for c in bytes]
def try_hex_encode(c):
try:
return hex(c)
except TypeError:
return c
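# Hedged usage sketch (added for illustration; not part of the original file):
# hex-encode each byte of a frame while tolerating non-integer items.
if __name__ == '__main__':
    print(hex_decode(b'\x01\xff'))  # ['0x1', '0xff'] on Python 3
    print(try_hex_encode('x'))      # 'x' -- the TypeError is swallowed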
```
|
{
"source": "JeffTM/Markov-Bot-Py",
"score": 4
}
|
#### File: JeffTM/Markov-Bot-Py/MarkovBot.py
```python
from collections import Counter
import random
class MarkovBot(object):
'''A class representing a markov chain of strings.
Data members:
self.__data
Data is held in an adjacency list represented by a dictionary that maps strings to collections.Counter objects.
The counter holds counts of the number of times specific strings have followed the dictionary string.
That is, self.__data[s1][s2] contains a count of the number of times the token s2 has followed s1.
'''
def __init__(self, tokens = None):
'''Initializes the MarkovBot. If an optional list of tokens is provided then build(tokens) is immediately called.'''
self.__data = {}
if tokens: #if tokens is not None or an empty list
self.build(tokens)
def __str__(self):
'''Returns a string representation of the MarkovObject.'''
if self.empty():
return 'MarkovBot object: empty'
verts = sorted(self.__data.keys())
for i in range(len(verts)):
s = "'" + verts[i] + "'\t{"
counts = sorted(self.__data[verts[i]].items())
counts = list(map(lambda t : "'" + t[0] + "': " + str(t[1]), counts))
s += ', '.join(counts) + '}'
verts[i] = s
return 'MarkovBot object:\n' + '\n'.join(verts)
def build(self, tokens):
'''Accepts a list of tokens and adds them to the MarkovBot.
Counts the number of times each token is immediately followed by another.
The last token is added to the dictionary but is not considered to have any token following it.
Calling build repeatedly will not cause the first element of the second call to be considered following the last element of the first.
        To achieve this behavior, combine the lists of tokens.
Tokens are case sensitive. Using all lowercase tokens then formatting afterwards is recommended.
Passing anything other than a list or tuple of strings causes undefined behavior.
tokens -- a list of stings representing the tokens. Use of a function from the Tokenizer module is recommended.
'''
if len(tokens) == 0:
return
elif len(tokens) == 1:
if tokens[0] not in self.__data:
self.__data[tokens[0]] = Counter()
return
#For each token except the last
for i in range(len(tokens) - 1):
if tokens[i] not in self.__data: #if the token is not already in the dict add it
self.__data[tokens[i]] = Counter()
self.__data[tokens[i]][tokens[i + 1]] += 1 #Increment the counter
#Add the last item if it doesn't already exist
if tokens[-1] not in self.__data:
self.__data[tokens[-1]] = Counter()
def empty(self):
'''Returns true if this objects dictionary is empty.'''
return not self.__data #an empty dict evaluates to false
def keys(self):
'''Returns a list of all tokens that have been encountered.'''
return list(self.__data.keys())
def walk(self, count, start = None, stop = None):
'''Performs a weighted random walk of the graph stored by the MarkovBot.
First chooses a starting node with an unwieghted choice of all encountered tokens.
If start is not None it uses the value provided instead.
From there the next node is chosen by a weighted choice of all nodes that have followed the last chosen node.
The weights are the counts of the number of times the node has been followed.
This is repeated until the number of nodes exceeds count, or until a token matching stop is encountered.
The walk is returned as a list of strings.
count -- the maximum number of nodes to return. A large number is recommended if you want to generate until stop is reached.
start -- the string to start the walk at. Must be a token that has been encountered by the MarkovBot during building. Defaults to None.
stop -- the string to stop at if it is encountered during the walk.
return -- the list of strings that were encountered during the walk.
'''
result = []
if self.empty():
return result
if start:
result.append(start)
else:
result.append(random.choice(self.keys())) #choose a random starting point
for i in range(count):
if result[-1] == stop: #if the list ends with the stop string exit
break
k = list(self.__data[result[-1]].keys()) #keys
w = list(self.__data[result[-1]].values()) #weights
if not w: #if we have reached a vertex with no outgoing edges stop
break
result.append(random.choices(k, weights = w)[0])
return result
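# Hedged usage sketch (added for illustration; not part of the original module).
# The corpus file name is a placeholder, and the import assumes the companion
# Tokenizer.py from this repository is importable.
if __name__ == '__main__':
    import Tokenizer
    tokens = Tokenizer.group_aware(Tokenizer.read_all('corpus.txt'))
    bot = MarkovBot(tokens)
    print(' '.join(bot.walk(50, stop='.')))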
```
#### File: JeffTM/Markov-Bot-Py/Tokenizer.py
```python
_groupchars = {'(': ')', '[': ']', '<': '>', '{': '}'}
def _closer(c):
'''Returns the closing tag equivalent of c.
c -- the character to find a closing tag for.
    return -- the closing character, or None if c is not an opening character.
'''
return _groupchars.get(c, None)
def _find_closing_char(s, c, start = 0):
'''Returns the index of the closing character of c in src starting at start.
s -- the string to search in.
c -- the opening character.
start -- the index to start the search.
return -- the index of the closing tag as an integer. -1 if not found.
'''
if start >= len(s):
return -1
c = _closer(c)
return s.find(c, start)
def _find_stopper(s, start = 0):
'''Returns the index of the first character in s for which is_stopper returns true.
s -- the string to search in.
start -- the index to start the search.
return -- the index of the first stopper in s[start:]. -1 if end of string reached.
'''
while start < len(s):
if _is_stopper(s[start]):
return start
start += 1
return -1
def _is_opening_char(c):
'''Returns true if c is in ('(', '[', '<', '{')'''
return c in _groupchars
def _is_punctuation(c):
'''Returns true if c is in ('!', ',', '.', ':', ';', '?')'''
return c in ('!', ',', '.', ':', ';', '?')
def _is_stopper(c):
'''Returns true if c is a stopping character.
C is a stopping character if is_punctuation(c) or c.isspace() returns true.
'''
return c.isspace() or _is_punctuation(c)
#public functions --------------------------------
def read_all(txtfile):
'''Reads the entirety of a text file and returns it as a string.
txtfile -- the path of the text file.
return -- a string containing the entirety of the file.
'''
    with open(txtfile, 'r') as f:
        return f.read()
def basic(s):
'''Minimal tokenizer. Returns s.lower().split()'''
return s.lower().split()
def group_aware(s):
'''Group aware tokenizer.
Keeps text between (), [], <>, {} as a single token.
Also keeps punctuation and groups of the same punctuation as a token.
For example: 'Hello World...' would be tokenized as [hello, world, ...].
s -- the string to be tokenized.
return -- a list of tokens.
'''
#Preprocessing
s = s.strip().lower()
if (len(s) == 0):
return []
i = 0 #Current character
tokens = [] #List of tokens
while i < len(s):
#Advance past any whitespace characters
while i < len(s) and s[i].isspace():
i += 1
if i == len(s):
break
        #If s[i] is an opening character, add the substring contained between i and its closing character
if _is_opening_char(s[i]):
close_index = _find_closing_char(s, s[i], i + 1)
if close_index == -1:
tokens.append(s[i:])
break
close_index += 1 #Index 1 after the closing char
tokens.append(s[i:close_index])
i = close_index
        #Else if s[i] is a punctuation character, add it and any following equal characters
elif _is_punctuation(s[i]):
stop_index = i + 1
while stop_index < len(s) and s[stop_index] == s[i]:
stop_index += 1
if stop_index == len(s):
tokens.append(s[i:])
break
tokens.append(s[i:stop_index])
i = stop_index
#else the token is the substring from i up to but not including the next stop character
else:
stopper_index = _find_stopper(s, i)
if stopper_index == -1:
tokens.append(s[i:])
break
tokens.append(s[i:stopper_index])
i = stopper_index
return tokens
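# Hedged usage sketch (added for illustration; not part of the original module):
if __name__ == '__main__':
    sample = 'Hello, world (and everyone in it)! How are you?'
    print(group_aware(sample))
    # -> ['hello', ',', 'world', '(and everyone in it)', '!',
    #     'how', 'are', 'you', '?']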
```
|
{
"source": "jefftp/iland-sdk-example",
"score": 2
}
|
#### File: jefftp/iland-sdk-example/iland_api.py
```python
import iland, json, argparse, time, sys
def init_api_client():
with open('./creds.json', 'r') as f:
creds = json.load(f)
return Client(creds['client_id'], creds['client_secret'], creds['username'], creds['password'])
class Client:
def __init__(self, client_id, client_secret, username, password):
self.username = username
self.api = iland.Api(
client_id=client_id,
client_secret=client_secret,
username=username,
password=password)
def get_entity(self, entity):
items = []
entity_lookup = {
'company' : 'COMPANY',
'location' : 'IAAS_LOCATION',
'org' : 'IAAS_ORGANIZATION',
'vdc' : 'IAAS_VDC',
'vapp' : 'IAAS_VAPP',
'vm' : 'IAAS_VM'
}
inventory = self.api.get(f"/users/{self.username}/inventory")
api_entity = entity_lookup[entity]
for company in inventory['inventory']:
for item in company['entities'][api_entity]:
items.append(item)
return items
def get_vm(self, uuid):
vm_data = self.api.get(f"/vms/{uuid}")
return VirtualMachine(self, vm_data)
class VirtualMachine:
def __init__(self, client, vm_data):
self.client = client
self.uuid = vm_data['uuid']
self.status = vm_data['status']
def do_action(self, action):
actions = {
'power_on': '/vms/{}/actions/poweron',
'shutdown': '/vms/{}/actions/shutdown',
'power_off': '/vms/{}/actions/poweroff',
'reboot': '/vms/{}/actions/reboot',
'suspend': '/vms/{}/actions/suspend'}
if action in actions:
task_data = self.client.api.post(actions[action].format(self.uuid))
return Task(self.client, task_data)
sys.exit(f"Error: Virtual Machines do not support the {action} action.")
class Task:
def __init__(self, client, task_data):
self.client = client
self.uuid = task_data['uuid']
self.status = task_data['status']
self.active = task_data['active']
self.message = task_data['message']
self.operation = task_data['operation']
def refresh(self):
task = self.client.api.get(f"/tasks/{self.uuid}")
self.status = task['status']
self.active = task['active']
self.message = task['message']
self.operation = task['operation']
return
def watch(self):
while True:
self.refresh()
if self.active == False:
if self.status == 'success':
print(f"{self.operation} - {self.status}")
else:
print(f"{self.operation} - {self.status} ({self.message})")
return
else:
print(f"{self.operation} - {self.status}")
time.sleep(5)
def handle_input(client,args):
action_objects = {
'list' : ('company', 'location', 'org', 'vdc', 'vapp', 'vm'),
        'power_on' : ('vm',),
        'shutdown' : ('vm',),
        'power_off' : ('vm',),
        'reboot' : ('vm',),
        'suspend' : ('vm',)
}
if not (args.object in action_objects[args.action]):
sys.exit(f"Error: Action {args.action} not supported on object {args.object}.")
if args.action == 'list':
if args.object == 'company':
for company in client.get_entity(args.object):
print(f"{company['name']}, {company['uuid']}")
if args.object == 'location':
for location in client.get_entity(args.object):
print(f"{location['name']}")
if args.object == 'org':
for org in client.get_entity(args.object):
print(f"{org['name']}, {org['uuid']}")
if args.object == 'vdc':
for vdc in client.get_entity(args.object):
print(f"{vdc['name']}, {vdc['uuid']}")
if args.object == 'vapp':
for vapp in client.get_entity(args.object):
print(f"{vapp['name']}, {vapp['uuid']}")
if args.object == 'vm':
for vm in client.get_entity(args.object):
print(f"{vm['name']}, {vm['uuid']}")
elif 'vm' in action_objects[args.action]:
if args.uuid:
vm = client.get_vm(args.uuid)
task = vm.do_action(args.action)
task.watch()
else:
sys.exit(f"Error: UUID required to perform action {args.action} on object {args.object}.")
if __name__ == '__main__':
client = init_api_client()
parser = argparse.ArgumentParser()
parser.add_argument('action', help='available actions', choices=['list','power_on','shutdown','power_off','reboot', 'suspend'], default=None)
parser.add_argument('object', help='target object type', choices=['company', 'location','org','vdc','vapp','vm'], default=None)
parser.add_argument('--uuid', help='target object uuid', default=None, required=False)
args = parser.parse_args()
handle_input(client,args)
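# Hedged usage sketch (added for illustration; not part of the original script).
# Example invocations, assuming a creds.json file sits next to the script; the
# UUID below is a placeholder, not a real object identifier.
#
#   python iland_api.py list vm
#   python iland_api.py power_on vm --uuid 00000000-0000-0000-0000-000000000000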
```
|
{
"source": "jefftp/python-sdk",
"score": 3
}
|
#### File: python-sdk/tests/test_iland.py
```python
import json
import time
import unittest
import iland
import requests_mock
BASE_URL = 'http://example.com/ecs'
VALID_TOKEN_PAYLOAD = {'expires_in': 12,
'refresh_expires_in': 17,
'access_token': '<PASSWORD>',
'refresh_token': '<PASSWORD>'}
VALID_REFRESH_TOKEN_PAYLOAD = {'expires_in': 12,
'refresh_expires_in': 17,
'access_token': '<PASSWORD>',
'refresh_token': '<PASSWORD>'}
class TestIland(unittest.TestCase):
session = None
adapter = None
def setUp(self):
self.api = iland.Api(client_id='fake',
client_secret='fake',
username='fake',
password='<PASSWORD>')
self.api._base_url = BASE_URL
self.api._access_token_url = iland.ACCESS_URL
def test_login_ok_200(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
def test_login_ok_201(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=201)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
def test_login_ok_202(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=202)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
def test_login_ko_500(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps({'error': 'an error occured'}),
status_code=500)
with self.assertRaises(iland.UnauthorizedException):
self.api.login()
def test_login_ko_400(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps({'error': 'an error occured'}),
status_code=400)
with self.assertRaises(iland.UnauthorizedException):
self.api.login()
def test_refresh_token_ok(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
m.post(iland.REFRESH_URL,
text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
status_code=200)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# manually refresh token
self.api.refresh_access_token()
# still the same since not expired therefore not renewed
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# let's wait for expiration
time.sleep(5)
self.api.refresh_access_token()
self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
self.api.get_access_token())
# manually remove the actual token so that we refetch an access
# token
self.api._token = None
self.api.refresh_access_token()
self.assertEqual(VALID_TOKEN_PAYLOAD,
self.api.get_access_token())
def test_refresh_token_ko_400(self):
with requests_mock.mock() as m:
login_spy = m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
refresh_spy = m.post(iland.REFRESH_URL,
text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
status_code=400)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# wait for access token expiration. since the refresh endpoint
# returns a 400, we expect that a new login will be initiated
# because the existing session can no longer be refreshed
time.sleep(5)
self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
self.api.refresh_access_token())
self.assertEqual(1, refresh_spy.call_count)
self.assertEqual(2, login_spy.call_count)
def test_refresh_token_ko_500(self):
with requests_mock.mock() as m:
login_spy = m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
refresh_spy = m.post(iland.REFRESH_URL,
text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
status_code=500)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# wait for access token expiration. since the refresh endpoint
            # returns a 500, we expect that a new login will be initiated
# because the existing session can no longer be refreshed
time.sleep(5)
self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
self.api.refresh_access_token())
self.assertEqual(1, refresh_spy.call_count)
self.assertEqual(2, login_spy.call_count)
def test_refresh_token_expired(self):
with requests_mock.mock() as m:
login_spy = m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# wait for refresh token expiration. since the refresh token
# expired, we expect that a new login will be initiated because
# the existing session can no longer be refreshed
time.sleep(8)
self.assertEqual(VALID_TOKEN_PAYLOAD,
self.api.refresh_access_token())
self.assertEqual(2, login_spy.call_count)
def test_get_ok_200(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.get(rpath)
self.assertEquals(user_data, req)
def test_get_ok_201(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=201)
req = self.api.get(rpath)
self.assertEquals(user_data, req)
def test_get_ok_202(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=202)
req = self.api.get(rpath)
self.assertEquals(user_data, req)
def test_get_ok_204(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=204)
req = self.api.get(rpath)
self.assertEquals(user_data, req)
def test_get_ko_400(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=400)
with self.assertRaises(iland.ApiException):
self.api.get(rpath)
def test_get_ko_500(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=500)
with self.assertRaises(iland.ApiException):
self.api.get(rpath)
def test_post_ok(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.post(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.post(rpath, form_data={'a': 'b'})
self.assertEquals(user_data, req)
def test_post_ok_no_formdata(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.post(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.post(rpath)
self.assertEquals(user_data, req)
def test_put_ok(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.put(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.put(rpath, form_data={'a': 'b'})
self.assertEquals(user_data, req)
def test_put_ok_no_formdata(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.put(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.put(rpath)
self.assertEquals(user_data, req)
def test_delete_ok(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.delete(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.delete(rpath)
self.assertEquals(user_data, req)
def test_unknown_verb_internal(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.delete(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
with self.assertRaises(iland.ApiException):
self.api._do_request(rpath, verb='ACK')
def test_with_default_base_url(self):
self.api = iland.Api(client_id='fake',
client_secret='fake',
username='fake',
password='<PASSWORD>')
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(iland.BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.get(rpath)
self.assertEquals(user_data, req)
def test_with_proxies_set(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
self.api._proxies = {'https': 'https://10.10.10.10:3128'}
req = self.api.get(rpath)
self.assertEquals(user_data, req)
def test_get_with_extra_header(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
request_headers={'Host': 'api.ilandcloud.com'},
status_code=200)
req = self.api.get(rpath, headers={'Host': 'api.ilandcloud.com'})
self.assertEquals(user_data, req)
def test_get_with_extra_disallowed_header(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
# Set Accept to text/csv but it's ignored by api, so we get json
req = self.api.get(rpath, headers={'Accept': 'text/csv'})
self.assertEquals(user_data, req)
def test_get_with_timeout(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
rpath = '/user/jchirac'
user_data = {'username': 'jchirac'}
m.get(BASE_URL + rpath, text=json.dumps(user_data),
status_code=200)
req = self.api.get(rpath, timeout=5.0)
self.assertEquals(user_data, req)
```
|
{
"source": "jefftriplett/datasette",
"score": 3
}
|
#### File: datasette/tests/test_utils.py
```python
from datasette.app import Datasette
from datasette import utils
from datasette.utils.asgi import Request
import json
import os
import pathlib
import pytest
import sqlite3
import tempfile
from unittest.mock import patch
@pytest.mark.parametrize(
"path,expected",
[
("foo", ["foo"]),
("foo,bar", ["foo", "bar"]),
("123,433,112", ["123", "433", "112"]),
("123%2C433,112", ["123,433", "112"]),
("123%2F433%2F112", ["123/433/112"]),
],
)
def test_urlsafe_components(path, expected):
assert expected == utils.urlsafe_components(path)
@pytest.mark.parametrize(
"path,added_args,expected",
[
("/foo", {"bar": 1}, "/foo?bar=1"),
("/foo?bar=1", {"baz": 2}, "/foo?bar=1&baz=2"),
("/foo?bar=1&bar=2", {"baz": 3}, "/foo?bar=1&bar=2&baz=3"),
("/foo?bar=1", {"bar": None}, "/foo"),
# Test order is preserved
(
"/?_facet=prim_state&_facet=area_name",
(("prim_state", "GA"),),
"/?_facet=prim_state&_facet=area_name&prim_state=GA",
),
(
"/?_facet=state&_facet=city&state=MI",
(("city", "Detroit"),),
"/?_facet=state&_facet=city&state=MI&city=Detroit",
),
(
"/?_facet=state&_facet=city",
(("_facet", "planet_int"),),
"/?_facet=state&_facet=city&_facet=planet_int",
),
],
)
def test_path_with_added_args(path, added_args, expected):
request = Request.fake(path)
actual = utils.path_with_added_args(request, added_args)
assert expected == actual
@pytest.mark.parametrize(
"path,args,expected",
[
("/foo?bar=1", {"bar"}, "/foo"),
("/foo?bar=1&baz=2", {"bar"}, "/foo?baz=2"),
("/foo?bar=1&bar=2&bar=3", {"bar": "2"}, "/foo?bar=1&bar=3"),
],
)
def test_path_with_removed_args(path, args, expected):
request = Request.fake(path)
actual = utils.path_with_removed_args(request, args)
assert expected == actual
# Run the test again but this time use the path= argument
request = Request.fake("/")
actual = utils.path_with_removed_args(request, args, path=path)
assert expected == actual
@pytest.mark.parametrize(
"path,args,expected",
[
("/foo?bar=1", {"bar": 2}, "/foo?bar=2"),
("/foo?bar=1&baz=2", {"bar": None}, "/foo?baz=2"),
],
)
def test_path_with_replaced_args(path, args, expected):
request = Request.fake(path)
actual = utils.path_with_replaced_args(request, args)
assert expected == actual
@pytest.mark.parametrize(
"row,pks,expected_path",
[
({"A": "foo", "B": "bar"}, ["A", "B"], "foo,bar"),
({"A": "f,o", "B": "bar"}, ["A", "B"], "f%2Co,bar"),
({"A": 123}, ["A"], "123"),
(
utils.CustomRow(
["searchable_id", "tag"],
[
("searchable_id", {"value": 1, "label": "1"}),
("tag", {"value": "feline", "label": "feline"}),
],
),
["searchable_id", "tag"],
"1,feline",
),
],
)
def test_path_from_row_pks(row, pks, expected_path):
actual_path = utils.path_from_row_pks(row, pks, False)
assert expected_path == actual_path
@pytest.mark.parametrize(
"obj,expected",
[
(
{
"Description": "Soft drinks",
"Picture": b"\x15\x1c\x02\xc7\xad\x05\xfe",
"CategoryID": 1,
},
"""
{"CategoryID": 1, "Description": "Soft drinks", "Picture": {"$base64": true, "encoded": "FRwCx60F/g=="}}
""".strip(),
)
],
)
def test_custom_json_encoder(obj, expected):
actual = json.dumps(obj, cls=utils.CustomJSONEncoder, sort_keys=True)
assert expected == actual
@pytest.mark.parametrize(
"bad_sql",
[
"update blah;",
"-- sql comment to skip\nupdate blah;",
"update blah set some_column='# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
"PRAGMA case_sensitive_like = true",
"SELECT * FROM pragma_not_on_allow_list('idx52')",
],
)
def test_validate_sql_select_bad(bad_sql):
with pytest.raises(utils.InvalidSql):
utils.validate_sql_select(bad_sql)
@pytest.mark.parametrize(
"good_sql",
[
"select count(*) from airports",
"select foo from bar",
"--sql comment to skip\nselect foo from bar",
"select '# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
"select 1 + 1",
"explain select 1 + 1",
"explain query plan select 1 + 1",
"SELECT\nblah FROM foo",
"WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"explain WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"explain query plan WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
"SELECT * FROM pragma_index_info('idx52')",
"select * from pragma_table_xinfo('table')",
],
)
def test_validate_sql_select_good(good_sql):
utils.validate_sql_select(good_sql)
@pytest.mark.parametrize("open_quote,close_quote", [('"', '"'), ("[", "]")])
def test_detect_fts(open_quote, close_quote):
sql = """
CREATE TABLE "Dumb_Table" (
"TreeID" INTEGER,
"qSpecies" TEXT
);
CREATE TABLE "Street_Tree_List" (
"TreeID" INTEGER,
"qSpecies" TEXT,
"qAddress" TEXT,
"SiteOrder" INTEGER,
"qSiteInfo" TEXT,
"PlantType" TEXT,
"qCaretaker" TEXT
);
CREATE VIEW Test_View AS SELECT * FROM Dumb_Table;
CREATE VIRTUAL TABLE {open}Street_Tree_List_fts{close} USING FTS4 ("qAddress", "qCaretaker", "qSpecies", content={open}Street_Tree_List{close});
CREATE VIRTUAL TABLE r USING rtree(a, b, c);
""".format(
open=open_quote, close=close_quote
)
conn = utils.sqlite3.connect(":memory:")
conn.executescript(sql)
assert None is utils.detect_fts(conn, "Dumb_Table")
assert None is utils.detect_fts(conn, "Test_View")
assert None is utils.detect_fts(conn, "r")
assert "Street_Tree_List_fts" == utils.detect_fts(conn, "Street_Tree_List")
@pytest.mark.parametrize(
"url,expected",
[
("http://www.google.com/", True),
("https://example.com/", True),
("www.google.com", False),
("http://www.google.com/ is a search engine", False),
],
)
def test_is_url(url, expected):
assert expected == utils.is_url(url)
@pytest.mark.parametrize(
"s,expected",
[
("simple", "simple"),
("MixedCase", "MixedCase"),
("-no-leading-hyphens", "no-leading-hyphens-65bea6"),
("_no-leading-underscores", "no-leading-underscores-b921bc"),
("no spaces", "no-spaces-7088d7"),
("-", "336d5e"),
("no $ characters", "no--characters-59e024"),
],
)
def test_to_css_class(s, expected):
assert expected == utils.to_css_class(s)
def test_temporary_docker_directory_uses_hard_link():
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
open("hello", "w").write("world")
        # Default usage of this should use a hard link
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options=None,
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note=None,
secret="secret",
) as temp_docker:
hello = os.path.join(temp_docker, "hello")
assert "world" == open(hello).read()
# It should be a hard link
assert 2 == os.stat(hello).st_nlink
@patch("os.link")
def test_temporary_docker_directory_uses_copy_if_hard_link_fails(mock_link):
# Copy instead if os.link raises OSError (normally due to different device)
mock_link.side_effect = OSError
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
open("hello", "w").write("world")
        # os.link is mocked to fail, so the copy fallback should be used
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options=None,
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note=None,
secret=None,
) as temp_docker:
hello = os.path.join(temp_docker, "hello")
assert "world" == open(hello).read()
# It should be a copy, not a hard link
assert 1 == os.stat(hello).st_nlink
def test_temporary_docker_directory_quotes_args():
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
open("hello", "w").write("world")
with utils.temporary_docker_directory(
files=["hello"],
name="t",
metadata=None,
extra_options="--$HOME",
branch=None,
template_dir=None,
plugins_dir=None,
static=[],
install=[],
spatialite=False,
version_note="$PWD",
secret="secret",
) as temp_docker:
df = os.path.join(temp_docker, "Dockerfile")
df_contents = open(df).read()
assert "'$PWD'" in df_contents
assert "'--$HOME'" in df_contents
assert "ENV DATASETTE_SECRET 'secret'" in df_contents
def test_compound_keys_after_sql():
assert "((a > :p0))" == utils.compound_keys_after_sql(["a"])
assert """
((a > :p0)
or
(a = :p0 and b > :p1))
""".strip() == utils.compound_keys_after_sql(
["a", "b"]
)
assert """
((a > :p0)
or
(a = :p0 and b > :p1)
or
(a = :p0 and b = :p1 and c > :p2))
""".strip() == utils.compound_keys_after_sql(
["a", "b", "c"]
)
async def table_exists(table):
return table == "exists.csv"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"table_and_format,expected_table,expected_format",
[
("blah", "blah", None),
("blah.csv", "blah", "csv"),
("blah.json", "blah", "json"),
("blah.baz", "blah.baz", None),
("exists.csv", "exists.csv", None),
],
)
async def test_resolve_table_and_format(
table_and_format, expected_table, expected_format
):
actual_table, actual_format = await utils.resolve_table_and_format(
table_and_format, table_exists, ["json"]
)
assert expected_table == actual_table
assert expected_format == actual_format
def test_table_columns():
conn = sqlite3.connect(":memory:")
conn.executescript(
"""
create table places (id integer primary key, name text, bob integer)
"""
)
assert ["id", "name", "bob"] == utils.table_columns(conn, "places")
@pytest.mark.parametrize(
"path,format,extra_qs,expected",
[
("/foo?sql=select+1", "csv", {}, "/foo.csv?sql=select+1"),
("/foo?sql=select+1", "json", {}, "/foo.json?sql=select+1"),
("/foo/bar", "json", {}, "/foo/bar.json"),
("/foo/bar", "csv", {}, "/foo/bar.csv"),
("/foo/bar.csv", "json", {}, "/foo/bar.csv?_format=json"),
("/foo/bar", "csv", {"_dl": 1}, "/foo/bar.csv?_dl=1"),
("/foo/b.csv", "json", {"_dl": 1}, "/foo/b.csv?_dl=1&_format=json"),
(
"/sf-trees/Street_Tree_List?_search=cherry&_size=1000",
"csv",
{"_dl": 1},
"/sf-trees/Street_Tree_List.csv?_search=cherry&_size=1000&_dl=1",
),
],
)
def test_path_with_format(path, format, extra_qs, expected):
request = Request.fake(path)
actual = utils.path_with_format(request=request, format=format, extra_qs=extra_qs)
assert expected == actual
def test_path_with_format_replace_format():
request = Request.fake("/foo/bar.csv")
assert (
utils.path_with_format(request=request, format="blob")
== "/foo/bar.csv?_format=blob"
)
assert (
utils.path_with_format(request=request, format="blob", replace_format="csv")
== "/foo/bar.blob"
)
@pytest.mark.parametrize(
"bytes,expected",
[
(120, "120 bytes"),
(1024, "1.0 KB"),
(1024 * 1024, "1.0 MB"),
(1024 * 1024 * 1024, "1.0 GB"),
(1024 * 1024 * 1024 * 1.3, "1.3 GB"),
(1024 * 1024 * 1024 * 1024, "1.0 TB"),
],
)
def test_format_bytes(bytes, expected):
assert expected == utils.format_bytes(bytes)
@pytest.mark.parametrize(
"query,expected",
[
("dog", '"dog"'),
("cat,", '"cat,"'),
("cat dog", '"cat" "dog"'),
# If a phrase is already double quoted, leave it so
('"cat dog"', '"cat dog"'),
('"cat dog" fish', '"cat dog" "fish"'),
# Sensibly handle unbalanced double quotes
('cat"', '"cat"'),
('"cat dog" "fish', '"cat dog" "fish"'),
],
)
def test_escape_fts(query, expected):
assert expected == utils.escape_fts(query)
@pytest.mark.parametrize(
"input,expected",
[
("dog", "dog"),
('dateutil_parse("1/2/2020")', r"dateutil_parse(\0000221/2/2020\000022)"),
("this\r\nand\r\nthat", r"this\00000Aand\00000Athat"),
],
)
def test_escape_css_string(input, expected):
assert expected == utils.escape_css_string(input)
def test_check_connection_spatialite_raises():
path = str(pathlib.Path(__file__).parent / "spatialite.db")
conn = sqlite3.connect(path)
with pytest.raises(utils.SpatialiteConnectionProblem):
utils.check_connection(conn)
def test_check_connection_passes():
conn = sqlite3.connect(":memory:")
utils.check_connection(conn)
def test_call_with_supported_arguments():
def foo(a, b):
return f"{a}+{b}"
assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2)
assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2, c=3)
with pytest.raises(TypeError):
utils.call_with_supported_arguments(foo, a=1)
@pytest.mark.parametrize(
"data,should_raise",
[
([["foo", "bar"], ["foo", "baz"]], False),
([("foo", "bar"), ("foo", "baz")], False),
((["foo", "bar"], ["foo", "baz"]), False),
([["foo", "bar"], ["foo", "baz", "bax"]], True),
({"foo": ["bar", "baz"]}, False),
({"foo": ("bar", "baz")}, False),
({"foo": "bar"}, True),
],
)
def test_multi_params(data, should_raise):
if should_raise:
with pytest.raises(AssertionError):
utils.MultiParams(data)
return
p1 = utils.MultiParams(data)
assert "bar" == p1["foo"]
assert ["bar", "baz"] == list(p1.getlist("foo"))
@pytest.mark.parametrize(
"actor,allow,expected",
[
# Default is to allow:
(None, None, True),
# {} means deny-all:
(None, {}, False),
({"id": "root"}, {}, False),
# true means allow-all
({"id": "root"}, True, True),
(None, True, True),
# false means deny-all
({"id": "root"}, False, False),
(None, False, False),
# Special case for "unauthenticated": true
(None, {"unauthenticated": True}, True),
(None, {"unauthenticated": False}, False),
# Match on just one property:
(None, {"id": "root"}, False),
({"id": "root"}, None, True),
({"id": "simon", "staff": True}, {"staff": True}, True),
({"id": "simon", "staff": False}, {"staff": True}, False),
# Special "*" value for any key:
({"id": "root"}, {"id": "*"}, True),
({}, {"id": "*"}, False),
({"name": "root"}, {"id": "*"}, False),
# Supports single strings or list of values:
({"id": "root"}, {"id": "bob"}, False),
({"id": "root"}, {"id": ["bob"]}, False),
({"id": "root"}, {"id": "root"}, True),
({"id": "root"}, {"id": ["root"]}, True),
# Any matching role will work:
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["staff"]}, True),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev"]}, True),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["otter"]}, False),
({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev", "otter"]}, True),
({"id": "garry", "roles": []}, {"roles": ["staff"]}, False),
({"id": "garry"}, {"roles": ["staff"]}, False),
# Any single matching key works:
({"id": "root"}, {"bot_id": "my-bot", "id": ["root"]}, True),
],
)
def test_actor_matches_allow(actor, allow, expected):
assert expected == utils.actor_matches_allow(actor, allow)
@pytest.mark.parametrize(
"config,expected",
[
({"foo": "bar"}, {"foo": "bar"}),
({"$env": "FOO"}, "x"),
({"k": {"$env": "FOO"}}, {"k": "x"}),
([{"k": {"$env": "FOO"}}, {"z": {"$env": "FOO"}}], [{"k": "x"}, {"z": "x"}]),
({"k": [{"in_a_list": {"$env": "FOO"}}]}, {"k": [{"in_a_list": "x"}]}),
],
)
def test_resolve_env_secrets(config, expected):
assert expected == utils.resolve_env_secrets(config, {"FOO": "x"})
@pytest.mark.parametrize(
"actor,expected",
[
({"id": "blah"}, "blah"),
({"id": "blah", "login": "l"}, "l"),
({"id": "blah", "login": "l"}, "l"),
({"id": "blah", "login": "l", "username": "u"}, "u"),
({"login": "l", "name": "n"}, "n"),
(
{"id": "blah", "login": "l", "username": "u", "name": "n", "display": "d"},
"d",
),
({"weird": "shape"}, "{'weird': 'shape'}"),
],
)
def test_display_actor(actor, expected):
assert expected == utils.display_actor(actor)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"dbs,expected_path",
[
(["one_table"], "/one/one"),
(["two_tables"], "/two"),
(["one_table", "two_tables"], "/"),
],
)
async def test_initial_path_for_datasette(tmp_path_factory, dbs, expected_path):
db_dir = tmp_path_factory.mktemp("dbs")
one_table = str(db_dir / "one.db")
sqlite3.connect(one_table).execute("create table one (id integer primary key)")
two_tables = str(db_dir / "two.db")
sqlite3.connect(two_tables).execute("create table two (id integer primary key)")
sqlite3.connect(two_tables).execute("create table three (id integer primary key)")
datasette = Datasette(
[{"one_table": one_table, "two_tables": two_tables}[db] for db in dbs]
)
path = await utils.initial_path_for_datasette(datasette)
assert path == expected_path
```
|
{
"source": "jefftriplett/django-alexa",
"score": 2
}
|
#### File: django_alexa/internal/fields.py
```python
from __future__ import absolute_import
class AmazonSlots(object):
'''Base for all amazon slots'''
pass
class AmazonField(object):
'''Base for all amazon fields'''
amazon_name = None
def get_slot_name(self):
return self.amazon_name
class AmazonCustom(AmazonField):
def get_choices(self):
return []
class AmazonLiteral(AmazonField):
amazon_name = "AMAZON.LITERAL"
class AmazonNumber(AmazonField):
amazon_name = "AMAZON.NUMBER"
class AmazonDate(AmazonField):
amazon_name = "AMAZON.DATE"
class AmazonTime(AmazonField):
amazon_name = "AMAZON.TIME"
class AmazonDuration(AmazonField):
amazon_name = "AMAZON.DURATION"
class AmazonUSCity(AmazonField):
amazon_name = "AMAZON.US_CITY"
class AmazonFirstName(AmazonField):
amazon_name = "AMAZON.US_FIRST_NAME"
class AmazonUSState(AmazonField):
amazon_name = "AMAZON.US_STATE"
class AmazonFourDigitNumber(AmazonField):
amazon_name = "AMAZON.FOUR_DIGIT_NUMBER"
class AmazonPostalAddress(AmazonField):
amazon_name = "AMAZON.PostalAddress"
```
|
{
"source": "jefftriplett/django-codemod",
"score": 2
}
|
#### File: django_codemod/visitors/models.py
```python
from typing import Optional, Union
from libcst import (
Arg,
Attribute,
BaseExpression,
BaseSmallStatement,
BaseStatement,
Call,
FunctionDef,
ImportFrom,
ImportStar,
MaybeSentinel,
Name,
RemovalSentinel,
RemoveFromParent,
Return,
)
from libcst import matchers as m
from libcst.codemod.visitors import AddImportsVisitor
from django_codemod.constants import DJANGO_1_9, DJANGO_1_11, DJANGO_2_0, DJANGO_2_1
from django_codemod.utils.calls import find_keyword_arg
from django_codemod.visitors.base import BaseDjCodemodTransformer, module_matcher
class ModelsPermalinkTransformer(BaseDjCodemodTransformer):
"""Replace `@models.permalink` decorator by a call to `reverse()`."""
deprecated_in = DJANGO_1_11
removed_in = DJANGO_2_1
ctx_key_prefix = "ModelsPermalinkTransformer"
ctx_key_inside_method = f"{ctx_key_prefix}-inside_method"
ctx_key_decorator_matchers = f"{ctx_key_prefix}-decorator_matchers"
def leave_ImportFrom(
self, original_node: ImportFrom, updated_node: ImportFrom
) -> Union[BaseSmallStatement, RemovalSentinel]:
if isinstance(updated_node.names, ImportStar):
return super().leave_ImportFrom(original_node, updated_node)
if m.matches(
updated_node,
m.ImportFrom(module=module_matcher(["django", "db"])),
):
for imported_name in updated_node.names:
if m.matches(imported_name, m.ImportAlias(name=m.Name("models"))):
self.add_decorator_matcher(
m.Decorator(
decorator=m.Attribute(
value=m.Name("models"), attr=m.Name("permalink")
)
)
)
if m.matches(
updated_node,
m.ImportFrom(module=module_matcher(["django", "db", "models"])),
):
updated_names = []
for imported_name in updated_node.names:
if m.matches(imported_name, m.ImportAlias(name=m.Name("permalink"))):
decorator_name_str = (
imported_name.evaluated_alias or imported_name.evaluated_name
)
self.add_decorator_matcher(
m.Decorator(decorator=m.Name(decorator_name_str))
)
else:
updated_names.append(imported_name)
if not updated_names:
return RemoveFromParent()
# sort imports
new_names = sorted(updated_names, key=lambda n: n.evaluated_name)
# remove any trailing commas
last_name = new_names[-1]
if last_name.comma != MaybeSentinel.DEFAULT:
new_names[-1] = last_name.with_changes(comma=MaybeSentinel.DEFAULT)
return updated_node.with_changes(names=new_names)
return super().leave_ImportFrom(original_node, updated_node)
def add_decorator_matcher(self, matcher):
if self.ctx_key_decorator_matchers not in self.context.scratch:
self.context.scratch[self.ctx_key_decorator_matchers] = []
self.context.scratch[self.ctx_key_decorator_matchers].append(matcher)
@property
def decorator_matcher(self):
matchers_list = self.context.scratch.get(self.ctx_key_decorator_matchers, [])
if len(matchers_list) == 0:
return None
if len(matchers_list) == 1:
return matchers_list[0]
return m.OneOf(*[matcher for matcher in matchers_list])
def visit_FunctionDef(self, node: FunctionDef) -> Optional[bool]:
for decorator in node.decorators:
if m.matches(decorator, self.decorator_matcher):
self.context.scratch[self.ctx_key_inside_method] = True
return super().visit_FunctionDef(node)
def leave_FunctionDef(
self, original_node: FunctionDef, updated_node: FunctionDef
) -> Union[BaseStatement, RemovalSentinel]:
if self.visiting_permalink_method:
for decorator in updated_node.decorators:
if m.matches(decorator, self.decorator_matcher):
AddImportsVisitor.add_needed_import(
context=self.context,
module="django.urls",
obj="reverse",
)
updated_decorators = list(updated_node.decorators)
updated_decorators.remove(decorator)
self.context.scratch.pop(self.ctx_key_inside_method, None)
return updated_node.with_changes(
decorators=tuple(updated_decorators)
)
return super().leave_FunctionDef(original_node, updated_node)
@property
def visiting_permalink_method(self):
return self.context.scratch.get(self.ctx_key_inside_method, False)
def leave_Return(
self, original_node: Return, updated_node: Return
) -> Union[BaseSmallStatement, RemovalSentinel]:
if self.visiting_permalink_method and m.matches(
updated_node.value, m.Tuple() # type: ignore
):
elem_0, *elem_1_3 = updated_node.value.elements[:3] # type: ignore
args = (
Arg(elem_0.value),
Arg(Name("None")),
*[Arg(el.value) for el in elem_1_3],
)
return updated_node.with_changes(
value=Call(func=Name("reverse"), args=args)
)
return super().leave_Return(original_node, updated_node)
def is_foreign_key(node: Call) -> bool:
return m.matches(node, m.Call(func=m.Attribute(attr=m.Name(value="ForeignKey"))))
def is_one_to_one_field(node: Call) -> bool:
return m.matches(
node,
m.Call(func=m.Attribute(attr=m.Name(value="OneToOneField"))),
)
def has_on_delete(node: Call) -> bool:
# if on_delete exists in any kwarg we return True
if find_keyword_arg(node.args, "on_delete"):
return True
# if there are two or more nodes and there are no keywords
# then we can assume that positional arguments are being used
# and on_delete is being handled.
return len(node.args) >= 2 and node.args[1].keyword is None
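# Illustrative (hypothetical) field definitions that has_on_delete() treats as already handled:
#   models.ForeignKey("auth.User", on_delete=models.CASCADE)  # keyword argument present
#   models.ForeignKey("auth.User", models.CASCADE)            # second positional argument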
class OnDeleteTransformer(BaseDjCodemodTransformer):
"""Add the `on_delete=CASCADE` to `ForeignKey` and `OneToOneField`."""
deprecated_in = DJANGO_1_9
removed_in = DJANGO_2_0
ctx_key_prefix = "OnDeleteTransformer"
def leave_Call(self, original_node: Call, updated_node: Call) -> BaseExpression:
if (
is_one_to_one_field(original_node) or is_foreign_key(original_node)
) and not has_on_delete(original_node):
AddImportsVisitor.add_needed_import(
context=self.context,
module="django.db",
obj="models",
)
updated_args = (
*updated_node.args,
Arg(
keyword=Name("on_delete"),
value=Attribute(value=Name("models"), attr=Name("CASCADE")),
),
)
return updated_node.with_changes(args=updated_args)
return super().leave_Call(original_node, updated_node)
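# Net effect of OnDeleteTransformer, shown as an illustrative before/after on a hypothetical
# field (it also ensures `from django.db import models` is imported; exact formatting is libcst's):
#   before: user = models.ForeignKey("auth.User")
#   after:  user = models.ForeignKey("auth.User", on_delete=models.CASCADE)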
```
#### File: tests/visitors/test_base.py
```python
import pytest
from libcst import matchers as m
from parameterized import parameterized
from django_codemod.visitors.base import (
BaseFuncRenameTransformer,
BaseModuleRenameTransformer,
module_matcher,
)
from .base import BaseVisitorTest
@pytest.mark.parametrize(
("parts", "expected_matcher"),
[
(["django"], m.Name("django")),
(
["django", "contrib"],
m.Attribute(value=m.Name("django"), attr=m.Name("contrib")),
),
(
["django", "contrib", "admin"],
m.Attribute(
value=m.Attribute(value=m.Name("django"), attr=m.Name("contrib")),
attr=m.Name("admin"),
),
),
],
)
def test_module_matcher(parts, expected_matcher):
matcher = module_matcher(parts)
# equality comparision doesn't work with matcher:
# compare their representation seems to work
assert repr(matcher) == repr(expected_matcher)
class SameModuleFuncRenameTransformer(BaseFuncRenameTransformer):
"""Simple transformer renaming function from same module."""
rename_from = "django.dummy.module.func"
rename_to = "django.dummy.module.better_func"
class TestFuncRenameTransformer(BaseVisitorTest):
transformer = SameModuleFuncRenameTransformer
def test_simple_substitution(self) -> None:
before = """
from django.dummy.module import func
result = func()
"""
after = """
from django.dummy.module import better_func
result = better_func()
"""
self.assertCodemod(before, after)
def test_reference_without_call(self) -> None:
"""Replace reference of the function even is it's not called."""
before = """
from django.dummy.module import func
new_func = func
"""
after = """
from django.dummy.module import better_func
new_func = better_func
"""
self.assertCodemod(before, after)
def test_already_imported(self) -> None:
"""Function to modify is already imported with an alias."""
before = """
from django.dummy.module import func, better_func
result = func(content)
"""
after = """
from django.dummy.module import better_func
result = better_func(content)
"""
self.assertCodemod(before, after)
def test_import_with_alias(self) -> None:
"""Function to modify is imported with an alias."""
before = """
from django.dummy.module import func as aliased_func
result = aliased_func()
"""
after = """
from django.dummy.module import better_func as aliased_func
result = aliased_func()
"""
self.assertCodemod(before, after)
def test_import_star_ignored(self) -> None:
"""Should not change anything in case of a star import."""
before = """
from django.dummy.module import *
result = func()
"""
after = """
from django.dummy.module import *
result = func()
"""
self.assertCodemod(before, after)
def test_same_name_function(self) -> None:
"""Should not be fooled by a function bearing the same name."""
before = """
from utils.helpers import func
result = func()
"""
after = """
from utils.helpers import func
result = func()
"""
self.assertCodemod(before, after)
def test_same_name_with_alias_import_function(self) -> None:
"""Imported with alias and other function with the same name."""
before = """
from django.dummy.module import func as aliased_func
from utils.helpers import func
result = func()
aliased_func()
"""
after = """
from utils.helpers import func
from django.dummy.module import better_func as aliased_func
result = func()
aliased_func()
"""
self.assertCodemod(before, after)
def test_extra_trailing_comma_when_last(self) -> None:
"""Extra trailing comma when removed import is the last one."""
before = """
from django.dummy.module import better_func, func
result = func(content)
"""
after = """
from django.dummy.module import better_func
result = better_func(content)
"""
self.assertCodemod(before, after)
def test_parse_call_no_value(self) -> None:
"""Bug with function call without name."""
before = """
factory()()
"""
after = """
factory()()
"""
self.assertCodemod(before, after)
def test_lambda_no_value(self) -> None:
"""Bug with lambda call without name."""
before = """
(lambda x: x)(something)
"""
after = """
(lambda x: x)(something)
"""
self.assertCodemod(before, after)
def test_name_from_outer_scope(self) -> None:
"""When import from outer scope has same name as function variable."""
before = """
from django.dummy.module import func
result = func()
def something():
func = get_func()
return func
"""
after = """
from django.dummy.module import better_func
result = better_func()
def something():
func = get_func()
return func
"""
self.assertCodemod(before, after)
@parameterized.expand(
[
("response.func",),
("response.func.other",),
("response.func.other.one",),
]
)
def test_attribute_access(self, attribute_access) -> None:
"""When accessing an attribute that looks like the imported name."""
before = f"""
from django.dummy.module import func
result = func()
def test_something():
response = get_response()
assert {attribute_access} == 1
"""
after = f"""
from django.dummy.module import better_func
result = better_func()
def test_something():
response = get_response()
assert {attribute_access} == 1
"""
self.assertCodemod(before, after)
def test_kwargs(self) -> None:
"""When function is called with a kwargs bearing the same name."""
before = """
from django.dummy.module import func
func()
something(func="test")
"""
after = """
from django.dummy.module import better_func
better_func()
something(func="test")
"""
self.assertCodemod(before, after)
class OtherModuleFuncRenameTransformer(BaseFuncRenameTransformer):
"""Transformer with different module."""
rename_from = "django.dummy.module.func"
rename_to = "django.better.dummy.better_func"
class TestOtherModuleFuncRenameTransformer(BaseVisitorTest):
transformer = OtherModuleFuncRenameTransformer
def test_simple_substitution(self) -> None:
before = """
from django.dummy.module import func
result = func()
"""
after = """
from django.better.dummy import better_func
result = better_func()
"""
self.assertCodemod(before, after)
def test_already_imported(self) -> None:
before = """
from django.dummy.module import func
from django.better.dummy import better_func
result = func(content)
"""
after = """
from django.better.dummy import better_func
result = better_func(content)
"""
self.assertCodemod(before, after)
def test_import_with_alias(self) -> None:
before = """
from django.dummy.module import func as aliased_func
result = aliased_func()
"""
after = """
from django.better.dummy import better_func as aliased_func
result = aliased_func()
"""
self.assertCodemod(before, after)
class OtherModuleRenameTransformer(BaseModuleRenameTransformer):
"""Simple transformer renaming function from same module."""
rename_from = "django.dummy.module"
rename_to = "django.dummy.other_module"
class TestModuleRenameTransformer(BaseVisitorTest):
transformer = OtherModuleRenameTransformer
def test_simple_substitution(self) -> None:
before = """
from django.dummy.module import func
result = func()
"""
after = """
from django.dummy.other_module import func
result = func()
"""
self.assertCodemod(before, after)
```
|
{
"source": "jefftriplett/djangocon-discord-scheduler",
"score": 2
}
|
#### File: jefftriplett/djangocon-discord-scheduler/app.py
```python
import requests
from os import environ
from celery import Celery
from celery.schedules import crontab
from environs import Env
from typing import Any, Literal
# from announce_talk import post_about_talks
from process_folder import main as process_folder
environ.setdefault("CELERY_CONFIG_MODULE", "celery_config")
app = Celery()
app.config_from_envvar("CELERY_CONFIG_MODULE")
env = Env()
app.conf.beat_schedule = {
"every-minute": {
"task": "app.add",
"schedule": crontab(),
"args": (16, 16),
},
"process-folder": {
"task": "app.schedule_process_folder",
"schedule": crontab(),
},
}
@app.task
def add(x, y):
return x + y
@app.task(
autoretry_for=[requests.exceptions.RequestException],
retry_backoff=True,
)
def schedule_process_folder():
process_folder()
@app.task(
autoretry_for=[requests.exceptions.RequestException],
retry_backoff=True,
)
def post_to_webhook(*, webhook_url: str, body: dict[str, Any]) -> Literal[None]:
"""Post the body to the webhook URL"""
response = requests.post(webhook_url, json=body)
response.raise_for_status()
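# Hypothetical invocation from elsewhere in the project (URL and payload are placeholders):
#   post_to_webhook.delay(webhook_url="https://example.com/webhook", body={"content": "Talk starting soon"})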
```
|
{
"source": "jefftriplett/django-rq-email-backend",
"score": 2
}
|
#### File: jefftriplett/django-rq-email-backend/runtests.py
```python
import sys
import django
from django.conf import settings
settings.configure(
DATABASES={
        'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}
},
INSTALLED_APPS=[
'django_rq',
'django_rq_email_backend',
],
MIDDLEWARE_CLASSES=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
)
def runtests(*test_args):
import django.test.utils
try:
# Django 1.7 (2.0)
django.setup()
except AttributeError:
pass
runner_class = django.test.utils.get_runner(settings)
test_runner = runner_class(verbosity=1, interactive=True, failfast=False)
failures = test_runner.run_tests(['django_rq_email_backend'])
sys.exit(failures)
if __name__ == '__main__':
runtests()
```
#### File: jefftriplett/django-rq-email-backend/setup.py
```python
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
setup(
name='django-rq-email-backend',
version='0.1.3',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jefftriplett/django-rq-email-backend',
license='BSD',
description='Provides Django email integration for RQ (Redis Queue)',
long_description=open('README.rst').read(),
zip_safe=False,
include_package_data=True,
packages=[
'django_rq_email_backend',
],
package_data={'': ['README.rst']},
install_requires=[
'Django>=1.4',
'rq>=0.3.4',
'django_rq>=0.4.6'
],
tests_require=['tox'],
cmdclass={'test': Tox},
test_suite='run_tests',
extras_require={
'testing': ['pytest'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
```
|
{
"source": "jefftriplett/django-sockpuppet",
"score": 2
}
|
#### File: sockpuppet/scaffolds/view.py
```python
from django.views.generic.base import TemplateView
class {{ reflex_name|title }}View(TemplateView):
template_name = '{{ reflex_name }}.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['count'] = 0
return context
```
#### File: django-sockpuppet/sockpuppet/utils.py
```python
import re
def camelize(word):
word = re.sub(
r'[\s_](.)',
lambda m: m.group(1).title(),
word, flags=re.DOTALL
)
return word
def camelize_value(value):
if isinstance(value, list):
value = [camelize_value(val) for val in value]
elif isinstance(value, dict):
value = {camelize(key): camelize_value(val) for key, val in value.items()}
return value
def classify(word):
tail = camelize(word[1:])
head = word[:1].title()
return '{}{}'.format(head, tail)
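# Worked examples for the helpers above:
#   camelize('reflex_name')  -> 'reflexName'
#   classify('reflex_name')  -> 'ReflexName'
#   camelize_value({'some_key': ['a_b']}) -> {'someKey': ['a_b']}  (only dict keys are camelized)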
```
#### File: jefftriplett/django-sockpuppet/tasks.py
```python
from invoke import task
@task
def clean_build(c):
"""
Remove build artifacts
"""
c.run("rm -fr build/")
c.run("rm -fr dist/")
c.run("rm -fr *.egg-info")
@task
def clean_pyc(c):
"""
Remove python file artifacts
"""
c.run("find . -name '*.pyc' -exec rm -f {} +")
c.run("find . -name '*.pyo' -exec rm -f {} +")
c.run("find . -name '*~' -exec rm -f {} +")
@task
def clean(c):
"""
Remove python file and build artifacts
"""
clean_build(c)
clean_pyc(c)
@task
def integration(c):
"""
Run integration tests
"""
c.run("npm install")
c.run("npm run build_test")
c.run("python manage.py migrate")
c.run("python manage.py runserver 2>&1 > /dev/null &")
c.run("npm run cypress:run")
@task
def unittest(c):
"""
Run unittests
"""
c.run("python manage.py test")
@task
def lint(c):
"""
Check style with flake8
"""
c.run("flake8 sockpuppet tests")
@task(help={'bumpsize': 'Bump either for a "feature" or "breaking" change'})
def release(c, bumpsize=''):
"""
Package and upload a release
"""
clean(c)
if bumpsize:
bumpsize = '--' + bumpsize
c.run("bumpversion {bump} --no-input".format(bump=bumpsize))
import sockpuppet
c.run("python setup.py sdist bdist_wheel")
c.run("twine upload dist/*")
c.run('git tag -a {version} -m "New version: {version}"'.format(version=sockpuppet.__version__))
c.run("git push --tags")
c.run("git push origin master")
```
|
{
"source": "jefftriplett/dotfiles",
"score": 2
}
|
#### File: home/bin/spiked-cider.py
```python
import click
import json
import subprocess
def run_command(cmd, as_list=True):
try:
        output = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True,
            universal_newlines=True,  # return text rather than bytes so the split below works on Python 3
        )
    except Exception as e:
        print('Warning: {0}'.format(e))
        return []
if as_list:
lines = [line for line in output.split('\n') if line]
return lines
else:
return output
def get_casks():
casks = run_command('brew cask list')
return casks or []
def get_formulas():
formulas = run_command('brew list')
return formulas or []
def get_taps():
taps = run_command('brew tap')
return taps or []
@click.command()
@click.option('--indent', default=4, help='JSON indentation length.')
def main(indent):
doc = {
'casks': get_casks(),
'formulas': get_formulas(),
'taps': get_taps(),
}
    print(json.dumps(doc, indent=indent))
if __name__ == '__main__':
main()
```
|
{
"source": "jefftriplett/pinboard-bot",
"score": 3
}
|
#### File: jefftriplett/pinboard-bot/main.py
```python
import pinboard
import requests
import typer
from bs4 import BeautifulSoup
from environs import Env
from pathlib import Path
from stop_words import safe_get_stop_words
from titlecase import titlecase
from unidecode import unidecode
from yarl import URL
env = Env()
GITHUB_TOKEN = env.str("GITHUB_TOKEN")
GITHUB_USERNAME = env.str("GITHUB_USERNAME")
PINBOARD_TOKEN = env.str("PINBOARD_TOKEN")
IGNORE_WORDS = set(
[word.lower() for word in Path("IGNORE_WORDS.txt").read_text().split()]
)
STOP_WORDS = set([word.lower() for word in Path("STOP_WORDS.txt").read_text().split()])
STOP_WORDS.update(set(safe_get_stop_words("english")))
IGNORE_TAGS = IGNORE_WORDS | STOP_WORDS
def get_dev_to_info_for_url(url):
try:
req = requests.get(url, timeout=1.0)
soup = BeautifulSoup(req.text, "html.parser")
data = {
"tags": [
tag.text.lstrip("#") for tag in soup.find_all("a", {"class": "tag"})
]
}
return data
except Exception as e:
print(e)
return {}
def get_github_info_for_url(url):
bits = url.replace("https://github.com/", "").split("/")
owner, repo = bits[0], bits[1]
url = "https://api.github.com/repos/{owner}/{repo}".format(owner=owner, repo=repo)
req = requests.get(
url,
auth=(GITHUB_USERNAME, GITHUB_TOKEN),
headers={"Accept": "application/vnd.github.mercy-preview+json"},
timeout=1.0,
)
try:
return req.json()
except Exception as e:
print(e)
return {}
def normalize_tags(original_tags, ignore_meta_tags=False):
tags = [unidecode(tag.lower()) for tag in original_tags if len(tag)]
if ignore_meta_tags:
tags = [tag for tag in tags if ":" not in tag]
tags = set(tags).difference(IGNORE_TAGS)
return tags
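# Example: normalize_tags(["Python", "via:pocket", ""], ignore_meta_tags=True) lowercases and
# ASCII-folds the tags, drops empty strings and the "via:pocket" meta tag, then removes
# anything listed in IGNORE_TAGS.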
class Bookmarks(object):
def __init__(self, pinboard_token, start=0, count=20):
self.pinboard_token = pinboard_token
self.pinboard = pinboard.Pinboard(pinboard_token)
self.count = count
self.start = start
"""
TODO:
Implement a clean() and clean_fieldname() approach to help normalize
our bookmark model.
- Store the initial values.
- Run the clean script.
- clean
- clean_fieldname
- Mark bookmark as modified.
- If the link changed, delete the old, and replace the url.
- Save bookmark.
"""
def get_bookmarks(self, start=None, count=None):
return self.pinboard.posts.all(
start=start or self.start, results=count or self.count
)
def fix_tags(self, start=None, count=None):
links = self.get_bookmarks(start=start, count=count)
for link in links:
dirty = False
try:
description = unidecode(link.description)
titlecase_description = titlecase(description)
extended = link.extended = unidecode(link.extended)
url = URL(link.url)
"""
TODO: Add better support for common websites like:
- dev.to
- github.com
- medium.com
Possible features:
- more accurate tags
- check for meta descriptions
"""
if url.host == "github.com":
github = get_github_info_for_url(link.url)
github_tags = set(github.get("topics", []))
description = github.get("full_name")
titlecase_description = titlecase(description)
github_description = github.get("description")
extended = (
"> {0}".format(github_description)
if github_description
else link.extended
)
# Github projects should be visible...
if not link.shared:
link.shared = True
dirty = True
if len(link.description) == 0 or link.description == "github.com":
link.description = titlecase_description
dirty = True
if len(link.extended) == 0:
link.extended = extended
dirty = True
# dev.to articles should be shared by default...
elif url.host == "dev.to":
devto_data = get_dev_to_info_for_url(link.url)
github_tags = set(devto_data.get("tags", []))
if not link.shared:
link.shared = True
dirty = True
if "- DEV" in link.description:
link.description = (link.description.split("- DEV")[0]).strip()
dirty = True
if not github_tags.issubset(set(link.tags)):
dirty = True
else:
github_tags = set([])
if len(description.split(" ")) == 1 and url.host != "github.com":
typer.secho("description is blank", fg="red")
try:
doc = requests.get(link.url, timeout=1.0)
soup = BeautifulSoup(doc.text, "html.parser")
description = soup.find("title").text
link.description = description
dirty = True
except (Exception, requests.exceptions.Timeout) as e:
typer.secho(e, fg="red")
if len(link.extended) == 0:
typer.secho("extended is blank", fg="red")
try:
doc = requests.get(link.url, timeout=1.0)
soup = BeautifulSoup(doc.text, "html.parser")
try:
content = ""
if soup.find("meta", {"name": "description"}):
content = soup.find(
"meta", {"name": "description"}
).get("content")
if soup.find("meta", {"name": "description"}):
content = soup.find(
"meta", {"name": "description"}
).get("value")
if soup.find("meta", {"property": "og:description"}):
content = soup.find(
"meta", {"property": "og:description"}
).get("content")
if content:
# TODO: Split this out by the first paragraph
link.extended = f"> {content.strip()}"
typer.echo(link.extended)
dirty = True
except AttributeError as e:
print(e)
# try:
# content = soup.find('meta', {'property': 'og:description'}).get('content')
# link.extended = f'> {content}'
# typer.echo(link.extended)
# dirty = True
# except AttributeError:
# pass
pass
except (Exception, requests.exceptions.Timeout) as e:
typer.secho(e, fg="red")
# link.extended = titlecase_description
# dirty = True
# Sets
tags = set(normalize_tags(link.tags))
suggested = self.pinboard.posts.suggest(url=link.url)
popular, recommended = suggested
popular = normalize_tags(popular.get("popular"), ignore_meta_tags=True)
recommended = normalize_tags(
recommended.get("recommended"), ignore_meta_tags=True
)
new_tags = list(tags | popular | recommended | github_tags)
if len(new_tags) != len(tags) or dirty:
typer.echo("saving... {}".format(link.url))
typer.echo("description: {}".format(titlecase_description))
if extended:
typer.echo("extended: {}".format(extended))
typer.echo("my tags: {}".format(tags))
typer.echo("updating to: {}".format(new_tags))
try:
link.tags = new_tags
link.save()
except UnicodeEncodeError:
try:
link.description = description
link.extended = extended
link.save()
except Exception as e:
typer.echo("=" * 100)
typer.echo(e)
typer.echo(type(e))
typer.echo("=" * 100)
except Exception as e:
typer.echo("=" * 100)
typer.echo(e)
typer.echo(type(e))
typer.echo("=" * 100)
except Exception as e:
typer.echo("=" * 100)
typer.echo(e)
typer.echo(type(e))
typer.echo("=" * 100)
def fix_titlecase(self, start=None, count=None):
links = self.get_bookmarks(start=start, count=count)
for link in links:
description = unidecode(link.description)
titlecase_description = titlecase(description)
extended = unidecode(link.extended)
if description != titlecase_description:
typer.echo("description: {}".format(description))
typer.echo("description: {}".format(titlecase_description))
try:
link.description = titlecase_description
link.save()
except UnicodeEncodeError:
try:
link.description = titlecase_description
link.extended = extended
link.save()
except UnicodeEncodeError:
typer.echo("*" * 60)
typer.echo(
"description: {}".format(unidecode(link.description))
)
typer.echo("extended: {}".format(unidecode(link.extended)))
typer.echo("url: {}".format(link.url))
typer.echo("tags: {}".format(set(normalize_tags(link.tags))))
typer.echo("*" * 60)
except Exception as e:
typer.echo("=" * 100)
typer.echo(e)
typer.echo(type(e))
typer.echo(link.url)
typer.echo("=" * 100)
def remove_dupes(self, start=None, count=None):
links = self.get_bookmarks(start=start, count=count)
for link in links:
tags = link.tags
tags = [
                tag for tag in tags if len(tag) and tag.startswith(("http", "https"))
]
tag = tags[0] if len(tags) else ""
if tag.startswith(("http", "https")) and tag not in ["http", "https"]:
typer.echo("description: {}".format(unidecode(link.description)))
typer.echo("extended: {}".format(unidecode(link.extended)))
typer.echo("url: {}".format(link.url))
typer.echo("tags: {}".format(tags))
typer.echo("tag: {}".format(tag))
if tag.startswith("http://xn--%20https:-dk9c//"):
tag = tag.replace("http://xn--%20https:-dk9c//", "https://")
new_description = link.description
new_url = tag
link.delete()
if new_url.startswith("http://xn--%20https:-dk9c//"):
new_url = new_url.replace("http://xn--%20https:-dk9c//", "https://")
self.pinboard.posts.add(
url=new_url, description=unidecode(new_description), private=True
)
typer.echo("---")
# CLI api
app = typer.Typer()
@app.command("fix_tags")
def fix_tags(start: int = 0, count: int = 10):
typer.secho("fix_tags()...", fg="green")
bookmarks = Bookmarks(PINBOARD_TOKEN)
bookmarks.fix_tags(start, count)
@app.command("fix_titlecase")
def fix_titlecase(start: int = 0, count: int = 10):
typer.secho("fix_titlecase()...", fg="green")
bookmarks = Bookmarks(PINBOARD_TOKEN)
bookmarks.fix_titlecase(start, count)
@app.command("remove_dupes")
def remove_dupes(start: int = 0, count: int = 10):
typer.secho("remove_dupes()...", fg="green")
bookmarks = Bookmarks(PINBOARD_TOKEN)
bookmarks.remove_dupes(start, count)
if __name__ == "__main__":
app()
```
|
{
"source": "jefftriplett/pit_transcriptor_amazon",
"score": 2
}
|
#### File: jefftriplett/pit_transcriptor_amazon/audio_splitter.py
```python
import boto3
import click
import json
import logging
import os
import requests
import time
from pathlib import Path
from slugify import slugify
bucket = os.environ.get("BUCKET_NAME")
storage = boto3.client("s3")
transcribe = boto3.client("transcribe")
@click.group()
@click.version_option()
def cli():
"Transcribe an audio file"
@cli.command()
@click.argument(
"filename",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
def json_builder(filename):
with open(filename) as json_file:
transcript_json = json.load(json_file)
logging.debug(transcript_json["results"]["transcripts"][0]["transcript"])
json_results = transcript_json["results"]
channels = json_results["channel_labels"]["channels"]
voices = {"ch_0": "speaker 1", "ch_1": "speaker 2"}
speaker = voices["ch_0"]
text_lines = [f"{speaker}\n"]
for item in json_results["items"]:
for channel in channels:
if item in channel["items"]:
ch = channel["channel_label"]
content = item["alternatives"][0]["content"]
if item["type"] != "punctuation":
if speaker != voices[ch]:
speaker = voices[ch]
start_time = round(float(item["start_time"]))
text_lines.append(f"\n\n{speaker}: {start_time}\n")
if float(item["alternatives"][0]["confidence"]) < 0.85:
content = f"%%{content}"
elif text_lines[-1] == content:
continue
text_lines.append(content)
output_filename = filename.replace(".json", ".txt")
with open(output_filename, "w") as transcript:
content = " ".join(text_lines)
content = content.replace(".", ".\n\n")
# content, count = re.subn(r" (?=[\.\,\?\!])", "\n", content)
# content, count = re.subn(r" (?=[\.\?\!])", "\n", content)
# print(count)
transcript.write(content)
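# The resulting .txt inserts a "speaker N: <start seconds>" header at each speaker change and
# prefixes words transcribed with confidence below 0.85 with "%%" for manual review.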
@cli.command()
@click.argument(
"filename",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
@click.option("--delay", default=30)
def start_transcription(delay, filename):
stem = slugify(Path(filename).stem)
suffix = Path(filename).suffix
key = f"{stem}{suffix}"
transcribe_job_uri = f"{storage.meta.endpoint_url}/{bucket}/{key}"
click.echo(transcribe_job_uri)
transcribe.start_transcription_job(
TranscriptionJobName=key,
Media={"MediaFileUri": transcribe_job_uri},
MediaFormat=suffix[1:],
LanguageCode="en-US",
Settings={"ChannelIdentification": True},
)
@cli.command()
@click.argument(
"filename",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
@click.option("--delay", default=30)
def transcription(delay, filename):
stem = slugify(Path(filename).stem)
suffix = Path(filename).suffix
key = f"{stem}{suffix}"
transcribe_job_uri = f"{storage.meta.endpoint_url}/{bucket}/{key}"
click.echo(transcribe_job_uri)
# transcribe.start_transcription_job(
# TranscriptionJobName=key,
# Media={"MediaFileUri": transcribe_job_uri},
# MediaFormat=suffix[1:],
# LanguageCode="en-US",
# Settings={"ChannelIdentification": True},
# )
click.echo("transcription started")
job = transcribe.get_transcription_job(TranscriptionJobName=key)
while job["TranscriptionJob"]["TranscriptionJobStatus"] == "IN_PROGRESS":
time.sleep(delay)
job = transcribe.get_transcription_job(TranscriptionJobName=key)
r = requests.get(job["TranscriptionJob"]["Transcript"]["TranscriptFileUri"])
r.raise_for_status()
with open(f"{stem}.json", "w") as json_file:
json_file.write(json.dumps(r.json(), indent=2))
@cli.command()
@click.argument(
"filename",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
def upload(filename):
stem = slugify(Path(filename).stem)
suffix = Path(filename).suffix
key = f"{stem}{suffix}"
upload = storage.upload_file(Filename=filename, Bucket=bucket, Key=key)
click.echo("Audio Uploaded, Beginning Transcription")
print(upload)
if __name__ == "__main__":
cli()
```
|
{
"source": "jefftriplett/prorality",
"score": 2
}
|
#### File: prorality/proposals/models.py
```python
import uuid
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.functional import cached_property
from markupfield.fields import MarkupField
from simple_history.models import HistoricalRecords
from organizations.models import Organization
from whatnots.models import ContentManageable
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'markdown')
class Proposal(ContentManageable):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
organization = models.ForeignKey(Organization, null=True, blank=True)
subject = models.TextField()
body = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE,
null=True, blank=True)
url = models.TextField(null=True, blank=True)
closing_date = models.DateField(null=True, blank=True)
allow_comments = models.BooleanField(default=False)
STATUS_DRAFT = 'draft'
STATUS_FINAL = 'final'
STATUS_WITHDRAWN = 'withdrawn'
STATUS_ACCEPTED = 'accepted'
STATUS_REJECTED = 'rejected'
STATUS_SUPERSEDED = 'superseded'
STATUS_CHOICES = [
(STATUS_DRAFT, 'Draft'),
(STATUS_FINAL, 'Final'),
(STATUS_WITHDRAWN, 'Withdrawn'),
(STATUS_ACCEPTED, 'Accepted'),
(STATUS_REJECTED, 'Rejected'),
# (STATUS_SUPERSEDED, 'Superseded'),
]
status = models.CharField(max_length=16, choices=STATUS_CHOICES, default=STATUS_DRAFT)
history = HistoricalRecords(excluded_fields=['_body_rendered', 'body_markup_type'])
def __str__(self):
return self.subject
def get_absolute_url(self):
return reverse('proposals:proposal_detail', kwargs={
'organization_slug': self.organization.slug,
'pk': self.pk,
})
def get_accepting_votes(self):
return self.status in [self.STATUS_DRAFT, self.STATUS_FINAL]
def get_proposal_vote_form_url(self):
return reverse('proposals:proposal_vote_form', kwargs={
'organization_slug': self.organization.slug,
'pk': self.id,
})
@cached_property
def positive_votes(self):
votes = Vote.objects.filter(proposal=self, vote=Vote.VOTE_PLUS_ONE).count()
return votes
@cached_property
def neutral_votes(self):
votes = Vote.objects.filter(proposal=self, vote__in=[Vote.VOTE_PLUS_ZERO, Vote.VOTE_MINUS_ZERO]).count()
return votes
@cached_property
def negative_votes(self):
votes = Vote.objects.filter(proposal=self, vote=Vote.VOTE_MINUS_ONE).count()
return votes
class Vote(ContentManageable):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
proposal = models.ForeignKey('proposals.Proposal', on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
VOTE_PLUS_ONE = 'plus_one'
VOTE_PLUS_ZERO = 'plus_zero'
VOTE_MINUS_ZERO = 'minus_zero'
VOTE_MINUS_ONE = 'minus_one'
VOTE_CHOICES = (
(VOTE_PLUS_ONE, "+1: Yes, I agree"),
(VOTE_PLUS_ZERO, "+0: I don't feel strongly about it, but I'm okay with this."),
(VOTE_MINUS_ZERO, "-0: I won't get in the way, but I'd rather we didn't do this."),
(VOTE_MINUS_ONE, "-1: I object on the following grounds"),
)
vote = models.CharField(max_length=16, choices=VOTE_CHOICES, null=True, blank=True)
reason = models.TextField(null=True, blank=True)
history = HistoricalRecords()
class Meta:
unique_together = (
('proposal', 'user'),
)
def __str__(self):
        return '{vote} from {token} on {proposal}'.format(
vote=self.get_vote_display(),
token=self.proposal.subject,
proposal=self.proposal)
```
|
{
"source": "jefftriplett/reditty",
"score": 3
}
|
#### File: jefftriplett/reditty/reditty_web.py
```python
__author__ = '<NAME>'
__version__ = ('0', '1', '0')
import redis
from itty import get, post, run_itty, Response
r = redis.Redis()
def template(file, vars={}, **args):
"""
Super simple template language...
"""
contents = open(file, 'r').read()
return str(contents % (dict(vars, **args)))
def keys_template(all_keys):
temp = []
for keys in all_keys:
        temp.append('<p><a href="/keyvalue/show/%(keys)s">%(keys)s</a></p>' % ({'keys': keys}))
return '\n'.join(temp)
@get('/keyvalue/')
def template_keyvalue(request):
all_keys = r.keys('*')
values = dict(title='Key-Value Store', all_keys=keys_template(all_keys))
return Response(template('templates/keys.html', values))
@post('/keyvalue/add/')
def template_add(request):
key = request.POST.get('key')
value = request.POST.get('value')
if key and value:
r.set(key, value)
all_keys = r.keys('*')
values = dict(title="Key-Value Pair", key=key, value=value, all_keys=keys_template(all_keys))
return Response(template('templates/keys.html', values))
@post('/keyvalue/delete/')
def template_delete(request):
key_delete = request.POST.get('key_delete')
if key_delete:
r.delete(key_delete)
all_keys = r.keys('*')
values = dict(title="Key-Value Pair", key=key_delete, all_keys=all_keys)
return Response(template('templates/delete.html', values))
@get('/keyvalue/show/(?P<key>\w+)/')
def template_show(request, key):
the_key = key.strip()
value = r.get(the_key)
values = dict(title=the_key, the_key=the_key, value=value)
return Response(template('templates/show.html', values))
if __name__ == '__main__':
run_itty()
```
|
{
"source": "jefftriplett/requests-forecast",
"score": 3
}
|
#### File: requests-forecast/test/test_requests_forecast.py
```python
import httpretty
import pytz
from datetime import datetime
from requests_forecast import Forecast
API_KEY = "1234"
LATITUDE = 38.9717
LONGITUDE = -95.235
API_URL = "https://api.darksky.net/forecast/{}/{},{}".format(
API_KEY, LATITUDE, LONGITUDE
)
@httpretty.activate
def test_alerts():
body_fixture = open("test/fixtures/alerts.json").read()
httpretty.register_uri(
httpretty.GET, API_URL, body=body_fixture, content_type="text/json"
)
forecast = Forecast(API_KEY, latitude=LATITUDE, longitude=LONGITUDE)
alerts = forecast.alerts()
assert len(alerts) == 1
assert alerts[0]["title"] == "Freeze Warning for Marin, CA"
# assert str(alerts[0]["time"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 12, 12, 1, 8))
# )
# assert str(alerts[0]["expires"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 12, 12, 17, 0))
# )
@httpretty.activate
def test_currently():
body_fixture = open("test/fixtures/full.json").read()
httpretty.register_uri(
httpretty.GET, API_URL, body=body_fixture, content_type="text/json"
)
forecast = Forecast(API_KEY, latitude=LATITUDE, longitude=LONGITUDE)
currently = forecast.currently()
assert "precipIntensity" in currently.keys()
assert "temperature" in currently.keys()
assert "icon" in currently.keys()
assert "cloudCover" in currently.keys()
assert "summary" in currently.keys()
assert "pressure" in currently.keys()
assert "windSpeed" in currently.keys()
assert "visibility" in currently.keys()
assert "time" in currently.keys()
assert "humidity" in currently.keys()
assert "windBearing" in currently.keys()
assert currently["temperature"] == 58.9
assert currently.temperature == 58.9
assert currently["summary"] == "Mostly Cloudy"
assert currently.summary == "Mostly Cloudy"
# assert str(currently["time"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 29, 0, 8, 25))
# )
@httpretty.activate
def test_daily():
body_fixture = open("test/fixtures/full.json").read()
httpretty.register_uri(
httpretty.GET, API_URL, body=body_fixture, content_type="text/json"
)
forecast = Forecast(API_KEY, latitude=LATITUDE, longitude=LONGITUDE)
daily = forecast.daily()
assert "data" in daily.keys()
assert "icon" in daily.keys()
assert "summary" in daily.keys()
assert daily.icon == "rain"
assert daily["icon"] == "rain"
assert (
daily.summary
== "Mixed precipitation off-and-on throughout the week; temperatures peaking at 70\xb0 on Sunday."
)
assert (
daily["summary"]
== "Mixed precipitation off-and-on throughout the week; temperatures peaking at 70\xb0 on Sunday."
)
assert len(daily["data"]) == 8
assert "cloudCover" in daily["data"][0].keys()
assert "humidity" in daily["data"][0].keys()
assert "icon" in daily["data"][0].keys()
assert "precipIntensity" in daily["data"][0].keys()
assert "precipType" in daily["data"][0].keys()
assert "pressure" in daily["data"][0].keys()
assert "summary" in daily["data"][0].keys()
assert "sunriseTime" in daily["data"][0].keys()
assert "sunsetTime" in daily["data"][0].keys()
assert "temperatureMax" in daily["data"][0].keys()
assert "temperatureMaxTime" in daily["data"][0].keys()
assert "temperatureMin" in daily["data"][0].keys()
assert "temperatureMinTime" in daily["data"][0].keys()
assert "time" in daily["data"][0].keys()
assert "visibility" in daily["data"][0].keys()
assert "windBearing" in daily["data"][0].keys()
assert "windSpeed" in daily["data"][0].keys()
assert daily["data"][0]["temperatureMax"] == 63.85
# assert daily["data"][0].temperatureMax == 63.85
assert daily["data"][0]["temperatureMin"] == 35.05
# assert daily['data'][0].temperatureMin == 35.05
# assert str(daily["data"][0]["time"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 28, 5, 0))
# )
# assert str(daily["data"][0]["sunriseTime"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 28, 12, 12, 29))
# )
# assert str(daily["data"][0]["sunsetTime"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 29, 00, 41, 39))
# )
# assert str(daily["data"][0]["temperatureMaxTime"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 28, 21, 0))
# )
# assert str(daily["data"][0]["temperatureMinTime"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 28, 12, 0))
# )
@httpretty.activate
def test_hourly():
body_fixture = open("test/fixtures/full.json").read()
httpretty.register_uri(
httpretty.GET, API_URL, body=body_fixture, content_type="text/json"
)
forecast = Forecast(API_KEY, latitude=LATITUDE, longitude=LONGITUDE)
hourly = forecast.hourly()
assert {"data", "icon", "summary"} == set(hourly.keys())
assert hourly["icon"] == "partly-cloudy-day"
assert hourly["summary"] == "Mostly cloudy until tomorrow afternoon."
assert "cloudCover" in hourly["data"][0].keys()
assert "humidity" in hourly["data"][0].keys()
assert "icon" in hourly["data"][0].keys()
assert "precipIntensity" in hourly["data"][0].keys()
assert "pressure" in hourly["data"][0].keys()
assert "summary" in hourly["data"][0].keys()
assert "temperature" in hourly["data"][0].keys()
assert "time" in hourly["data"][0].keys()
assert "visibility" in hourly["data"][0].keys()
assert "windBearing" in hourly["data"][0].keys()
assert "windSpeed" in hourly["data"][0].keys()
assert len(hourly["data"]) == 49
assert hourly["data"][0]["temperature"] == 59.52
# assert str(hourly["data"][0]["time"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 29, 0, 0))
# )
@httpretty.activate
def test_minutely():
body_fixture = open("test/fixtures/full.json").read()
httpretty.register_uri(
httpretty.GET, API_URL, body=body_fixture, content_type="text/json"
)
forecast = Forecast(API_KEY, latitude=LATITUDE, longitude=LONGITUDE)
minutely = forecast.minutely()
assert "data" in minutely.keys()
assert "icon" in minutely.keys()
assert "summary" in minutely.keys()
assert minutely["icon"] == "partly-cloudy-day"
assert minutely["summary"] == "Mostly cloudy for the hour."
assert len(minutely["data"]) == 61
assert "precipIntensity" in minutely["data"][0].keys()
assert "time" in minutely["data"][0].keys()
# assert str(minutely["data"][0]["time"].astimezone(pytz.utc)) == str(
# pytz.utc.localize(datetime(2013, 3, 29, 0, 8))
# )
```
|
{
"source": "jefftriplett/transcriptor",
"score": 2
}
|
#### File: transcriptor/tests/test_amazon.py
```python
import pytest
import requests
import requests_mock
from transcriptor import amazon
### SUPPORTING VALUES ###
speaker_segment = {
"speakers": 4,
"segments": [
{
"start_time": "0.44",
"speaker_label": "spk_0",
"end_time": "3.3",
"items": [
{
"start_time": "0.44",
"speaker_label": "spk_0",
"end_time": "0.99",
},
]
}],
}
no_speaker_segment = [{
"text": 'Lorem autem veritatis.',
"start_time": "0.44",
"end_time": "3.3",
}]
test_job_no_speaker = {
'jobName': 'TestJob',
'results': {
'transcripts': [
{'transcript': 'Lorem consequatur nesciunt...'},
],
'items': no_speaker_segment,
}
}
### TESTS ###
def test_add_speaker_creates_speaker_object():
"""Amazon add_speaker creates a speaker object"""
speaker = amazon.add_speaker(0)
assert speaker.base_name == 'spk_0'
assert speaker.speaker_index == 0
def test_add_marker_creates_marker_object_with_speaker():
"""Amazon add_marker creates a marker object"""
marker = amazon.add_marker(speaker_segment['segments'][0], has_speakers=True)
assert marker.speaker == amazon.add_speaker(0)
assert marker.start_time == 0.44
assert marker.end_time == 3.3
def test_add_marker_creates_marker_object_with_no_speaker():
"""Amazon add_marker creates a marker object"""
marker = amazon.add_marker(no_speaker_segment)
assert marker.speaker == None
assert marker.start_time == 0.44
assert marker.end_time == 3.3
def test_from_amazon_uri_gets_job_json_from_uri(requests_mock, mocker):
'''Given a uri, fetch JSON'''
requests_mock.get(
'https://example.com/example_job.json',
json=test_job_no_speaker,
)
mocker.patch('transcriptor.amazon.Job')
job = amazon.from_amazon_uri('https://example.com/example_job.json')
amazon.Job.assert_called()
```
|
{
"source": "jefftrull/variant-boost-python",
"score": 2
}
|
#### File: jefftrull/variant-boost-python/test_vip.py
```python
import sys, os
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/build"))
from tc import *
from pytest import approx,raises
# variant parameter
def test_passbool(capfd):
doSomething(True)
captured = capfd.readouterr()
assert captured.out == "got a b\n"
def test_passint(capfd):
doSomething(42)
captured = capfd.readouterr()
assert captured.out == "got a i\n"
def test_passdouble(capfd):
doSomething(12.1)
captured = capfd.readouterr()
assert captured.out == "got a d\n"
# variant return type
def test_retdouble():
assert produceSomething(True) == approx(12.1)
def test_retstring():
assert produceSomething(False) == 'surprise!'
# wrapped custom class inside variant
def test_cc_double():
assert type(foo(3.14)) is Foo
def test_cc_wrapped():
assert type(foo(Foo(42, "hello"))) is str
# reference wrappers
def test_rw_cc(capfd):
bar(Foo(42, "hello"))
captured = capfd.readouterr()
assert captured.out.endswith("got a St17reference_wrapperIK3FooE\n") # std::reference_wrapper<Foo const>
def test_rw_int(capfd):
bar(7)
captured = capfd.readouterr()
assert captured.out == "got a i\n"
# treatment of std::monostate and None
def test_ms_int():
assert type(baz(12)) is int
def test_ms_none():
assert baz(None) is None
def test_ms_not_allowed():
with raises(TypeError):
bar(None) # baz allows monostate but bar does NOT
```
|
{
"source": "jeffu110616/argGen",
"score": 3
}
|
#### File: src/modules/model.py
```python
import torch
import torch.nn as nn
from modules import content_decoder
from modules import sentence_planner
from sys import exit
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
class EncoderRNN(nn.Module):
def __init__(self, opt):
super(EncoderRNN, self).__init__()
self.hidden_size = opt.hidden_size // 2 # use bidirectional RNN
self.LSTM = nn.LSTM(input_size=300,
hidden_size=self.hidden_size,
num_layers=2,
batch_first=True,
dropout=opt.dropout,
bidirectional=True)
return
def forward(self, input_embedded, input_lengths):
"""forward path, note that inputs are batch first"""
lengths_list = input_lengths.view(-1).tolist()
packed_emb = pack(input_embedded, lengths_list, True)
memory_bank, encoder_final = self.LSTM(packed_emb)
        # unpack with batch_first=True so the view below keeps the (batch, seq, 2*hidden) layout intact
        memory_bank = unpack(memory_bank, batch_first=True)[0].view(input_embedded.size(0), input_embedded.size(1), -1)
return memory_bank, encoder_final
class Model(nn.Module):
def __init__(self, word_emb, vocab_size, opt):
super(Model, self).__init__()
self.word_emb = word_emb
self.vocab_size = vocab_size
self.sp_dec = sentence_planner.SentencePlanner(opt)
self.wd_dec = content_decoder.WordDecoder(vocab_size, opt)
self.ce_loss = nn.CrossEntropyLoss(reduction="sum", ignore_index=-1)
self.nll_loss = nn.NLLLoss(reduction="sum", ignore_index=-1)
self.bce_loss = nn.BCELoss(reduction="none")
def forward(self, *args, **kwargs):
raise NotImplementedError
def compute_word_loss_probs(self, word_prob, word_targets):
"""
self.nll_loss = nn.NLLLoss(reduction="sum", ignore_index=-1)
Calculate cross-entropy loss on words.
Args:
word_prob: [batch_size, ]
word_targets: [batch_size, ]
"""
word_loss = self.nll_loss(torch.log(word_prob).view(-1, self.vocab_size), word_targets.view(-1))
ppl = torch.exp(word_loss / torch.sum(word_targets >= 0))
word_loss /= word_targets.size(0)
return word_loss, ppl
def compute_stype_loss(self, stype_pred, stype_labels):
"""
Calculate cross-entropy loss on sentence type prediction.
Args:
stype_pred: [batch_size, max_sent_num, 4]: logits for type prediction
stype_labels: [batch_size, max_sent_num]: gold-standard sentence type indices
Returns:
st_loss: scalar loss value averaged over all samples in the batch
"""
st_loss = self.ce_loss(stype_pred.view(-1, self.sp_dec.sentence_type_n),
stype_labels.view(-1)) / stype_labels.size(0)
return st_loss
def compute_content_selection_loss(self, cs_pred, cs_labels, ph_bank_mask):
"""
Calculate binary cross-entropy loss on keyphrase selection.
Args:
cs_pred: [batch_size, max_sent_num, max_ph_bank_size]
cs_labels: [batch_size, max_sent_num, max_ph_bank_size]
ph_bank_mask: [batch_size, max_sent_num, max_ph_bank_size]
Returns:
cs_loss: scalar loss value averaged over all samples in the batch.
"""
cs_loss_flat = self.bce_loss(cs_pred.view(-1), cs_labels.view(-1))
cs_loss_masked = ph_bank_mask.view(-1) * cs_loss_flat
cs_loss = torch.sum(cs_loss_masked) / torch.sum(ph_bank_mask)
return cs_loss
class ArgGenModel(Model):
def __init__(self, word_emb, vocab_size, opt):
super(ArgGenModel, self).__init__(word_emb, vocab_size, opt)
self.encoder = EncoderRNN(opt)
def forward_enc(self, src_inputs_tensor, src_len_tensor):
src_emb = self.word_emb(src_inputs_tensor)
enc_outs, enc_final = self.encoder.forward(input_embedded=src_emb, input_lengths=src_len_tensor)
# self.sp_dec.init_state(enc_final)
# self.wd_dec.init_state(enc_final)
return enc_outs, enc_final
def forward(self, tensor_dict, device=None):
batch_size, sent_num, _ = tensor_dict["phrase_bank_selection_index"].size()
enc_outs_op, enc_final_op = self.forward_enc(src_inputs_tensor=tensor_dict["src_inputs"],
src_len_tensor=tensor_dict["src_lens"])
# Needed to sort manually
_, sorted_indice = torch.sort(tensor_dict["src_inner_lens"], descending=True)
_, inv_sorted_indice = torch.sort(sorted_indice, descending=False)
enc_outs_inner, enc_final_inner = self.forward_enc(src_inputs_tensor=tensor_dict["src_inner_inputs"][sorted_indice],
src_len_tensor=tensor_dict["src_inner_lens"][sorted_indice])
# print(enc_outs_op.size())
enc_outs_inner = enc_outs_inner[inv_sorted_indice]
# print(enc_outs_inner.size())
enc_outs_inner_bi = enc_outs_inner.view(enc_outs_inner.size(0), enc_outs_inner.size(1), 2, -1)
# print(enc_outs_inner_bi.size())
enc_outs_inner_last = torch.cat( [enc_outs_inner_bi[:, -1, 0], enc_outs_inner_bi[:, 0, 1]], -1 ).view(batch_size, 1, -1)
# print(enc_outs_inner_last.size())
enc_outs_inner_last = enc_outs_inner_last.repeat_interleave(enc_outs_op.size(1), 1)
enc_outs = torch.cat([enc_outs_op, enc_outs_inner_last], -1)
self.sp_dec.init_state(enc_final_op)
self.wd_dec.init_state(enc_final_op)
ph_bank_emb_raw = self.word_emb(tensor_dict["phrase_bank"])
ph_bank_emb = torch.sum(ph_bank_emb_raw, -2)
_, sp_dec_outs, stype_pred_logits, next_sent_sel_pred_probs, kp_mem_outs = \
self.sp_dec.forward(
ph_bank_emb=ph_bank_emb,
ph_bank_sel_ind_inputs=tensor_dict["phrase_bank_selection_index"],
stype_one_hot_tensor=tensor_dict["tgt_sent_type_onehot"],
ph_sel_ind_mask=tensor_dict["phrase_bank_selection_index_mask"],
)
wd_dec_state, enc_attn, wd_pred_prob, wd_logits = self.wd_dec.forward(
word_inputs_emb=self.word_emb(tensor_dict["tgt_word_ids_input"]),
sent_planner_output=sp_dec_outs,
sent_id_tensor=tensor_dict["tgt_sent_ids"],
sent_mask_tensor=tensor_dict["tgt_word_ids_input_mask"],
memory_bank=enc_outs,
memory_len=tensor_dict["phrase_bank_len"],
ph_bank_word_ids=tensor_dict["phrase_bank"],
ph_bank_word_mask=tensor_dict["phrase_bank_word_mask"],
stype_one_hot=tensor_dict["tgt_sent_type_onehot"].float(),
)
return stype_pred_logits, next_sent_sel_pred_probs, wd_pred_prob, wd_logits, enc_attn, kp_mem_outs
class AbsGenModel(Model):
def __init__(self, word_emb, vocab_size, opt):
super(AbsGenModel, self).__init__(word_emb, vocab_size, opt)
self.encoder = EncoderRNN(opt)
def forward_enc(self, src_inputs_tensor, src_len_tensor):
src_emb = self.word_emb(src_inputs_tensor)
enc_outs, enc_final = self.encoder.forward(src_emb, src_len_tensor)
self.sp_dec.init_state(enc_final)
self.wd_dec.init_state(enc_final)
return enc_outs, enc_final
def forward(self, tensor_dict, device=None):
enc_outs, _ = self.forward_enc(src_inputs_tensor=tensor_dict["src_inputs"],
src_len_tensor=tensor_dict["src_lens"])
ph_bank_emb_raw = self.word_emb(tensor_dict["phrase_bank"])
ph_bank_emb = torch.sum(ph_bank_emb_raw, -2)
_, sp_dec_outs, _, next_sent_sel_pred_probs, kp_mem_outs = \
self.sp_dec.forward(
ph_bank_emb=ph_bank_emb,
ph_bank_sel_ind_inputs=tensor_dict["phrase_bank_selection_index"],
stype_one_hot_tensor=None,
ph_sel_ind_mask=tensor_dict["phrase_bank_selection_index_mask"],
)
wd_dec_state, enc_attn, wd_pred_prob, wd_logits = self.wd_dec.forward(
word_inputs_emb=self.word_emb(tensor_dict["tgt_word_ids_input"]),
sent_planner_output=sp_dec_outs,
sent_id_tensor=tensor_dict["tgt_sent_ids"],
sent_mask_tensor=tensor_dict["tgt_word_ids_input_mask"],
memory_bank=kp_mem_outs,
memory_len=tensor_dict["phrase_bank_len"],
ph_bank_word_ids=tensor_dict["phrase_bank"],
ph_bank_word_mask=tensor_dict["phrase_bank_word_mask"],
stype_one_hot=None,
)
return None, next_sent_sel_pred_probs, wd_pred_prob, wd_logits, enc_attn, kp_mem_outs
class WikiGenModel(Model):
def __init__(self, word_emb, vocab_size, opt):
super(WikiGenModel, self).__init__(word_emb, vocab_size, opt)
# For Wikipedia generation, encoder is simply a linear layer for the title
# self.encoder = (nn.Linear(300, 2 * 512, bias=True).cuda(), nn.Linear(300, 2 * 512, bias=True).cuda())
self.encoder = nn.ModuleList([nn.Linear(300, 2 * 512, bias=True), nn.Linear(300, 2 * 512, bias=True)])
def forward_enc(self, src_inputs_tensor):
"""
Run feedforward encoder layer, where the input is the sum of word embeddings
in the Wikipedia article title.
Args:
src_inputs_tensor: [batch_size, num_words] input title word ids
"""
src_emb_word = self.word_emb(src_inputs_tensor)
src_emb_instance = torch.sum(src_emb_word, dim=-2)
enc_vec_h = torch.tanh(self.encoder[0](src_emb_instance))
enc_vec_c = torch.tanh(self.encoder[1](src_emb_instance))
self.sp_dec.init_state(encoder_final=(enc_vec_h.view(2, -1, 512),
enc_vec_c.view(2, -1, 512)))
self.wd_dec.init_state(encoder_final=(enc_vec_h.view(2, -1, 512),
enc_vec_c.view(2, -1, 512)))
def forward(self, tensor_dict, device=None):
self.forward_enc(src_inputs_tensor=tensor_dict["src_inputs"])
batch_size, sent_num, _ = tensor_dict["phrase_bank_selection_index"].size()
if "style" in tensor_dict:
style_embedded = tensor_dict["style"].float()
else:
style_embedded = torch.ones([batch_size, sent_num, 1], dtype=torch.float).to(device)
# convert keyphrases into word embeddings
# ph_bank_emb_raw: [batch_size, max_ph_bank, max_ph_words, 300]
ph_bank_emb_raw = self.word_emb(tensor_dict["phrase_bank"])
# sum up word embeddings for the keyphrase and create keyphrase embeddings
# ph_bank_emb: [batch_size, max_ph_bank, 300]
ph_bank_emb = torch.sum(ph_bank_emb_raw, -2)
_, sp_dec_outs, stype_pred_logits, next_sent_sel_pred_probs, kp_mem_outs = \
self.sp_dec.forward(
ph_bank_emb=ph_bank_emb,
ph_bank_sel_ind_inputs=tensor_dict["phrase_bank_selection_index"],
stype_one_hot_tensor=tensor_dict["tgt_sent_type_onehot"],
ph_sel_ind_mask=tensor_dict["phrase_bank_selection_index_mask"],
global_style_emb=style_embedded
)
wd_dec_state, enc_attn, wd_pred_prob, wd_logits = self.wd_dec.forward(
word_inputs_emb=self.word_emb(tensor_dict["tgt_word_ids_input"]),
sent_planner_output=sp_dec_outs,
sent_id_tensor=tensor_dict["tgt_sent_ids"],
sent_mask_tensor=tensor_dict["tgt_word_ids_input_mask"],
memory_bank=kp_mem_outs,
memory_len=tensor_dict["phrase_bank_len"],
ph_bank_word_ids=tensor_dict["phrase_bank"],
ph_bank_word_mask=tensor_dict["phrase_bank_word_mask"],
stype_one_hot=tensor_dict["tgt_sent_type_onehot"].float(),
)
return stype_pred_logits, next_sent_sel_pred_probs, wd_pred_prob, wd_logits, enc_attn, kp_mem_outs
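# --- Hedged usage sketch (not part of the original argGen code) ---
# A minimal shape check of EncoderRNN with dummy tensors; the hyperparameters
# (hidden_size, dropout) and tensor sizes below are illustrative assumptions only.
if __name__ == '__main__':
    from types import SimpleNamespace
    _opt = SimpleNamespace(hidden_size=512, dropout=0.1)
    _enc = EncoderRNN(_opt)
    _emb = torch.randn(2, 7, 300)      # [batch, seq_len, emb_dim] dummy word embeddings
    _lens = torch.tensor([7, 5])       # lengths sorted in descending order, as pack() expects
    _bank, _final = _enc(_emb, _lens)
    print(_bank.shape)                 # expected: torch.Size([2, 7, 512])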
```
#### File: src/modules/sentence_planner.py
```python
import torch
import torch.nn as nn
class SentencePlanner(nn.Module):
def __init__(self, opt):
super(SentencePlanner, self).__init__()
self.hidden_size = opt.hidden_size
self.batch_size = opt.batch_size
planner_hidden_size = 300
if opt.task == "arggen":
self.sentence_type_n = 4
elif opt.task == "wikigen":
self.sentence_type_n = 5
planner_hidden_size = 301 # global style extra bit
else:
self.sentence_type_n = 0
self.opt = opt
self.state = dict()
self.planner = nn.LSTM(input_size=planner_hidden_size,
hidden_size=self.hidden_size,
num_layers=2,
dropout=opt.dropout,
batch_first=True,
bias=True)
self.keyphrase_reader = nn.LSTM(input_size=300,
hidden_size=150,
num_layers=1,
batch_first=True,
bias=True,
bidirectional=True)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax(dim=-1)
self.sigmoid = nn.Sigmoid()
self.stype_inner = nn.Linear(self.hidden_size + planner_hidden_size, self.hidden_size, bias=True)
self.stype_readout = nn.Linear(self.hidden_size, self.sentence_type_n, bias=True)
self.keyphrase_sel_hidden_weights = nn.Linear(self.hidden_size, 1, bias=True)
self.bilinear_layer = nn.Linear(300, 300, bias=False)
return
def init_state(self, encoder_final):
def _fix_enc_hidden(hidden):
hidden = torch.cat([hidden[0:hidden.size(0):2],
hidden[1:hidden.size(0):2]], 2)
return hidden
if self.opt.task == "wikigen":
self.state["hidden"] = encoder_final
elif self.opt.task in ["arggen", "absgen"]:
self.state["hidden"] = tuple([_fix_enc_hidden(enc_hid) for enc_hid in encoder_final])
def forward_onestep(self, kp_ph_bank_vec, ph_bank_sel_ind_inputs, ph_bank_sel_ind_history):
"""
run forward pass on text planning decoder for one step only
Args:
kp_ph_bank_vec [batch_size x max_ph_bank_size x ph_vec_dim]: keyphrase memory representations
ph_bank_sel_ind_inputs: [batch_size x max_ph_bank_size]: keyphrase selection for current step
ph_bank_sel_ind_history: [batch_size x max_ph_bank_size x step-1]: selection history so far
"""
ph_bank_sel_ind_inputs_tensor_sq = ph_bank_sel_ind_inputs.unsqueeze(-1).float()
ph_sel_raw = ph_bank_sel_ind_inputs_tensor_sq * kp_ph_bank_vec
# ph_sum_emb:
ph_sum_emb = torch.sum(ph_sel_raw, -2).unsqueeze(1)
self.rnn_output, dec_state = self.planner(ph_sum_emb, self.state["hidden"])
self.state["hidden"] = dec_state
stype_pred_logits = self._predict_sentence_type(ph_sum_emb)
stype_onehot = self.sigmoid(stype_pred_logits)
next_sentence_pred_probs = self._predict_keyphrase_selection(stype_onehot,
ph_bank_sel_ind_history,
kp_ph_bank_vec)
return self.rnn_output, stype_pred_logits, next_sentence_pred_probs
def forward_with_ph_bank_vec(self, ph_bank_vec, style_emb, ph_bank_sel_ind_inputs, stype_one_hot_tensor,
ph_sel_ind_mask):
ph_bank_sel_ind_inputs_tensor_sq = ph_bank_sel_ind_inputs.unsqueeze(-1).float()
kp_ph_bank_vec_sq = ph_bank_vec.unsqueeze(-3)
ph_sel_raw = ph_bank_sel_ind_inputs_tensor_sq * kp_ph_bank_vec_sq
ph_sum_emb = torch.sum(ph_sel_raw, -2)
ph_batch_size, max_sent_len, _ = ph_sum_emb.size()
rnn_input = torch.cat((ph_sum_emb, style_emb), -1)
self.rnn_output, dec_state = self.planner(rnn_input, self.state["hidden"])
stype_pred_logits = self._predict_sentence_type(ph_sum_emb)
ph_bank_sel_cumulative = torch.cumsum(ph_bank_sel_ind_inputs, dim=1).float() * ph_sel_ind_mask
next_sentence_sel_pred = self._predict_keyphrase_selection(stype_one_hot_tensor,
ph_bank_sel_cumulative,
ph_bank_vec)
return dec_state, self.rnn_output, stype_pred_logits, next_sentence_sel_pred, ph_bank_vec
def forward(self, ph_bank_emb, ph_bank_sel_ind_inputs, stype_one_hot_tensor, ph_sel_ind_mask, global_style_emb=None):
"""
Args:
ph_bank_emb: [batch_size x max_ph_bank x 300] word embedding based phrase vectors for ph_bank
ph_bank_sel_ind_inputs: [batch_size x max_sent_num x max_ph_bank] 1-hot encoding of phrase selection
stype_one_hot_tensor: [batch_size x max_sent_num x 4] 1-hot encoding of sentence type
ph_sel_ind_mask: [batch_size x max_sent_num x max_ph_bank] mask to indicate paddings for ph_bank
global_style_emb: [batch_size] global style for each instance, only applicable for wikigen
Returns:
dec_state:
self.rnn_output:
stype_pred_logits:
next_sentence_sel_pred:
"""
# run RNN over ph_bank to obtain context-aware keyphrase representation
# kp_ph_bank_vec: [batch_size x max_ph_bank x 300]
kp_ph_bank_vec, _ = self.keyphrase_reader(ph_bank_emb)
ph_bank_sel_ind_inputs_tensor_sq = ph_bank_sel_ind_inputs.unsqueeze(-1).float()
kp_ph_bank_vec_sq = kp_ph_bank_vec.unsqueeze(-3)
ph_sel_raw = ph_bank_sel_ind_inputs_tensor_sq * kp_ph_bank_vec_sq
ph_sum_emb = torch.sum(ph_sel_raw, -2)
ph_batch_size, max_sent_len, _ = ph_sum_emb.size()
if global_style_emb is not None:
global_style_emb = global_style_emb.unsqueeze(1).unsqueeze(1)
global_style_emb = global_style_emb.repeat((1, ph_sum_emb.shape[1], 1))
ph_sum_emb = torch.cat((ph_sum_emb, global_style_emb), -1)
self.rnn_output, dec_state = self.planner(ph_sum_emb, self.state["hidden"])
stype_pred_logits = self._predict_sentence_type(ph_sum_emb)
ph_bank_sel_cumulative = torch.cumsum(ph_bank_sel_ind_inputs, dim=1).float() * ph_sel_ind_mask
next_sentence_sel_pred = self._predict_keyphrase_selection(stype_one_hot_tensor,
ph_bank_sel_cumulative,
kp_ph_bank_vec)
return dec_state, self.rnn_output, stype_pred_logits, next_sentence_sel_pred, kp_ph_bank_vec
def _predict_sentence_type(self, ph_sum_emb):
"""
predict sentence type based on hidden state s_j and phrase sum embedding m_j:
t_j = softmax( self.readout( tanh(W[m_j; s_j]) ))
"""
concat_kp_hidden = torch.cat((ph_sum_emb, self.rnn_output), dim=-1)
stype_logits = self.stype_readout(self.tanh(self.stype_inner(concat_kp_hidden)))
return stype_logits
def _predict_keyphrase_selection(self, stype_one_hot, ph_bank_sel_cum, ph_bank_vec):
"""
Predict keyphrase selection using the history selection weights together with the decoder RNN; keyphrases are represented as RNN states.
Args:
stype_one_hot:
ph_bank_sel_cum:
ph_bank_vec:
"""
sentence_level_features = self.keyphrase_sel_hidden_weights(self.rnn_output)
max_ph_size = ph_bank_sel_cum.size(-1)
sentence_level_features_broadcast = sentence_level_features.repeat((1, 1, max_ph_size))
ph_bank_weighted_sum = torch.bmm(ph_bank_sel_cum.float(), ph_bank_vec) # [batch_size x max_sent_num x 300]
ph_bank_weighted_sum_repeat = ph_bank_weighted_sum.unsqueeze(-2).repeat(1, 1, ph_bank_vec.size(1), 1)
ph_bank_emb_unsqueeze = ph_bank_vec.unsqueeze(-3)
partial_prods = self.bilinear_layer(ph_bank_emb_unsqueeze)
prods = partial_prods * ph_bank_weighted_sum_repeat
word_level_features = torch.sum(prods, dim=-1)
# word_level_features = self.keyphrase_sel_cov_weights(ph_bank_weighted_sum) # [batch_size x max_sent_num x 1]
# word_level_features = self.keyphrase_sel_cov_weights(ph_bank_sel_cum.unsqueeze(dim=-1)).squeeze() * ph_bank_mask
content_sel_logits = sentence_level_features_broadcast + word_level_features
content_sel_pred = self.sigmoid(content_sel_logits)
return content_sel_pred
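# --- Hedged mini-example (not part of the original module) ---
# Illustrates the cumulative selection-history term computed in forward():
# torch.cumsum over the sentence dimension counts, for every keyphrase, how often
# it has been selected so far. The toy tensors below are made-up values.
if __name__ == '__main__':
    _sel = torch.tensor([[[1, 0, 1], [0, 1, 0]]])      # [batch=1, max_sent=2, max_ph_bank=3]
    _mask = torch.ones_like(_sel, dtype=torch.float)   # no padding in this toy example
    print(torch.cumsum(_sel, dim=1).float() * _mask)   # rows: [1., 0., 1.] then [1., 1., 1.]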
```
|
{
"source": "jeffujioka/envio_challenge",
"score": 3
}
|
#### File: scripts/python/gpio.py
```python
from os import path
class Gpio:
_GPIO_BASE_PATH = '/sys/class/gpio'
def _set_gpio_base_path_only_for_testing_purposes(self, new_path):
# "taking advantage of python not having constant for changing the value of _GPIO_BASE_PATH
self._GPIO_BASE_PATH = new_path
def _get_gpio_path(self, pin):
return path.join(self._GPIO_BASE_PATH, 'gpio{0}'.format(pin))
def _get_gpio_file_path(self, pin, file):
return path.join(self._GPIO_BASE_PATH, 'gpio{0}'.format(pin), file)
def _get_file(self, file_path, mode):
file = None
try:
file = open(file_path, mode)
except Exception as e:
print(e)
return file
def _write(self, file_path, data):
file = None
try:
file = self._get_file(file_path, 'w')
file.write(str(data))
file.flush()
except Exception as e:
print(e)
finally:
if (file): file.close()
def _read(self, file_path):
ret = None
file = None
try:
file = self._get_file(file_path, 'r')
if (file):
ret = file.read().strip()
except Exception as e:
print(e)
finally:
if (file): file.close()
return ret
def export(self, pin):
"""Exports a given gpio pin.
Equivalent to: echo pin > /sys/class/gpio/export
Parameter:
pin : int
Gpio pin to be exported
"""
self._write(path.join(self._GPIO_BASE_PATH, 'export'), pin)
def unexport(self, pin):
"""Unexports a given gpio pin.
Equivalent to: echo pin > /sys/class/gpio/unexport
Parameter:
pin : int
Gpio pin to be unexported
"""
self._write(path.join(self._GPIO_BASE_PATH, 'unexport'), pin)
def set_direction(self, pin, direction):
"""Sets the direction of a given gpio pin.
Equivalent to: echo <direction> > /sys/class/gpio/<gpio_pin>/direction
Parameters:
pin : int
Gpio pin to set the direction
direction : str
Direction to be set to the given gpio pin
Possible values are 'in' or 'out'
"""
if direction not in ['in', 'out']:
raise ValueError(f"ERROR! Setting direction [{direction}] of pin [{pin}]. Direction must be 'in' or 'out'")
self._write(self._get_gpio_file_path(pin, 'direction'), direction)
def output(self, pin, value):
"""Sets the output of a given gpio pin.
Parameters:
pin : int
Gpio pin to be set the output to
value : int
Value to be set to the given gpio pin
Possible values are 0 or 1
"""
# TODO validate parameters
self._write(self._get_gpio_file_path(pin, 'value'), value)
def input(self, pin):
"""Reads the value of a given gpio pin.
Parameters:
pin : int
Gpio pin to read the state from
Returns:
str
'0' if gpio pin level is low,
'1' if gpio pin level is high
"""
return self._read(self._get_gpio_file_path(pin, 'value'))
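# --- Hedged usage sketch (not part of the original module) ---
# Blinks one pin via sysfs; pin 18 is an illustrative assumption and the script
# needs a platform that actually exposes /sys/class/gpio (errors are printed,
# not raised, by the helpers above).
if __name__ == '__main__':
    import time
    gpio = Gpio()
    pin = 18                      # assumed pin number, adjust for your board
    gpio.export(pin)
    gpio.set_direction(pin, 'out')
    gpio.output(pin, 1)
    time.sleep(1)
    gpio.output(pin, 0)
    print('current value:', gpio.input(pin))
    gpio.unexport(pin)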
```
|
{
"source": "JeffUTF8/tensorflow-hello-world",
"score": 3
}
|
#### File: tensorflow-hello-world/tensorflow-hello-world/tensorflow_hello_world.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpi
print (tf.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure()
plt.imshow(train_images[0])
plt.grid(False)
plt.show()
# scale pixel values to the range [0, 1]
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs = 10)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose = 2)
print ('\nTest accuracy:', test_acc)
predictions = model.predict(test_images)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
for i in range(10):
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = i + 1
```
|
{
"source": "JeffValenti/SME",
"score": 3
}
|
#### File: SME/test/test_atmo.py
```python
from pathlib import Path
from pytest import raises
from sme.atmo import (
ContinuousOpacityFlags, SmeAtmo, AtmoFileAtlas9, AtmoFileError)
def test_continuousopacityflags():
"""Code coverage tests for ContinuousOpacityFlags() class and methods.
"""
cof = ContinuousOpacityFlags()
assert cof.smelib[2] == 1
cof['H-'] = False
assert cof.smelib[2] == 0
with raises(ValueError):
cof['H++'] = True
with raises(ValueError):
cof['H-'] = 1
assert 'H-' in cof.__str__()
def test_smeatmo():
"""Code coverage tests for SmeAtmo() class and methods.
Demonstrate that modeltype is case insensitive ('rhox', 'RHOX').
Test that wavelength and radius are only specified when appropriate.
"""
scale = [1, 2, 3]
atmo = SmeAtmo('RhoX', scale)
assert 'rhox' in atmo.__str__()
atmo = SmeAtmo('tau', scale, wavelength=5000)
assert '5000' in atmo.__str__()
atmo = SmeAtmo('SPH', scale, radius='7e10')
assert 'None' in atmo.__str__()
with raises(ValueError, match='Invalid modeltype'):
atmo = SmeAtmo('_', scale)
with raises(AttributeError, match='do not specify'):
atmo = SmeAtmo('rhox', scale, radius=7e10)
with raises(AttributeError, match='do not specify'):
atmo = SmeAtmo('rhox', scale, wavelength=5000)
with raises(AttributeError, match='specify wavelength'):
atmo = SmeAtmo('tau', scale)
with raises(AttributeError, match='but not radius'):
atmo = SmeAtmo('tau', scale, radius=7e10)
with raises(AttributeError, match='specify radius'):
atmo = SmeAtmo('sph', scale)
with raises(AttributeError, match='but not wavelength'):
atmo = SmeAtmo('sph', scale, wavelength=5000)
def test_atmofileatlas9():
"""Code coverage tests for Atlas9AtmoFile() class and methods.
"""
datadir = Path(__file__).parent / 'atmo' / 'atlas9'
a9atmo = AtmoFileAtlas9(datadir / 'ap00t5750g45k2odfnew.dat')
assert 'turbulence' in str(a9atmo)
assert 'Abundances' in str(a9atmo.abund)
assert len(a9atmo.atmo['T']) == a9atmo.ndepth
a9atmo = AtmoFileAtlas9(datadir / 'conv_off_turb_on.atlas9')
assert 'turbulence' in str(a9atmo)
def test_atmofileatlas9_exceptions():
"""Handle exceptions raised by AtmoFileAtlas9().
"""
datadir = Path(__file__).parent / 'atmo' / 'atlas9'
AtmoFileAtlas9(datadir / 'complete_file.atlas9')
# header section
with raises(AtmoFileError, match='incomplete header'):
AtmoFileAtlas9(datadir / 'incomplete_header.atlas9')
with raises(AtmoFileError, match='error parsing header'):
AtmoFileAtlas9(datadir / 'bad_header_label.atlas9')
with raises(AtmoFileError, match='error parsing header'):
AtmoFileAtlas9(datadir / 'bad_header_value.atlas9')
# abund section
with raises(AtmoFileError, match='incomplete abund'):
AtmoFileAtlas9(datadir / 'incomplete_abund.atlas9')
with raises(AtmoFileError, match='error parsing abund'):
AtmoFileAtlas9(datadir / 'bad_abund_label.atlas9')
with raises(AtmoFileError, match='error parsing abund'):
AtmoFileAtlas9(datadir / 'bad_abund_value.atlas9')
# ndepth section
with raises(AtmoFileError, match='incomplete ndepth'):
AtmoFileAtlas9(datadir / 'incomplete_ndepth.atlas9')
with raises(AtmoFileError, match='error parsing ndepth'):
AtmoFileAtlas9(datadir / 'bad_ndepth_label.atlas9')
with raises(AtmoFileError, match='error parsing ndepth'):
AtmoFileAtlas9(datadir / 'bad_ndepth_value.atlas9')
# atmo section
with raises(AtmoFileError, match='incomplete atmo'):
AtmoFileAtlas9(datadir / 'incomplete_atmo.atlas9')
with raises(AtmoFileError, match='error parsing atmo'):
AtmoFileAtlas9(datadir / 'bad_atmo_label.atlas9')
with raises(AtmoFileError, match='error parsing atmo'):
AtmoFileAtlas9(datadir / 'bad_atmo_value.atlas9')
# pradk section
with raises(AtmoFileError, match='incomplete pradk'):
AtmoFileAtlas9(datadir / 'incomplete_pradk.atlas9')
with raises(AtmoFileError, match='error parsing pradk'):
AtmoFileAtlas9(datadir / 'bad_pradk_label.atlas9')
with raises(AtmoFileError, match='error parsing pradk'):
AtmoFileAtlas9(datadir / 'bad_pradk_value.atlas9')
# niter section
with raises(AtmoFileError, match='incomplete niter'):
AtmoFileAtlas9(datadir / 'incomplete_niter.atlas9')
with raises(AtmoFileError, match='error parsing niter'):
AtmoFileAtlas9(datadir / 'bad_niter_label.atlas9')
with raises(AtmoFileError, match='error parsing niter'):
AtmoFileAtlas9(datadir / 'bad_niter_value.atlas9')
```
#### File: SME/test/test_util.py
```python
from pytest import raises
from sme.util import (
change_waveunit, change_energyunit,
air_to_vacuum, vacuum_to_air, vacuum_angstroms)
def test_change_waveunit():
"""Test code paths and cases in util.change_waveunit().
Test conversion from/to Angstroms to/from all units listed in `cases`.
Test units specified in uppercase and lowercase.
Test scalar wavelength and list of wavelengths.
"""
cases = {
'a': 5000,
'nm': 500,
'um': 0.5,
'micron': 0.5,
'cm^-1': 20000,
'1/cm': 20000}
for unit in cases:
wave = cases[unit]
assert change_waveunit(5000, 'A', unit) == wave
assert change_waveunit(5000, 'A', unit.upper()) == wave
assert change_waveunit([5000], 'A', unit) == [wave]
assert change_waveunit(wave, unit, 'A') == 5000
assert change_waveunit(wave, unit.upper(), 'A') == 5000
assert change_waveunit([wave, wave], unit, 'A') == [5000, 5000]
with raises(ValueError, match='invalid wavelength unit'):
change_waveunit(5000, 'A', 'erg')
with raises(ValueError, match='invalid wavelength unit'):
change_waveunit(5000, 'erg', 'A')
def test_change_energyunit():
"""Test code paths and cases in util.change_energyunit().
Test conversion from/to eV to/from all units listed in `cases`.
Test units specified in uppercase and lowercase.
Test scalar energy and list of energies.
"""
cases = {
'ev': 1,
'erg': 1.602176634e-12,
'j': 1.602176634e-19,
'cm^-1': 8065.543937,
'1/cm': 8065.543937}
for unit in cases:
energy = cases[unit]
assert change_energyunit(1, 'eV', unit) == energy
assert change_energyunit(1, 'eV', unit.upper()) == energy
assert change_energyunit([1], 'eV', unit) == [energy]
assert change_energyunit(energy, unit, 'eV') == 1
assert change_energyunit(energy, unit.upper(), 'eV') == 1
assert change_energyunit([energy, energy], unit, 'eV') == [1, 1]
with raises(ValueError, match='invalid energy unit'):
change_energyunit(5000, 'eV', 'A')
with raises(ValueError, match='invalid energy unit'):
change_energyunit(5000, 'A', 'eV')
def test_air_to_vacuum():
"""Test code paths and cases in util.air_to_vacuum().
Test conversion for input wavelengths in Angstroms and nm.
Test wavelength above and below 2000 Angstroms.
Test scalar wavelength and list of wavelengths.
Test that air_to_vacuum(vacuum_to_air( )) is an identity operator.
Allow discrepancies smaller than 1e-8 Angstroms.
"""
wvac_a = [1999, 5000, 5010]
wair_a = [1999, 4998.605522013399, 5008.602864587058]
wvac_nm = [100, 200, 1000, 2000, 5000, 10000, 20000]
for wv, wa in zip(wvac_a, wair_a):
assert abs(air_to_vacuum(wa, 'A') - wv) < 1e-8
wvac_nm = air_to_vacuum([w / 10 for w in wair_a], 'nm')
assert all([abs(10 * wnm - wv) < 1e-8 for wnm, wv in zip(wvac_nm, wvac_a)])
for wnm in wvac_nm:
assert abs(air_to_vacuum(vacuum_to_air(wnm, 'nm'), 'nm') - wnm) < 1e-8
def test_vacuum_to_air():
"""Test code paths and cases in util.vacuum_to_air().
Test conversion for input wavelengths in Angstroms and nm.
Test wavelength above and below 2000 Angstroms.
Test scalar wavelength and list of wavelengths.
Allow discrepancies smaller than 1e-8 Angstroms.
"""
wvac_a = [1999, 5000, 5010]
wair_a = [1999, 4998.605522013399, 5008.602864587058]
for wv, wa in zip(wvac_a, wair_a):
assert abs(vacuum_to_air(wv, 'A') - wa) < 1e-8
wair_nm = vacuum_to_air([w / 10 for w in wvac_a], 'nm')
assert all([abs(10 * wnm - wa) < 1e-8 for wnm, wa in zip(wair_nm, wair_a)])
def test_vacuum_angstroms():
"""Test code paths and cases in util.vacuum_angstroms().
"""
win = [5000, 20000, 500]
uin = ['A', 'cm^-1', 'nm']
for w, u in zip(win, uin):
assert vacuum_angstroms(w, u, 'vac') == 5000
assert vacuum_angstroms(w, u, 'air') == 5001.39484863807
with raises(ValueError, match='invalid wavelength unit'):
vacuum_angstroms(5000, 'erg', 'vac')
with raises(ValueError, match='invalid medium'):
vacuum_angstroms(5000, 'A', 'water')
```
|
{
"source": "jeffvandervort/terrasnow-enterprise",
"score": 2
}
|
#### File: terrasnow-enterprise/handlers/snow_cat_item.py
```python
import logging
class SnowCatalogItem(object):
"""ServiceNow Category Item."""
def __init__(self, name, description, conf):
"""Initialize."""
self.name = name
# terraform catalog sys_id
self.catalog = conf.get("SERVICENOW", "TF_CATALOG")
# terraform catalog's watchmaker category
self.category = conf.get("SERVICENOW", "CATEGORY")
self.description = description
# terraform deployment workflow
self.workflow = conf.get("SERVICENOW", "TFE_WORKFLOW")
# terraform-snow (SN application) sys_id
# search for terraform-snow in the sys_package table on your SN inst.
self.sys_package = conf.get("SERVICENOW", "SYS_PACKAGE")
self.isactive = "true"
def data(self):
"""Create category item data payload."""
logging.info('')
return {
"sys_package": self.sys_package,
"name": self.name,
"category": self.category,
"sc_catalogs": self.catalog,
"short_description": self.description,
"workflow": self.workflow,
"active": self.isactive
}
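# --- Hedged usage sketch (not part of the original handler) ---
# Builds the catalog-item payload from a local config file; the file name and
# the item name/description below are illustrative assumptions.
if __name__ == '__main__':
    import configparser
    conf = configparser.ConfigParser()
    conf.read('config.ini')   # expects a [SERVICENOW] section with the keys used above
    item = SnowCatalogItem('example-module', 'Example Terraform module', conf)
    print(item.data())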
```
#### File: terrasnow-enterprise/handlers/tfe_run_handler.py
```python
import logging as log
import urllib.error
import handlers.config as config
import handlers.tfe_handler as tfe_handler
import requests
from glom import glom
FORMAT = ("[%(asctime)s][%(levelname)s]" +
"[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
log.basicConfig(filename='terrasnow_enterprise.log', level=log.INFO,
format=FORMAT)
# Workflow overview (a hedged usage sketch chaining these helpers appears at the end of this module):
# - 1. get the workspace id (extracted on the ServiceNow side)
#      https://www.terraform.io/docs/enterprise/api/workspaces.html#show-workspace
# - 2. make a configuration version request
# - 3. git clone the template
# - 4. tar the template and push it to the workspace
# Tar the module
# - 1. pull the project down from GitHub to the local host
#      (git clone a specific version)
# - 2. loop until the file has finished downloading
# - 3. once the file is done downloading, send its path to the upload function
# Upload the file
# - 1. get the correct url
#      a. query the workspace for the workspace id
#      b. with the workspace id, query for the target upload url
# - 2. upload the file and log the results
# TFE configuration version
def create_config_version(region, workspace_id):
"""Create workspace configuration version."""
# https://www.terraform.io/docs/enterprise/api/configuration-versions.html#create-a-configuration-version
if workspace_id:
configFromS3 = config.ConfigFromS3("tfsh-config", "config.ini",
region)
conf = configFromS3.config
api_endpoint = (
'/workspaces/{}/configuration-versions'.format(workspace_id))
data = config_version_data()
record = tfe_handler.TFERequest(api_endpoint, data, conf)
log.info('Sending create configuraiton request.')
return response_handler(record)
else:
log.error('Workspace id not provided.')
def get_upload_url(region, workspace_id):
"""Return the TFE workspace configuraiton version upload url."""
response = create_config_version(region, workspace_id)
upload_url = glom(response, 'data.attributes.upload-url', default=False)
log.debug('found upload url: {}'.format(upload_url))
return upload_url
def config_version_data():
"""TFE Confugration Version data."""
return {
"data": {
"type": "configuration-versions",
"attributes": {
"auto-queue-runs": True
}
}
}
def upload_configuration_files(upload_url, tar_path):
"""Upload the configuration files to the target workspace."""
log.info('uploading configuration file: {} to {}'.format(tar_path,
upload_url))
if upload_url:
headers = {'Content-Type': 'application/octet-stream'}
file = open(tar_path, 'rb')
response = requests.put(url=upload_url, data=file, headers=headers)
log.info('Received response: {}'.format(response.text))
return response.text
else:
log.error('Upload url not provided.')
def response_handler(record):
"""Evaulate response."""
try:
response = record.make_request()
log.debug('Received response: {}'.format(response))
return response
except urllib.error.HTTPError as e:
if e.code == 422:
return "ERROR: Worspace already exists"
else:
return "ERROR"
```
#### File: terrasnow-enterprise/handlers/tfe_workspace_handler.py
```python
import logging as log
import urllib.parse
import urllib.request
import handlers.config as config
import handlers.tfe_handler as tfe_handler
from glom import glom
class TFEWorkspace(object):
"""Terraform Enterprise Workspace."""
def __init__(self, name, vcs_repo_id, oauth_token_id, tf_version="0.11.1",
working_dir="", vcs_repo_branch=""):
"""Initialize."""
self.name = name
self.tf_version = tf_version
self.working_dir = working_dir
self.vcs_repo_id = vcs_repo_id
self.oauth_token_id = oauth_token_id
self.vcs_repo_branch = vcs_repo_branch
def initialize_workspace(self):
"""Return create TFE workspace request body."""
return {
"data": {
"attributes": {
"name": self.name,
"auto-apply": True,
"terraform_version": self.tf_version,
"working-directory": self.working_dir,
"vcs-repo": {
"identifier": self.vcs_repo_id,
"oauth-token-id": self.oauth_token_id,
"branch": self.vcs_repo_branch,
"default-branch": "true"
}
},
"type": "workspaces"
}
}
def create_workspace(region, org_name, workspace_name, repo_id, repo_version):
"""Create TFE workspace."""
configFromS3 = config.ConfigFromS3("tfsh-config", "config.ini",
region)
conf = configFromS3.config
w = TFEWorkspace(name=workspace_name, vcs_repo_id=repo_id,
vcs_repo_branch=repo_version, oauth_token_id=(
tfe_handler.get_vcs_oauth(conf)))
workspace = w.initialize_workspace()
api_endpoint = "/organizations/{}/workspaces".format(org_name)
record = tfe_handler.TFERequest(api_endpoint, workspace, conf)
return response_handler(record)
def delete_workspace(region, org_name, workspace_name):
"""Delete TFE workspace."""
configFromS3 = config.ConfigFromS3("tfsh-config", "config.ini",
region)
conf = configFromS3.config
api_endpoint = "/organizations/{}/workspaces/{}".format(org_name,
workspace_name)
record = tfe_handler.TFERequest(api_endpoint, None, conf)
response = record.delete()
log.debug('Delete TFE workspace response: {}'.format(response))
return response
def get_workspace(region, org_name, workspace_name):
"""Get the target workspace information."""
configFromS3 = config.ConfigFromS3("tfsh-config", "config.ini",
region)
conf = configFromS3.config
api_endpoint = "/organizations/{}/workspaces/{}".format(org_name,
workspace_name)
response = tfe_handler.TFERequest(api_endpoint, None, conf)
log.debug('Get TFE workspace response: {}'.format(response))
return response_handler(response)
def get_workspace_id(region, org_name, workspace_name):
"""Return the workspace id."""
response = get_workspace(region, org_name, workspace_name)
return glom(response, 'data.id', default=False)
def response_handler(record):
"""Evaulate response."""
try:
response = record.make_request()
log.debug('Create TFE workspace response: {}'.format(response))
return response
except urllib.error.HTTPError as e:
return "ERROR: TFE workspace error occured: {}".format(e)
```
#### File: jeffvandervort/terrasnow-enterprise/webhook.py
```python
import json
import logging
from logging.handlers import RotatingFileHandler
import aws_account_info_listener
import sn_workflow_listener
import terrasnow_enterprise
from flask import abort, Flask, render_template, request
application = Flask(__name__)
# Root of webhook listener. Returns success by default.
@application.route('/', methods=['GET', 'POST'])
def root_listener():
"""Create webhook."""
if request.method == 'GET':
return "SUCCESS", 200
elif request.method == 'POST':
return "SUCCESS", 200
else:
abort(400)
@application.route('/aws-assume-role-webhook', methods=['POST'])
def assume_role_target():
"""Return assumed role credentials."""
if request.method == 'POST':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
return json.dumps(sn_workflow_listener.assume_role_listener(
json.loads(data))), 200
# listens for tag update event sent from gitlab and creates the associated
# SN catalog item
@application.route('/gitlab-webhook', methods=['GET', 'POST'])
def gitlab_target():
"""Create gitlab webhook."""
if request.method == 'GET':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
return json.dumps(
terrasnow_enterprise.process_response(
json.loads(data))), 200
elif request.method == 'POST':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
return json.dumps(
terrasnow_enterprise.process_response(
json.loads(data))), 200
else:
abort(400)
@application.route('/tfe-run-webhook', methods=['POST'])
def tfe_run_target():
"""TFE run event listener."""
if request.method == 'POST':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
return json.dumps(
sn_workflow_listener.TFE_run_listener(
json.loads(data))), 200
# Listens for catalog item variables object sent from SN catalog item order
# request
@application.route('/variables-webhook', methods=['POST'])
def variables_target():
"""TFE variable creation event listener."""
if request.method == 'POST':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
return json.dumps(
sn_workflow_listener.variables_event_listener(
json.loads(data))), 200
# Creates/Deletes a TFE workspace using the SN Cat item base module's source
# repo as provided from SN catalog item order request
@application.route('/workflow-webhook', methods=['POST'])
def workflow_target():
"""Workflow event listener."""
if request.method == 'POST':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
response = json.dumps(sn_workflow_listener.workspace_event_listener(
json.loads(data)))
if "ERROR" in response:
return response, 400
else:
return response, 200
# Returns JSON object of AWS account information
@application.route('/aws-account-info', methods=['POST'])
def aws_account_info():
"""Return aws account info."""
if request.method == 'POST':
data = request.get_data().decode("utf-8", "ignore")
application.logger.error(data)
response = json.dumps(aws_account_info_listener.event_listener(
json.loads(data)))
if "ERROR" in response:
return response, 400
else:
return response, 200
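# --- Hedged smoke-test sketch (not part of the original webhook) ---
# Uses Flask's built-in test client against the root route only; it does not
# exercise the ServiceNow/Terraform listeners, which need live backends.
def _root_smoke_test():
    with application.test_client() as client:
        resp = client.get('/')
        assert resp.status_code == 200 and resp.data == b'SUCCESS'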
if __name__ == '__main__':
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler("terrasnow_enterprise.log",
maxBytes=10000000,
backupCount=5)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
application.logger.addHandler(handler)  # attach the rotating file handler so log output actually reaches the file
application.run(debug=True)
```
|
{
"source": "jeffvan-netsia/voltha_doc",
"score": 2
}
|
#### File: voltha_doc/cli/omci.py
```python
from optparse import make_option
from cmd2 import Cmd, options
from datetime import datetime
from google.protobuf.empty_pb2 import Empty
from cli.table import print_pb_list_as_table
from voltha.protos import third_party
from voltha.protos import voltha_pb2
from voltha.protos.omci_mib_db_pb2 import MibDeviceData, MibClassData, \
MibInstanceData
from os import linesep
_ = third_party
class OmciCli(Cmd):
CREATED_KEY = 'created'
MODIFIED_KEY = 'modified'
MDS_KEY = 'mib_data_sync'
LAST_SYNC_KEY = 'last_mib_sync'
VERSION_KEY = 'version'
DEVICE_ID_KEY = 'device_id'
CLASS_ID_KEY = 'class_id'
INSTANCE_ID_KEY = 'instance_id'
ATTRIBUTES_KEY = 'attributes'
TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
ME_KEY = 'managed_entities'
MSG_TYPE_KEY = 'message_types'
MSG_TYPE_TO_NAME = {
4: 'Create',
5: 'Create Complete',
6: 'Delete',
8: 'Set',
9: 'Get',
10: 'Get Complete',
11: 'Get All Alarms',
12: 'Get All Alarms Next',
13: 'Mib Upload',
14: 'Mib Upload Next',
15: 'Mib Reset',
16: 'Alarm Notification',
17: 'Attribute Value Change',
18: 'Test',
19: 'Start Software Download',
20: 'Download Section',
21: 'End Software Download',
22: 'Activate Software',
23: 'Commit Software',
24: 'Synchronize Time',
25: 'Reboot',
26: 'Get Next',
27: 'Test Result',
28: 'Get Current Data',
29: 'Set Table'
}
def __init__(self, device_id, get_stub):
Cmd.__init__(self)
self.get_stub = get_stub
self.device_id = device_id
self.prompt = '(' + self.colorize(
self.colorize('omci {}'.format(device_id), 'green'),
'bold') + ') '
def cmdloop(self, intro=None):
self._cmdloop()
do_exit = Cmd.do_quit
def do_quit(self, line):
return self._STOP_AND_EXIT
def get_device_mib(self, device_id, depth=-1):
stub = self.get_stub()
try:
res = stub.GetMibDeviceData(voltha_pb2.ID(id=device_id),
metadata=(('get-depth', str(depth)), ))
except Exception as e:
pass  # 'res' is left unbound on failure; callers catch the resulting error
return res
def help_show_mib(self):
self.poutput('show_mib [-d <device-id>] [-c <class-id> [-i <instance-id>]]' +
linesep + '-d: <device-id> ONU Device ID' +
linesep + '-c: <class-id> Managed Entity Class ID' +
linesep + '-i: <instance-id> ME Instance ID')
@options([
make_option('-d', '--device-id', action="store", dest='device_id', type='string',
help='ONU Device ID', default=None),
make_option('-c', '--class-id', action="store", dest='class_id',
type='int', help='Managed Entity Class ID', default=None),
make_option('-i', '--instance-id', action="store", dest='instance_id',
type='int', help='ME Instance ID', default=None)
])
def do_show_mib(self, _line, opts):
"""
Show OMCI MIB Database Information
"""
device_id = opts.device_id or self.device_id
if opts.class_id is not None and not 1 <= opts.class_id <= 0xFFFF:
self.poutput(self.colorize('Error: ', 'red') +
self.colorize('Class ID must be 1..65535', 'blue'))
return
if opts.instance_id is not None and opts.class_id is None:
self.poutput(self.colorize('Error: ', 'red') +
self.colorize('Class ID required if specifying an Instance ID',
'blue'))
return
if opts.instance_id is not None and not 0 <= opts.instance_id <= 0xFFFF:
self.poutput(self.colorize('Error: ', 'red') +
self.colorize('Instance ID must be 0..65535', 'blue'))
return
try:
mib_db = self.get_device_mib(device_id, depth=-1)
except Exception: # UnboundLocalError if Device ID not found in DB
self.poutput(self.colorize('Failed to get MIB database for ONU {}'
.format(device_id), 'red'))
return
mib = self._device_to_dict(mib_db)
self.poutput('OpenOMCI MIB Database for ONU {}'.format(device_id))
if opts.class_id is None and opts.instance_id is None:
self.poutput('Version : {}'.format(mib[OmciCli.VERSION_KEY]))
self.poutput('Created : {}'.format(mib[OmciCli.CREATED_KEY]))
self.poutput('Last In-Sync Time : {}'.format(mib[OmciCli.LAST_SYNC_KEY]))
self.poutput('MIB Data Sync Value: {}'.format(mib[OmciCli.MDS_KEY]))
class_ids = [k for k in mib.iterkeys()
if isinstance(k, int) and
(opts.class_id is None or opts.class_id == k)]
class_ids.sort()
if len(class_ids) == 0 and opts.class_id is not None:
self.poutput(self.colorize('Class ID {} not found in MIB Database'
.format(opts.class_id), 'red'))
return
for cls_id in class_ids:
class_data = mib[cls_id]
self.poutput(' ----------------------------------------------')
self.poutput(' Class ID: {0} - ({0:#x})'.format(cls_id))
inst_ids = [k for k in class_data.iterkeys()
if isinstance(k, int) and
(opts.instance_id is None or opts.instance_id == k)]
inst_ids.sort()
if len(inst_ids) == 0 and opts.instance_id is not None:
self.poutput(self.colorize(('Instance ID {} of Class ID {} not ' +
'found in MIB Database').format(
opts.instance_id, opts.class_id),
'red'))
return
for inst_id in inst_ids:
inst_data = class_data[inst_id]
self.poutput(' Instance ID: {0} - ({0:#x})'.format(inst_id))
self.poutput(' Created : {}'.format(inst_data[OmciCli.CREATED_KEY]))
self.poutput(' Modified : {}'.format(inst_data[OmciCli.MODIFIED_KEY]))
attributes = inst_data[OmciCli.ATTRIBUTES_KEY]
attr_names = attributes.keys()
attr_names.sort()
max_len = max([len(attr) for attr in attr_names])
for attr in attr_names:
name = self._cleanup_attribute_name(attr).ljust(max_len)
value = attributes[attr]
try:
ivalue = int(value)
self.poutput(' {0}: {1} - ({1:#x})'.format(name, ivalue))
except ValueError:
self.poutput(' {}: {}'.format(name, value))
if inst_id is not inst_ids[-1]:
self.poutput(linesep)
def _cleanup_attribute_name(self, attr):
"""Change underscore to space and capitalize first character"""
return ' '.join([v[0].upper() + v[1:] for v in attr.split('_')])
def _instance_to_dict(self, instance):
if not isinstance(instance, MibInstanceData):
raise TypeError('{} is not of type MibInstanceData'.format(type(instance)))
data = {
OmciCli.INSTANCE_ID_KEY: instance.instance_id,
OmciCli.CREATED_KEY: self._string_to_time(instance.created),
OmciCli.MODIFIED_KEY: self._string_to_time(instance.modified),
OmciCli.ATTRIBUTES_KEY: dict()
}
for attribute in instance.attributes:
data[OmciCli.ATTRIBUTES_KEY][attribute.name] = str(attribute.value)
return data
def _class_to_dict(self, val):
if not isinstance(val, MibClassData):
raise TypeError('{} is not of type MibClassData'.format(type(val)))
data = {
OmciCli.CLASS_ID_KEY: val.class_id,
}
for instance in val.instances:
data[instance.instance_id] = self._instance_to_dict(instance)
return data
def _device_to_dict(self, val):
if not isinstance(val, MibDeviceData):
raise TypeError('{} is not of type MibDeviceData'.format(type(val)))
data = {
OmciCli.DEVICE_ID_KEY: val.device_id,
OmciCli.CREATED_KEY: self._string_to_time(val.created),
OmciCli.LAST_SYNC_KEY: self._string_to_time(val.last_sync_time),
OmciCli.MDS_KEY: val.mib_data_sync,
OmciCli.VERSION_KEY: val.version,
OmciCli.ME_KEY: dict(),
OmciCli.MSG_TYPE_KEY: set()
}
for class_data in val.classes:
data[class_data.class_id] = self._class_to_dict(class_data)
for managed_entity in val.managed_entities:
data[OmciCli.ME_KEY][managed_entity.class_id] = managed_entity.name
for msg_type in val.message_types:
data[OmciCli.MSG_TYPE_KEY].add(msg_type.message_type)
return data
def _string_to_time(self, time):
return datetime.strptime(time, OmciCli.TIME_FORMAT) if len(time) else None
def help_show_me(self):
self.poutput('show_me [-d <device-id>]' +
linesep + '-d: <device-id> ONU Device ID')
@options([
make_option('-d', '--device-id', action="store", dest='device_id', type='string',
help='ONU Device ID', default=None),
])
def do_show_me(self, _line, opts):
""" Show supported OMCI Managed Entities"""
device_id = opts.device_id or self.device_id
try:
mib_db = self.get_device_mib(device_id, depth=1)
mib = self._device_to_dict(mib_db)
except Exception: # UnboundLocalError if Device ID not found in DB
self.poutput(self.colorize('Failed to get supported ME information for ONU {}'
.format(device_id), 'red'))
return
class_ids = [class_id for class_id in mib[OmciCli.ME_KEY].keys()]
class_ids.sort()
self.poutput('Supported Managed Entities for ONU {}'.format(device_id))
for class_id in class_ids:
self.poutput(' {0} - ({0:#x}): {1}'.format(class_id,
mib[OmciCli.ME_KEY][class_id]))
def help_show_msg_types(self):
self.poutput('show_msg_types [-d <device-id>]' +
linesep + '-d: <device-id> ONU Device ID')
@options([
make_option('-d', '--device-id', action="store", dest='device_id', type='string',
help='ONU Device ID', default=None),
])
def do_show_msg_types(self, _line, opts):
""" Show supported OMCI Message Types"""
device_id = opts.device_id or self.device_id
try:
mib_db = self.get_device_mib(device_id, depth=1)
mib = self._device_to_dict(mib_db)
except Exception: # UnboundLocalError if Device ID not found in DB
self.poutput(self.colorize('Failed to get supported Message Types for ONU {}'
.format(device_id), 'red'))
return
msg_types = [msg_type for msg_type in mib[OmciCli.MSG_TYPE_KEY]]
msg_types.sort()
self.poutput('Supported Message Types for ONU {}'.format(device_id))
for msg_type in msg_types:
self.poutput(' {0} - ({0:#x}): {1}'.
format(msg_type,
OmciCli.MSG_TYPE_TO_NAME.get(msg_type, 'Unknown')))
def get_devices(self):
stub = self.get_stub()
res = stub.ListDevices(Empty())
return res.items
def do_devices(self, line):
"""List devices registered in Voltha reduced for OMCI menu"""
devices = self.get_devices()
omit_fields = {
'adapter',
'model',
'hardware_version',
'images',
'firmware_version',
'serial_number',
'vlan',
'root',
'extra_args',
'proxy_address',
}
print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
def help_devices(self):
self.poutput('TODO: Provide some help')
def poutput(self, msg):
"""Convenient shortcut for self.stdout.write(); adds newline if necessary."""
if msg:
self.stdout.write(msg)
if msg[-1] != '\n':
self.stdout.write('\n')
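# --- Hedged mini-example (not part of the original CLI) ---
# Shows the MIB timestamp layout parsed by _string_to_time(); the timestamp
# string below is a made-up value in the '%Y%m%d-%H%M%S.%f' format used above.
if __name__ == '__main__':
    print(datetime.strptime('20180102-030405.000006', OmciCli.TIME_FORMAT))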
```
#### File: adapters/openolt/openolt_flow_mgr.py
```python
from voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC
import voltha.core.flow_decomposer as fd
import openolt_platform as platform
from voltha.adapters.openolt.protos import openolt_pb2
from voltha.registry import registry
HSIA_FLOW_INDEX = 0 # FIXME
DHCP_FLOW_INDEX = 1 # FIXME
DHCP_DOWNLINK_FLOW_INDEX = 6 # FIXME
EAPOL_FLOW_INDEX = 2 # FIXME
EAPOL_DOWNLINK_FLOW_INDEX = 3 # FIXME
EAPOL_DOWNLINK_SECONDARY_FLOW_INDEX = 4 # FIXME
EAPOL_UPLINK_SECONDARY_FLOW_INDEX = 5 # FIXME
EAP_ETH_TYPE = 0x888e
# FIXME - see also BRDCM_DEFAULT_VLAN in broadcom_onu.py
DEFAULT_MGMT_VLAN = 4091
class OpenOltFlowMgr(object):
def __init__(self, log, stub, device_id):
self.log = log
self.stub = stub
self.device_id = device_id
self.flow_proxy = registry('core').get_proxy(
'/devices/{}/flows'.format(self.device_id))
def add_flow(self, flow, is_down_stream):
self.log.debug('add flow', flow=flow, is_down_stream=is_down_stream)
classifier_info = dict()
action_info = dict()
in_port = fd.get_in_port(flow)
assert in_port is not None
for field in fd.get_ofb_fields(flow):
if field.type == fd.ETH_TYPE:
classifier_info['eth_type'] = field.eth_type
self.log.debug('field-type-eth-type',
eth_type=classifier_info['eth_type'])
elif field.type == fd.IP_PROTO:
classifier_info['ip_proto'] = field.ip_proto
self.log.debug('field-type-ip-proto',
ip_proto=classifier_info['ip_proto'])
elif field.type == fd.IN_PORT:
classifier_info['in_port'] = field.port
self.log.debug('field-type-in-port',
in_port=classifier_info['in_port'])
elif field.type == fd.VLAN_VID:
classifier_info['vlan_vid'] = field.vlan_vid & 0xfff
self.log.debug('field-type-vlan-vid',
vlan=classifier_info['vlan_vid'])
elif field.type == fd.VLAN_PCP:
classifier_info['vlan_pcp'] = field.vlan_pcp
self.log.debug('field-type-vlan-pcp',
pcp=classifier_info['vlan_pcp'])
elif field.type == fd.UDP_DST:
classifier_info['udp_dst'] = field.udp_dst
self.log.debug('field-type-udp-dst',
udp_dst=classifier_info['udp_dst'])
elif field.type == fd.UDP_SRC:
classifier_info['udp_src'] = field.udp_src
self.log.debug('field-type-udp-src',
udp_src=classifier_info['udp_src'])
elif field.type == fd.IPV4_DST:
classifier_info['ipv4_dst'] = field.ipv4_dst
self.log.debug('field-type-ipv4-dst',
ipv4_dst=classifier_info['ipv4_dst'])
elif field.type == fd.IPV4_SRC:
classifier_info['ipv4_src'] = field.ipv4_src
self.log.debug('field-type-ipv4-src',
ipv4_dst=classifier_info['ipv4_src'])
elif field.type == fd.METADATA:
classifier_info['metadata'] = field.table_metadata
self.log.debug('field-type-metadata',
metadata=classifier_info['metadata'])
else:
raise NotImplementedError('field.type={}'.format(
field.type))
for action in fd.get_actions(flow):
if action.type == fd.OUTPUT:
action_info['output'] = action.output.port
self.log.debug('action-type-output',
output=action_info['output'],
in_port=classifier_info['in_port'])
elif action.type == fd.POP_VLAN:
action_info['pop_vlan'] = True
self.log.debug('action-type-pop-vlan', in_port=in_port)
elif action.type == fd.PUSH_VLAN:
action_info['push_vlan'] = True
action_info['tpid'] = action.push.ethertype
self.log.debug('action-type-push-vlan',
push_tpid=action_info['tpid'], in_port=in_port)
if action.push.ethertype != 0x8100:
self.log.error('unhandled-tpid',
ethertype=action.push.ethertype)
elif action.type == fd.SET_FIELD:
# action_info['action_type'] = 'set_field'
_field = action.set_field.field.ofb_field
assert (action.set_field.field.oxm_class ==
OFPXMC_OPENFLOW_BASIC)
self.log.debug('action-type-set-field',
field=_field, in_port=in_port)
if _field.type == fd.VLAN_VID:
self.log.debug('set-field-type-vlan-vid',
vlan_vid=_field.vlan_vid & 0xfff)
action_info['vlan_vid'] = (_field.vlan_vid & 0xfff)
else:
self.log.error('unsupported-action-set-field-type',
field_type=_field.type)
else:
self.log.error('unsupported-action-type',
action_type=action.type, in_port=in_port)
# FIXME - Why ignore downstream flows?
if is_down_stream is False:
intf_id = platform.intf_id_from_uni_port_num(
classifier_info['in_port'])
onu_id = platform.onu_id_from_port_num(
classifier_info['in_port'])
self.divide_and_add_flow(intf_id, onu_id,
flow.priority, classifier_info,
action_info)
# else:
# self.log.info('ignore downstream flow', flow=flow,
# classifier_info=classifier_info,
# action_info=action_info)
# FIXME - No need for divide_and_add_flow if
# both upstream and downstream flows
# are acted upon (not just upstream flows).
def divide_and_add_flow(self, intf_id, onu_id, priority, classifier,
action):
if 'ip_proto' in classifier:
if classifier['ip_proto'] == 17:
self.log.debug('dhcp flow add')
self.add_dhcp_trap(intf_id, onu_id, priority, classifier,
action)
elif classifier['ip_proto'] == 2:
self.log.debug('igmp flow add ignored')
else:
self.log.debug("Invalid-Classifier-to-handle",
classifier=classifier,
action=action)
elif 'eth_type' in classifier:
if classifier['eth_type'] == EAP_ETH_TYPE:
self.log.debug('eapol flow add')
self.add_eapol_flow(intf_id, onu_id, priority)
elif 'push_vlan' in action:
self.add_data_flow(intf_id, onu_id, priority, classifier, action)
else:
self.log.debug('Invalid-flow-type-to-handle',
classifier=classifier,
action=action)
def add_data_flow(self, intf_id, onu_id, priority, uplink_classifier,
uplink_action):
downlink_classifier = dict(uplink_classifier)
downlink_action = dict(uplink_action)
uplink_classifier['pkt_tag_type'] = 'single_tag'
downlink_classifier['pkt_tag_type'] = 'double_tag'
downlink_classifier['vlan_vid'] = uplink_action['vlan_vid']
downlink_classifier['metadata'] = uplink_classifier['vlan_vid']
del downlink_action['push_vlan']
downlink_action['pop_vlan'] = True
# To-Do right now only one GEM port is supported, so below method
# will take care of handling all the p bits.
# We need to revisit when multiple gem ports per p bits are needed.
self.add_hsia_flow(intf_id, onu_id, priority, uplink_classifier,
uplink_action, downlink_classifier, downlink_action,
HSIA_FLOW_INDEX)
# Secondary EAP on the subscriber vlan
(eap_active, eap_priority) = self.is_eap_enabled(intf_id, onu_id)
if eap_active:
self.add_eapol_flow(intf_id, onu_id, eap_priority,
uplink_eapol_id=EAPOL_UPLINK_SECONDARY_FLOW_INDEX,
downlink_eapol_id=EAPOL_DOWNLINK_SECONDARY_FLOW_INDEX,
vlan_id=uplink_classifier['vlan_vid'])
def add_hsia_flow(self, intf_id, onu_id, priority, uplink_classifier,
uplink_action, downlink_classifier, downlink_action,
hsia_id):
gemport_id = platform.mk_gemport_id(onu_id)
flow_id = platform.mk_flow_id(intf_id, onu_id, hsia_id)
self.log.debug('add upstream flow', onu_id=onu_id,
classifier=uplink_classifier, action=uplink_action,
gemport_id=gemport_id, flow_id=flow_id)
flow = openolt_pb2.Flow(
onu_id=onu_id, flow_id=flow_id, flow_type="upstream",
access_intf_id=intf_id, gemport_id=gemport_id, priority=priority,
classifier=self.mk_classifier(uplink_classifier),
action=self.mk_action(uplink_action))
self.stub.FlowAdd(flow)
self.log.debug('add downstream flow', classifier=downlink_classifier,
action=downlink_action, gemport_id=gemport_id,
flow_id=flow_id)
flow = openolt_pb2.Flow(
onu_id=onu_id, flow_id=flow_id, flow_type="downstream",
access_intf_id=intf_id, gemport_id=gemport_id,
priority=priority,
classifier=self.mk_classifier(downlink_classifier),
action=self.mk_action(downlink_action))
self.stub.FlowAdd(flow)
def add_dhcp_trap(self, intf_id, onu_id, priority, classifier, action):
self.log.debug('add dhcp upstream trap', classifier=classifier,
action=action)
action.clear()
action['trap_to_host'] = True
classifier['pkt_tag_type'] = 'single_tag'
classifier.pop('vlan_vid', None)
gemport_id = platform.mk_gemport_id(onu_id)
flow_id = platform.mk_flow_id(intf_id, onu_id, DHCP_FLOW_INDEX)
upstream_flow = openolt_pb2.Flow(
onu_id=onu_id, flow_id=flow_id, flow_type="upstream",
access_intf_id=intf_id, network_intf_id=0, gemport_id=gemport_id,
priority=priority, classifier=self.mk_classifier(classifier),
action=self.mk_action(action))
self.stub.FlowAdd(upstream_flow)
# FIXME - Fix OpenOLT handling of downstream flows instead
# of duplicating the downstream flow from the upstream
# flow.
# FIXME - ONOS should send explicit upstream and downstream
# exact dhcp trap flow.
classifier['udp_src'] = 67
classifier['udp_dst'] = 68
classifier['pkt_tag_type'] = 'double_tag'
action.pop('push_vlan', None)
flow_id = platform.mk_flow_id(intf_id, onu_id,
DHCP_DOWNLINK_FLOW_INDEX)
downstream_flow = openolt_pb2.Flow(
onu_id=onu_id, flow_id=flow_id, flow_type="downstream",
access_intf_id=intf_id, network_intf_id=0, gemport_id=gemport_id,
priority=priority, classifier=self.mk_classifier(classifier),
action=self.mk_action(action))
self.log.debug('add dhcp downstream trap', access_intf_id=intf_id,
onu_id=onu_id, flow_id=flow_id)
self.stub.FlowAdd(downstream_flow)
def add_eapol_flow(self, intf_id, onu_id, priority,
uplink_eapol_id=EAPOL_FLOW_INDEX,
downlink_eapol_id=EAPOL_DOWNLINK_FLOW_INDEX,
vlan_id=DEFAULT_MGMT_VLAN):
# self.log.debug('add eapol flow pre-process',
# classifier=uplink_classifier)
# #action=uplink_action)
downlink_classifier = {}
downlink_classifier['eth_type'] = EAP_ETH_TYPE
downlink_classifier['pkt_tag_type'] = 'single_tag'
downlink_classifier['vlan_vid'] = vlan_id
downlink_action = {}
downlink_action['push_vlan'] = True
downlink_action['vlan_vid'] = vlan_id
uplink_classifier = {}
uplink_classifier['eth_type'] = EAP_ETH_TYPE
uplink_classifier['pkt_tag_type'] = 'single_tag'
uplink_classifier['vlan_vid'] = vlan_id
uplink_action = {}
uplink_action['trap_to_host'] = True
gemport_id = platform.mk_gemport_id(onu_id)
# Add Upstream EAPOL Flow.
uplink_flow_id = platform.mk_flow_id(intf_id, onu_id, uplink_eapol_id)
upstream_flow = openolt_pb2.Flow(
onu_id=onu_id, flow_id=uplink_flow_id, flow_type="upstream",
access_intf_id=intf_id, gemport_id=gemport_id, priority=priority,
classifier=self.mk_classifier(uplink_classifier),
action=self.mk_action(uplink_action))
self.stub.FlowAdd(upstream_flow)
# Add Downstream EAPOL Flow.
downlink_flow_id = platform.mk_flow_id(intf_id, onu_id,
downlink_eapol_id)
downstream_flow = openolt_pb2.Flow(
onu_id=onu_id, flow_id=downlink_flow_id, flow_type="downstream",
access_intf_id=intf_id, gemport_id=gemport_id,
classifier=self.mk_classifier(downlink_classifier),
action=self.mk_action(downlink_action))
self.stub.FlowAdd(downstream_flow)
self.log.debug('eap flows', upstream_flow=upstream_flow,
downstream_flow=downstream_flow)
def mk_classifier(self, classifier_info):
classifier = openolt_pb2.Classifier()
if 'eth_type' in classifier_info:
classifier.eth_type = classifier_info['eth_type']
if 'ip_proto' in classifier_info:
classifier.ip_proto = classifier_info['ip_proto']
if 'vlan_vid' in classifier_info:
classifier.o_vid = classifier_info['vlan_vid']
if 'metadata' in classifier_info:
classifier.i_vid = classifier_info['metadata']
if 'vlan_pcp' in classifier_info:
classifier.o_pbits = classifier_info['vlan_pcp']
if 'udp_src' in classifier_info:
classifier.src_port = classifier_info['udp_src']
if 'udp_dst' in classifier_info:
classifier.dst_port = classifier_info['udp_dst']
if 'ipv4_dst' in classifier_info:
classifier.dst_ip = classifier_info['ipv4_dst']
if 'ipv4_src' in classifier_info:
classifier.src_ip = classifier_info['ipv4_src']
if 'pkt_tag_type' in classifier_info:
if classifier_info['pkt_tag_type'] == 'single_tag':
classifier.pkt_tag_type = 'single_tag'
elif classifier_info['pkt_tag_type'] == 'double_tag':
classifier.pkt_tag_type = 'double_tag'
elif classifier_info['pkt_tag_type'] == 'untagged':
classifier.pkt_tag_type = 'untagged'
else:
classifier.pkt_tag_type = 'none'
return classifier
def mk_action(self, action_info):
action = openolt_pb2.Action()
if 'pop_vlan' in action_info:
action.o_vid = action_info['vlan_vid']
action.cmd.remove_outer_tag = True
elif 'push_vlan' in action_info:
action.o_vid = action_info['vlan_vid']
action.cmd.add_outer_tag = True
elif 'trap_to_host' in action_info:
action.cmd.trap_to_host = True
else:
self.log.info('Invalid-action-field', action_info=action_info)
return
return action
def is_eap_enabled(self, intf_id, onu_id):
flows = self.flow_proxy.get('/').items
for flow in flows:
eap_flow = False
eap_intf_id = None
eap_onu_id = None
for field in fd.get_ofb_fields(flow):
if field.type == fd.ETH_TYPE:
if field.eth_type == EAP_ETH_TYPE:
eap_flow = True
if field.type == fd.IN_PORT:
eap_intf_id = platform.intf_id_from_uni_port_num(
field.port)
eap_onu_id = platform.onu_id_from_port_num(field.port)
if eap_flow:
self.log.debug('eap flow detected', onu_id=onu_id,
intf_id=intf_id, eap_intf_id=eap_intf_id,
eap_onu_id=eap_onu_id)
if eap_flow and intf_id == eap_intf_id and onu_id == eap_onu_id:
return (True, flow.priority)
return (False, 0)
```
#### File: voltha_doc/voltha/coordinator.py
```python
from consul import ConsulException
from consul.twisted import Consul
from requests import ConnectionError
from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.error import DNSLookupError
from zope.interface import implementer
from leader import Leader
from common.utils.asleep import asleep
from common.utils.message_queue import MessageQueue
from voltha.registry import IComponent
from worker import Worker
from simplejson import dumps, loads
from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
log = get_logger()
class StaleMembershipEntryException(Exception):
pass
@implementer(IComponent)
class Coordinator(object):
"""
An app shall instantiate only one Coordinator (singleton).
A single instance of this object shall take care of all external
communication with consul and, via consul, all coordination activities with
its clustered peers. Roles include:
- registering an ephemeral membership entry (k/v record) in consul
- participating in a symmetric leader election, and potentially assuming
the leader's role. What leadership entails is not a concern for the
coordinator; it simply instantiates (and shuts down) a leader class
when it gains (or loses) leadership.
"""
CONNECT_RETRY_INTERVAL_SEC = 1
RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
# Public methods:
def __init__(self,
internal_host_address,
external_host_address,
instance_id,
rest_port,
config,
consul='localhost:8500',
container_name_regex='^.*\.([0-9]+)\..*$'):
log.info('initializing-coordinator')
self.config = config['coordinator']
self.worker_config = config['worker']
self.leader_config = config['leader']
self.membership_watch_relatch_delay = config.get(
'membership_watch_relatch_delay', 0.1)
self.tracking_loop_delay = self.config.get(
'tracking_loop_delay', 1)
self.session_renewal_timeout = self.config.get(
'session_renewal_timeout', 5)
self.session_renewal_loop_delay = self.config.get(
'session_renewal_loop_delay', 3)
self.membership_maintenance_loop_delay = self.config.get(
'membership_maintenance_loop_delay', 5)
self.session_time_to_live = self.config.get(
'session_time_to_live', 10)
self.prefix = self.config.get('voltha_kv_prefix', 'service/voltha')
self.leader_prefix = '/'.join((self.prefix, self.config.get(
self.config['leader_key'], 'leader')))
self.membership_prefix = '/'.join((self.prefix, self.config.get(
self.config['membership_key'], 'members'), ''))
self.assignment_prefix = '/'.join((self.prefix, self.config.get(
self.config['assignment_key'], 'assignments'), ''))
self.workload_prefix = '/'.join((self.prefix, self.config.get(
self.config['workload_key'], 'work'), ''))
self.core_store_prefix = '/'.join((self.prefix, self.config.get(
self.config['core_store_key'], 'data/core')))
self.core_store_assignment_key = self.core_store_prefix + \
'/assignment'
self.core_storage_suffix = 'core_store'
self.retries = 0
self.instance_id = instance_id
self.internal_host_address = internal_host_address
self.external_host_address = external_host_address
self.rest_port = rest_port
self.membership_record_key = self.membership_prefix + self.instance_id
self.session_id = None
self.i_am_leader = False
self.leader_id = None # will be the instance id of the current leader
self.shutting_down = False
self.leader = None
self.membership_callback = None
self.worker = Worker(self.instance_id, self)
self.host = consul.split(':')[0].strip()
self.port = int(consul.split(':')[1].strip())
# TODO need to handle reconnect events properly
self.consul = Consul(host=self.host, port=self.port)
self.container_name_regex = container_name_regex
self.wait_for_leader_deferreds = []
self.peers_mapping_queue = MessageQueue()
def start(self):
log.debug('starting')
reactor.callLater(0, self._async_init)
log.info('started')
return self
@inlineCallbacks
def stop(self):
log.debug('stopping')
self.shutting_down = True
yield self._delete_session() # this will delete the leader lock too
yield self.worker.stop()
if self.leader is not None:
yield self.leader.stop()
self.leader = None
log.info('stopped')
def wait_for_a_leader(self):
"""
Async wait till a leader is detected/elected. The deferred will be
called with the leader's instance_id
:return: Deferred.
"""
d = Deferred()
if self.leader_id is not None:
d.callback(self.leader_id)
return d
else:
self.wait_for_leader_deferreds.append(d)
return d
# Wait for a core data id to be assigned to this voltha instance
@inlineCallbacks
def get_core_store_id_and_prefix(self):
core_store_id = yield self.worker.get_core_store_id()
returnValue((core_store_id, self.core_store_prefix))
def recv_peers_map(self):
return self.peers_mapping_queue.get()
def publish_peers_map_change(self, msg):
self.peers_mapping_queue.put(msg)
# Proxy methods for consul with retry support
def kv_get(self, *args, **kw):
return self._retry('GET', *args, **kw)
def kv_put(self, *args, **kw):
return self._retry('PUT', *args, **kw)
def kv_delete(self, *args, **kw):
return self._retry('DELETE', *args, **kw)
# Methods exposing key membership information
@inlineCallbacks
def get_members(self):
"""Return list of all members"""
_, members = yield self.kv_get(self.membership_prefix, recurse=True)
returnValue([member['Key'][len(self.membership_prefix):]
for member in members])
# Private (internal) methods:
@inlineCallbacks
def _async_init(self):
yield self._create_session()
yield self._create_membership_record()
yield self._start_leader_tracking()
yield self.worker.start()
def _backoff(self, msg):
wait_time = self.RETRY_BACKOFF[min(self.retries,
len(self.RETRY_BACKOFF) - 1)]
self.retries += 1
log.info(msg, retry_in=wait_time)
return asleep(wait_time)
def _clear_backoff(self):
if self.retries:
log.info('reconnected-to-consul', after_retries=self.retries)
self.retries = 0
@inlineCallbacks
def _create_session(self):
@inlineCallbacks
def _create_session():
consul = yield self.get_consul()
# create consul session
self.session_id = yield consul.session.create(
behavior='release', ttl=self.session_time_to_live,
lock_delay=1)
log.info('created-consul-session', session_id=self.session_id)
self._start_session_tracking()
yield self._retry(_create_session)
@inlineCallbacks
def _delete_session(self):
try:
yield self.consul.session.destroy(self.session_id)
except Exception as e:
log.exception('failed-to-delete-session',
session_id=self.session_id)
@inlineCallbacks
def _create_membership_record(self):
yield self._do_create_membership_record_with_retries()
reactor.callLater(0, self._maintain_membership_record)
@inlineCallbacks
def _maintain_membership_record(self):
try:
while 1:
valid_membership = yield self._assert_membership_record_valid()
if not valid_membership:
log.info('recreating-membership-before',
session=self.session_id)
yield self._do_create_membership_record_with_retries()
log.info('recreating-membership-after',
session=self.session_id)
else:
log.debug('valid-membership', session=self.session_id)
# Async sleep before checking the membership record again
yield asleep(self.membership_maintenance_loop_delay)
except Exception as e:
log.exception('unexpected-error-leader-tracking', e=e)
finally:
# except in shutdown, the loop must continue (after a short delay)
if not self.shutting_down:
reactor.callLater(self.membership_watch_relatch_delay,
self._maintain_membership_record)
def _create_membership_record_data(self):
member_record = dict()
member_record['status'] = 'alive'
member_record['host_address'] = self.external_host_address
return member_record
@inlineCallbacks
def _assert_membership_record_valid(self):
try:
log.info('membership-record-before')
is_timeout, (_, record) = yield \
self.consul_get_with_timeout(
key=self.membership_record_key,
index=0,
timeout=5)
if is_timeout:
returnValue(False)
log.info('membership-record-after', record=record)
if record is None or \
'Session' not in record or \
record['Session'] != self.session_id:
log.info('membership-record-change-detected',
old_session=self.session_id,
record=record)
returnValue(False)
else:
returnValue(True)
except Exception as e:
log.exception('membership-validation-exception', e=e)
returnValue(False)
@inlineCallbacks
def _do_create_membership_record_with_retries(self):
while 1:
log.info('recreating-membership', session=self.session_id)
result = yield self._retry(
'PUT',
self.membership_record_key,
dumps(self._create_membership_record_data()),
acquire=self.session_id)
if result:
log.info('new-membership-record-created',
session=self.session_id)
break
else:
log.warn('cannot-create-membership-record')
yield self._backoff('stale-membership-record')
def _start_session_tracking(self):
reactor.callLater(0, self._session_tracking_loop)
@inlineCallbacks
def _session_tracking_loop(self):
@inlineCallbacks
def _redo_session():
log.info('_redo_session-before')
yield self._delete_session()
# Create a new consul connection/session with the configured TTL
try:
self.consul = Consul(host=self.host, port=self.port)
self.session_id = yield self.consul.session.create(
behavior='release',
ttl=self.session_time_to_live,
lock_delay=1)
log.info('new-consul-session', session=self.session_id)
except Exception as e:
log.exception('could-not-create-a-consul-session', e=e)
@inlineCallbacks
def _renew_session(m_callback):
try:
log.debug('_renew_session-before')
consul_ref = self.consul
result = yield consul_ref.session.renew(
session_id=self.session_id)
log.info('just-renewed-session', result=result)
if not m_callback.called:
# Triggering callback will cancel the timeout timer
log.info('trigger-callback-to-cancel-timeout-timer')
m_callback.callback(result)
else:
# Timeout event has already been called. Just ignore
# this event
log.info('renew-called-after-timeout',
new_consul_ref=self.consul,
old_consul_ref=consul_ref)
except Exception as e:
# Let the invoking method receive a timeout
log.exception('could-not-renew-session', e=e)
try:
while 1:
log.debug('session-tracking-start')
rcvd = DeferredWithTimeout(
timeout=self.session_renewal_timeout)
_renew_session(rcvd)
try:
_ = yield rcvd
except TimeOutError as e:
log.info('session-renew-timeout', e=e)
# Redo the session
yield _redo_session()
except Exception as e:
log.exception('session-renew-exception', e=e)
else:
log.debug('successfully-renewed-session')
# Async sleep before the next session tracking
yield asleep(self.session_renewal_loop_delay)
except Exception as e:
log.exception('renew-exception', e=e)
finally:
reactor.callLater(self.session_renewal_loop_delay,
self._session_tracking_loop)
def _start_leader_tracking(self):
reactor.callLater(0, self._leadership_tracking_loop)
@inlineCallbacks
def _leadership_tracking_loop(self):
try:
# Attempt to acquire leadership lock. True indicates success;
# False indicates there is already a leader. Its instance id
# is then the value under the leader key service/voltha/leader.
# attempt acquire leader lock
log.info('leadership-attempt-before')
result = yield self._retry('PUT',
self.leader_prefix,
self.instance_id,
acquire=self.session_id)
log.info('leadership-attempt-after')
# read it back before being too happy; seeing our session id is a
# proof and now we have the change id that we can use to reliably
# track any changes. In an unlikely scenario where the leadership
# key gets wiped out administratively since the previous line,
# the returned record can be None. Handle it.
(index, record) = yield self._retry('GET',
self.leader_prefix)
log.info('leader-prefix',
i_am_leader=result, index=index, record=record)
if record is not None:
if result is True:
if record['Session'] == self.session_id:
yield self._assert_leadership()
else:
pass # confusion; need to retry leadership
else:
leader_id = record['Value']
yield self._assert_nonleadership(leader_id)
# if record was none, we shall try leadership again
last = record
while last is not None:
# this shall return only when update is made to leader key
# or expires after 5 seconds wait
is_timeout, (tmp_index, updated) = yield \
self.consul_get_with_timeout(
key=self.leader_prefix,
index=index,
timeout=5)
# Timeout means either there is a lost connectivity to
# consul or there are no change to that key. Do nothing.
if is_timeout:
continue
# After timeout event the index returned from
# consul_get_with_timeout is None. If we are here it's not a
# timeout, therefore the index is a valid one.
index=tmp_index
if updated is None or updated != last:
log.info('leader-key-change',
index=index, updated=updated, last=last)
# leadership has changed or vacated (or forcefully
# removed), apply now
# If I was previously the leader then assert a non
# leadership role before going for election
if self.i_am_leader:
log.info('leaving-leadership',
leader=self.instance_id)
yield self._assert_nonleadership(self.instance_id)
break
last = updated
except Exception as e:
log.exception('unexpected-error-leader-tracking', e=e)
finally:
# except in shutdown, the loop must continue (after a short delay)
if not self.shutting_down:
reactor.callLater(self.tracking_loop_delay,
self._leadership_tracking_loop)
@inlineCallbacks
def _assert_leadership(self):
"""(Re-)assert leadership"""
if not self.i_am_leader:
self.i_am_leader = True
self._set_leader_id(self.instance_id)
yield self._just_gained_leadership()
@inlineCallbacks
def _assert_nonleadership(self, leader_id):
"""(Re-)assert non-leader role"""
# update leader_id anyway
self._set_leader_id(leader_id)
if self.i_am_leader:
self.i_am_leader = False
yield self._just_lost_leadership()
def _set_leader_id(self, leader_id):
self.leader_id = leader_id
deferreds, self.wait_for_leader_deferreds = \
self.wait_for_leader_deferreds, []
for d in deferreds:
d.callback(leader_id)
def _just_gained_leadership(self):
log.info('became-leader')
self.leader = Leader(self)
return self.leader.start()
def _just_lost_leadership(self):
log.info('lost-leadership')
return self._halt_leader()
def _halt_leader(self):
if self.leader:
d = self.leader.stop()
self.leader = None
return d
def get_consul(self):
return self.consul
@inlineCallbacks
def _retry(self, operation, *args, **kw):
while 1:
try:
consul = yield self.get_consul()
log.info('start', operation=operation, args=args)
if operation == 'GET':
result = yield consul.kv.get(*args, **kw)
elif operation == 'PUT':
for name, value in kw.items():
if name == 'acquire':
if value != self.session_id:
log.info('updating-session-in-put-operation',
old_session=value,
new_session=self.session_id)
kw['acquire'] = self.session_id
break
result = yield consul.kv.put(*args, **kw)
elif operation == 'DELETE':
result = yield consul.kv.delete(*args, **kw)
else:
# Default case - consider operation as a function call
result = yield operation(*args, **kw)
self._clear_backoff()
break
except ConsulException as e:
log.exception('consul-not-up',
operation=operation,
args=args,
session=self.consul.Session,
e=e)
yield self._backoff('consul-not-up')
except ConnectionError as e:
log.exception('cannot-connect-to-consul',
operation=operation,
args=args,
session=self.consul.Session,
e=e)
yield self._backoff('cannot-connect-to-consul')
except DNSLookupError as e:
log.info('dns-lookup-failed', operation=operation, args=args,
host=self.host)
yield self._backoff('dns-lookup-failed')
except StaleMembershipEntryException as e:
log.exception('stale-membership-record-in-the-way',
operation=operation,
args=args,
session=self.consul.Session,
e=e)
yield self._backoff('stale-membership-record-in-the-way')
except Exception as e:
if not self.shutting_down:
log.exception(e)
yield self._backoff('unknown-error')
log.info('end', operation=operation, args=args)
returnValue(result)
@inlineCallbacks
def consul_get_with_timeout(self, key, timeout, **kw):
"""
Query consul with a timeout
:param key: Key to query
:param timeout: timeout value
:param kw: additional key-value params
:return: (is_timeout, (index, result)).
"""
@inlineCallbacks
def _get(key, m_callback):
try:
(index, result) = yield self._retry('GET', key, **kw)
if not m_callback.called:
log.debug('got-result-cancelling-timer')
m_callback.callback((index, result))
except Exception as e:
log.exception('got-exception', e=e)
try:
rcvd = DeferredWithTimeout(timeout=timeout)
_get(key, rcvd)
try:
result = yield rcvd
log.debug('result-received', result=result)
returnValue((False, result))
except TimeOutError as e:
log.debug('timeout-or-no-data-change', key=key)
except Exception as e:
log.exception('exception', e=e)
except Exception as e:
log.exception('exception', e=e)
returnValue((True, (None, None)))
```
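For orientation, the constructor above indexes directly into `leader_key`, `membership_key`, `assignment_key`, `workload_key`, and `core_store_key` and falls back to defaults for the timing values, so the config it expects looks roughly like the sketch below. This is a minimal sketch for illustration only; the values are placeholders, not settings taken from any real VOLTHA deployment.

```python
# Illustrative only: a config dict shaped the way Coordinator.__init__ reads it.
# Key names come from the constructor above; every value here is a placeholder.
example_config = {
    'coordinator': {
        'voltha_kv_prefix': 'service/voltha',
        'leader_key': 'leader',
        'membership_key': 'members',
        'assignment_key': 'assignments',
        'workload_key': 'work',
        'core_store_key': 'data/core',
        'tracking_loop_delay': 1,
        'session_renewal_timeout': 5,
        'session_renewal_loop_delay': 3,
        'membership_maintenance_loop_delay': 5,
        'session_time_to_live': 10,
    },
    'worker': {},
    'leader': {'members_track_error_to_prevent_flood': 1},
}
```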
#### File: kpi/olt/olt_pm_metrics.py
```python
from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig
from voltha.extensions.kpi.adapter_pm_metrics import AdapterPmMetrics
class OltPmMetrics(AdapterPmMetrics):
"""
Shared OLT Device Adapter PM Metrics Manager
This class specifically addresses general OLT PM (health, ...).
Area-specific PM (OMCI, PON, UNI) is supported in encapsulated classes
accessible from this object.
"""
def __init__(self, adapter_agent, device_id, grouped=False, freq_override=False,
**kwargs):
"""
Initializer for shared OLT Device Adapter PM metrics
:param adapter_agent: (AdapterAgent) Adapter agent for the device
:param device_id: (str) Device ID
:param grouped: (bool) Flag indicating if statistics are managed as a group
:param freq_override: (bool) Flag indicating if frequency collection can be specified
on a per group basis
:param kwargs: (dict) Device Adapter specific values. For an OLT Device adapter, the
expected key-value pairs are listed below. If not provided, the
associated PM statistics are not gathered:
'nni-ports': List of objects that provide NNI (northbound) port statistics
'pon-ports': List of objects that provide PON port statistics
"""
super(OltPmMetrics, self).__init__(adapter_agent, device_id,
grouped=grouped, freq_override=freq_override,
**kwargs)
# PM Config Types are COUNTER, GAUGE, and STATE (GAUGE is misspelled as GUAGE in device.proto)
self.nni_pm_names = {
('admin_state', PmConfig.STATE),
('oper_status', PmConfig.STATE),
('port_no', PmConfig.GUAGE), # Device and logical_device port numbers same
('rx_packets', PmConfig.COUNTER),
('rx_bytes', PmConfig.COUNTER),
('rx_dropped', PmConfig.COUNTER),
('rx_errors', PmConfig.COUNTER),
('rx_bcast', PmConfig.COUNTER),
('rx_mcast', PmConfig.COUNTER),
('tx_packets', PmConfig.COUNTER),
('tx_bytes', PmConfig.COUNTER),
('tx_dropped', PmConfig.COUNTER),
('tx_bcast', PmConfig.COUNTER),
('tx_mcast', PmConfig.COUNTER),
#
# Commented out are from spec. May not be supported or implemented yet
# ('rx_64', PmConfig.COUNTER),
# ('rx_65_127', PmConfig.COUNTER),
# ('rx_128_255', PmConfig.COUNTER),
# ('rx_256_511', PmConfig.COUNTER),
# ('rx_512_1023', PmConfig.COUNTER),
# ('rx_1024_1518', PmConfig.COUNTER),
# ('rx_frame_err', PmConfig.COUNTER),
# ('rx_over_err', PmConfig.COUNTER),
# ('rx_crc_err', PmConfig.COUNTER),
# ('rx_64', PmConfig.COUNTER),
# ('tx_65_127', PmConfig.COUNTER),
# ('tx_128_255', PmConfig.COUNTER),
# ('tx_256_511', PmConfig.COUNTER),
# ('tx_512_1023', PmConfig.COUNTER),
# ('tx_1024_1518', PmConfig.COUNTER),
# ('collisions', PmConfig.COUNTER),
}
self.pon_pm_names = {
('admin_state', PmConfig.STATE),
('oper_status', PmConfig.STATE),
('port_no', PmConfig.GUAGE), # Physical device port number
('pon_id', PmConfig.GUAGE),
('rx_packets', PmConfig.COUNTER),
('rx_bytes', PmConfig.COUNTER),
('tx_packets', PmConfig.COUNTER),
('tx_bytes', PmConfig.COUNTER),
('tx_bip_errors', PmConfig.COUNTER),
('in_service_onus', PmConfig.GUAGE),
('closest_onu_distance', PmConfig.GUAGE)
}
self.onu_pm_names = {
('pon_id', PmConfig.GUAGE),
('onu_id', PmConfig.GUAGE),
('fiber_length', PmConfig.GUAGE),
('equalization_delay', PmConfig.GUAGE),
('rssi', PmConfig.GUAGE), #
}
self.gem_pm_names = {
('pon_id', PmConfig.GUAGE),
('onu_id', PmConfig.GUAGE),
('gem_id', PmConfig.GUAGE),
('alloc_id', PmConfig.GUAGE),
('rx_packets', PmConfig.COUNTER),
('rx_bytes', PmConfig.COUNTER),
('tx_packets', PmConfig.COUNTER),
('tx_bytes', PmConfig.COUNTER),
}
self.nni_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
for (m, t) in self.nni_pm_names}
self.pon_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
for (m, t) in self.pon_pm_names}
self.onu_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
for (m, t) in self.onu_pm_names}
self.gem_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
for (m, t) in self.gem_pm_names}
self._nni_ports = kwargs.pop('nni-ports', None)
self._pon_ports = kwargs.pop('pon-ports', None)
def update(self, pm_config):
try:
# TODO: Test frequency override capability for a particular group
if self.default_freq != pm_config.default_freq:
# Update the callback to the new frequency.
self.default_freq = pm_config.default_freq
self.lc.stop()
self.lc.start(interval=self.default_freq / 10)
if pm_config.grouped:
for group in pm_config.groups:
group_config = self.pm_group_metrics.get(group.group_name)
if group_config is not None:
group_config.enabled = group.enabled
else:
for m in pm_config.metrics:
self.nni_metrics_config[m.name].enabled = m.enabled
self.pon_metrics_config[m.name].enabled = m.enabled
self.onu_metrics_config[m.name].enabled = m.enabled
self.gem_metrics_config[m.name].enabled = m.enabled
except Exception as e:
self.log.exception('update-failure', e=e)
raise
def make_proto(self, pm_config=None):
if pm_config is None:
pm_config = PmConfigs(id=self.device_id, default_freq=self.default_freq,
grouped=self.grouped,
freq_override=self.freq_override)
metrics = set()
have_nni = self._nni_ports is not None and len(self._nni_ports) > 0
have_pon = self._pon_ports is not None and len(self._pon_ports) > 0
if self.grouped:
if have_nni:
pm_ether_stats = PmGroupConfig(group_name='Ethernet',
group_freq=self.default_freq,
enabled=True)
self.pm_group_metrics[pm_ether_stats.group_name] = pm_ether_stats
else:
pm_ether_stats = None
if have_pon:
pm_pon_stats = PmGroupConfig(group_name='PON',
group_freq=self.default_freq,
enabled=True)
pm_onu_stats = PmGroupConfig(group_name='ONU',
group_freq=self.default_freq,
enabled=True)
pm_gem_stats = PmGroupConfig(group_name='GEM',
group_freq=self.default_freq,
enabled=True)
self.pm_group_metrics[pm_pon_stats.group_name] = pm_pon_stats
self.pm_group_metrics[pm_onu_stats.group_name] = pm_onu_stats
self.pm_group_metrics[pm_gem_stats.group_name] = pm_gem_stats
else:
pm_pon_stats = None
pm_onu_stats = None
pm_gem_stats = None
else:
pm_ether_stats = pm_config if have_nni else None
pm_pon_stats = pm_config if have_pon else None
pm_onu_stats = pm_config if have_pon else None
pm_gem_stats = pm_config if have_pon else None
if have_nni:
for m in sorted(self.nni_metrics_config):
pm = self.nni_metrics_config[m]
if not self.grouped:
if pm.name in metrics:
continue
metrics.add(pm.name)
pm_ether_stats.metrics.extend([PmConfig(name=pm.name,
type=pm.type,
enabled=pm.enabled)])
if have_pon:
for m in sorted(self.pon_metrics_config):
pm = self.pon_metrics_config[m]
if not self.grouped:
if pm.name in metrics:
continue
metrics.add(pm.name)
pm_pon_stats.metrics.extend([PmConfig(name=pm.name,
type=pm.type,
enabled=pm.enabled)])
for m in sorted(self.onu_metrics_config):
pm = self.onu_metrics_config[m]
if not self.grouped:
if pm.name in metrics:
continue
metrics.add(pm.name)
pm_onu_stats.metrics.extend([PmConfig(name=pm.name,
type=pm.type,
enabled=pm.enabled)])
for m in sorted(self.gem_metrics_config):
pm = self.gem_metrics_config[m]
if not self.grouped:
if pm.name in metrics:
continue
metrics.add(pm.name)
pm_gem_stats.metrics.extend([PmConfig(name=pm.name,
type=pm.type,
enabled=pm.enabled)])
if self.grouped:
pm_config.groups.extend([stats for stats in
self.pm_group_metrics.itervalues()])
return pm_config
def collect_metrics(self, metrics=None):
# TODO: Currently PM collection is done for all metrics/groups on a single timer
if metrics is None:
metrics = dict()
if self.pm_group_metrics['Ethernet'].enabled:
for port in self._nni_ports:
name = 'nni.{}'.format(port.port_no)
metrics[name] = self.collect_group_metrics(port,
self.nni_pm_names,
self.nni_metrics_config)
for port in self._pon_ports:
if self.pm_group_metrics['PON'].enabled:
name = 'pon.{}'.format(port.pon_id)
metrics[name] = self.collect_group_metrics(port,
self.pon_pm_names,
self.pon_metrics_config)
for onu_id in port.onu_ids:
onu = port.onu(onu_id)
if onu is not None:
if self.pm_group_metrics['ONU'].enabled:
name = 'pon.{}.onu.{}'.format(port.pon_id, onu.onu_id)
metrics[name] = self.collect_group_metrics(onu,
self.onu_pm_names,
self.onu_metrics_config)
if self.pm_group_metrics['GEM'].enabled:
for gem in onu.gem_ports:
if not gem.multicast:
name = 'pon.{}.onu.{}.gem.{}'.format(port.pon_id,
onu.onu_id,
gem.gem_id)
metrics[name] = self.collect_group_metrics(onu,
self.gem_pm_names,
self.gem_metrics_config)
# TODO: Do any multicast GEM PORT metrics here...
return metrics
```
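collect_metrics() above keys its results by a dotted path built from the NNI port number, PON id, ONU id, and GEM id. The sketch below only illustrates that naming scheme; the identifiers and metric values are fabricated.

```python
# Illustrative only: the naming scheme produced by collect_metrics(),
# with fabricated identifiers and metric values.
example_metrics = {
    'nni.1': {'rx_packets': 1000, 'tx_packets': 900},
    'pon.0': {'rx_packets': 500, 'tx_bip_errors': 0},
    'pon.0.onu.1': {'rssi': -20.0, 'fiber_length': 10},
    'pon.0.onu.1.gem.1024': {'rx_bytes': 4096, 'tx_bytes': 2048},
}
```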
#### File: voltha_doc/voltha/leader.py
```python
import re
from hash_ring import HashRing
from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.base import DelayedCall
from twisted.internet.defer import inlineCallbacks, DeferredList, returnValue
from simplejson import dumps, loads
from common.utils.asleep import asleep
from common.utils.id_generation import get_next_core_id
log = get_logger()
class ConfigMappingException(Exception):
pass
class Leader(object):
"""
A single instance of this object shall exist across the whole cluster.
This is guaranteed by the coordinator which instantiates this class
only when it secured the leadership lock, as well as calling the halt()
method in case it loses the leadership lock.
"""
ID_EXTRACTOR = '^(%s)([^/]+)$'
CORE_STORE_KEY_EXTRACTOR = '^%s/(?P<core_store_id>[^/]+)/root$'
START_TIMESTAMP_EXTRACTOR = '^.*_([0-9]+)$'
ASSIGNMENT_ID_EXTRACTOR = '^(%s)([^/]+)/core_store$'
# Public methods:
def __init__(self, coordinator):
self.coord = coordinator
self.halted = False
self.soak_time = 3 # soak till membership/workload changes settle
self.workload = []
self.members = []
self.core_store_ids = []
self.core_store_assignment = None
self.reassignment_soak_timer = None
self.core_store_reassignment_soak_timer = None
self.workload_id_match = re.compile(
self.ID_EXTRACTOR % self.coord.workload_prefix).match
self.member_id_match = re.compile(
self.ID_EXTRACTOR % self.coord.membership_prefix).match
self.core_data_id_match = re.compile(
self.CORE_STORE_KEY_EXTRACTOR % self.coord.core_store_prefix).match
self.core_match = re.compile(self.coord.container_name_regex).match
self.timestamp_match = re.compile(self.START_TIMESTAMP_EXTRACTOR).match
self.assignment_id_match = re.compile(
self.ASSIGNMENT_ID_EXTRACTOR % self.coord.assignment_prefix).match
self.members_tracking_sleep_to_prevent_flood = \
self.coord.leader_config.get((self.coord.leader_config[
'members_track_error_to_prevent_flood']), 1)
@inlineCallbacks
def start(self):
log.debug('starting')
# yield self._validate_workload()
yield self._start_tracking_assignments()
log.info('started')
def stop(self):
"""Suspend leadership duties immediately"""
log.debug('stopping')
self.halted = True
# any active cancellations, releases, etc., should happen here
if isinstance(self.reassignment_soak_timer, DelayedCall):
if not self.reassignment_soak_timer.called:
self.reassignment_soak_timer.cancel()
if isinstance(self.core_store_reassignment_soak_timer, DelayedCall):
if not self.core_store_reassignment_soak_timer.called:
self.core_store_reassignment_soak_timer.cancel()
log.info('stopped')
# Private methods:
def _start_tracking_assignments(self):
"""
We must track both the cluster member list as well as the workload
list. Upon change in either, we must rerun our sharding algorithm
and reassign work as/if needed.
"""
reactor.callLater(0, self._track_members, 0)
@inlineCallbacks
def _get_core_store_mappings(self):
try:
# Get the mapping record
(_, mappings) = yield self.coord.kv_get(
self.coord.core_store_assignment_key, recurse=True)
if mappings:
self.core_store_assignment = loads(mappings[0]['Value'])
return
else: # Key has not been created yet
# Create the key with an empty dictionary value
value = dict()
result = yield self.coord.kv_put(
self.coord.core_store_assignment_key,
dumps(value))
if not result:
raise ConfigMappingException(self.coord.instance_id)
# Ensure the record was created
(_, mappings) = yield self.coord.kv_get(
self.coord.core_store_assignment_key, recurse=True)
self.core_store_assignment = loads(mappings[0]['Value'])
except Exception as e:
log.exception('error', e=e)
@inlineCallbacks
def _update_core_store_references(self):
try:
# Get the current set of configs keys
(_, results) = yield self.coord.kv_get(
self.coord.core_store_prefix, recurse=False, keys=True)
matches = (self.core_data_id_match(e) for e in results or [])
core_ids = [m.group(1) for m in matches if m is not None]
self.core_store_ids = core_ids
# Update the config mapping
self._get_core_store_mappings()
log.debug('core-data', core_ids=core_ids,
assignment=self.core_store_assignment)
except Exception as e:
log.exception('error-update-store', e=e)
def _sanitize_member_list(self, members):
# This method removes any duplicates from the member list using the
# voltha number from the member id and the time that voltha instance
# started, again from the member id. This method is meaningful only
# in a clustered environment (e.g. Docker swarm or Kubernetes). In
# a non-cluster environment the member id is formatted differently.
# In such a case, the method below will create an exception and
# return the member list as is.
try:
unique_members = {}
update_occurred = False
log.info('members', members=members)
for member in members:
log.info('member', member=member)
# Extract the swarm assigned number of the voltha instance
voltha_number = self.core_match(member['id']).group(1)
timestamp = self.timestamp_match(member['id']).group(1)
if voltha_number not in unique_members:
unique_members[voltha_number] = {'id': member['id'],
'timestamp': timestamp,
'host': member['host']}
else:
# Verify whether this member has the latest timestamp. If
# yes, overwrite the previous one
if unique_members[voltha_number]['timestamp'] < timestamp:
unique_members[voltha_number] = {'id': member['id'],
'timestamp': timestamp,
'host': member['host']}
update_occurred = True
if update_occurred:
updated_members = []
for _, unique_member in unique_members.iteritems():
updated_members.append({'host': unique_member['host'],
'id': unique_member['id']})
return updated_members
else:
return members
except Exception as e:
log.exception('extraction-error', e=e)
return members
@inlineCallbacks
def _is_temporal_state(self, members):
try:
# First get the current core assignments
(_, results) = yield self.coord.kv_get(
self.coord.assignment_prefix,
recurse=True)
log.debug('core-assignments', assignment=results)
if results:
old_assignment = [
{'id': self.assignment_id_match(e['Key']).group(2),
'core': e['Value']}
for e in results]
# If there are no curr_assignments then we are starting the
# system. In this case we should keep processing
if len(old_assignment) == 0:
returnValue(False)
# Tackle the simplest scenario - #members >= #old_assignment
if members is not None and len(members) >= len(old_assignment):
returnValue(False)
# Everything else is a temporal state
log.info('temporal-state-detected', members=members,
old_assignments=old_assignment)
returnValue(True)
else:
returnValue(False)
except Exception as e:
log.exception('temporal-state-error', e=e)
returnValue(True)
@inlineCallbacks
def _track_members(self, index):
previous_index = index
try:
log.info('member-tracking-before')
is_timeout, (tmp_index, results) = yield \
self.coord.consul_get_with_timeout(
key=self.coord.membership_prefix,
recurse=True,
index=index,
timeout=10)
# Check whether we are still the leader - a new regime may be in
# place by the time we see a membership update
if self.halted:
log.info('I am no longer the leader')
return
if is_timeout:
log.debug('timeout-or-no-membership-changed')
return
# This can happen if consul went down and came back with no data
if not results:
log.error('no-active-members')
# Bail out of leadership and go for an early election
self.coord._just_lost_leadership()
return
# After timeout event the index returned from
# consul_get_with_timeout is None. If we are here it's not a
# timeout, therefore the index is a valid one.
index=tmp_index
log.info('membership-tracking-data', index=index, results=results)
if previous_index != index:
log.info('membership-updated',
previous_index=previous_index, index=index)
# Rebuild the membership, if any
# Only members with valid session are considered active
members = [{'id': self.member_id_match(e['Key']).group(2),
'host': loads(e['Value'])['host_address']}
for e in results if 'Session' in e]
if members:
updated_members = self._sanitize_member_list(members)
else:
updated_members = None
log.info('active-members', active_members=members,
sanitized_members=updated_members)
# Check if we are in a temporal state. If true wait for the
# next membership changes
temporal_state = yield self._is_temporal_state(updated_members)
if temporal_state:
log.info('temporal-state-detected')
pass # Wait for next member list change
elif updated_members != self.members:
# The membership changed; update the current set of
# core store references
yield self._update_core_store_references()
log.info('membership-changed',
prev_members=self.members,
curr_members=updated_members,
core_store_mapping=self.core_store_assignment)
self.members = updated_members
self._restart_core_store_reassignment_soak_timer()
else:
log.debug('no-membership-change', index=index)
except Exception as e:
log.exception('members-track-error', e=e)
# to prevent flood
yield asleep(self.members_tracking_sleep_to_prevent_flood)
finally:
if not self.halted:
reactor.callLater(1, self._track_members, index)
def _restart_reassignment_soak_timer(self):
if self.reassignment_soak_timer is not None:
assert isinstance(self.reassignment_soak_timer, DelayedCall)
if not self.reassignment_soak_timer.called:
self.reassignment_soak_timer.cancel()
self.reassignment_soak_timer = reactor.callLater(
self.soak_time, self._reassign_work)
def _restart_core_store_reassignment_soak_timer(self):
if self.core_store_reassignment_soak_timer is not None:
assert isinstance(self.core_store_reassignment_soak_timer, DelayedCall)
if not self.core_store_reassignment_soak_timer.called:
self.core_store_reassignment_soak_timer.cancel()
self.core_store_reassignment_soak_timer = reactor.callLater(
self.soak_time, self._reassign_core_stores)
@inlineCallbacks
def _reassign_core_stores(self):
def _get_core_data_id_from_instance(instance_name):
for id, instance in self.core_store_assignment.iteritems():
if instance and instance['id'] == instance_name:
return id
try:
log.info('core-members', curr_members=self.members,
prev_members=self.core_store_assignment)
# 1. clear the mapping for instances that are no longer running
updated_mapping = dict()
existing_active_config_members = set()
cleared_config_ids = set()
inactive_members = set()
if self.core_store_assignment:
for id, instance in self.core_store_assignment.iteritems():
if instance not in self.members:
updated_mapping[id] = None
cleared_config_ids.add(id)
if instance:
inactive_members.add(instance['id'])
else:
updated_mapping[id] = instance
existing_active_config_members.add(instance['id'])
# 2. Update the mapping with the new set
current_id = max(self.core_store_assignment) \
if self.core_store_assignment else '0000'
for instance in self.members:
if instance['id'] not in existing_active_config_members:
# Add the member to the config map
if cleared_config_ids:
# There is an empty slot
next_id = cleared_config_ids.pop()
updated_mapping[next_id] = instance
else:
# There are no empty slots; create a new id
current_id = get_next_core_id(current_id)
updated_mapping[current_id] = instance
self.core_store_assignment = updated_mapping
log.info('updated-assignment',
core_store_assignment=self.core_store_assignment,
inactive_members=inactive_members)
# 3. save the mapping into consul
yield self.coord.kv_put(self.coord.core_store_assignment_key,
dumps(self.core_store_assignment))
# 4. Assign the new workload to the newly created members
curr_members_set = set([m['id'] for m in self.members])
new_members = curr_members_set.difference(
existing_active_config_members)
for new_member_id in new_members:
yield self.coord.kv_put(
self.coord.assignment_prefix
+ new_member_id + '/' +
self.coord.core_storage_suffix,
_get_core_data_id_from_instance(new_member_id))
# 5. Remove non-existent members
for member_id in inactive_members:
yield self.coord.kv_delete(
self.coord.assignment_prefix + member_id, recurse=True)
yield self.coord.kv_delete(
self.coord.membership_prefix + member_id,
recurse=True)
except Exception as e:
log.exception('config-reassignment-failure', e=e)
self._restart_core_store_reassignment_soak_timer()
```
|
{
"source": "jeff-vincent/docker-py",
"score": 3
}
|
#### File: docker-py/docker_lite_python/docker_lite.py
```python
import docker
class DockerLite:
def __init__(self):
self.client = docker.from_env()
def build_image(self, path_to_dir, resulting_image_name):
"""A method to build a Docker image from a Dockerfile.
Args:
path_to_dir: string: the path to the directory containing the Dockerfile
resulting_image_name: string: unique name for the image
Returns:
response: tuple: the built Image object and a generator of the build logs.
"""
response = self.client.images.build(
path=path_to_dir,
tag=resulting_image_name)
return response
def list_containers(self, all=None):
"""A method for listing Docker containers.
Returns only running Docker containers by default.
Args:
all: bool: optional
Returns:
response: List: A list of container objects.
"""
if all:
response = self.client.containers.list(all=True)
else:
response = self.client.containers.list()
return response
def get_container_by_name(self, existing_container_name):
"""A method for getting a Python object that represents
a given Docker container.
Args:
existing_container_name: string: the name of the Docker container
Returns:
response: Python object: a given Docker container
"""
response = self.client.containers.get(existing_container_name)
return response
def run_container(self, image_name, resulting_container_name, command=None, volumes=None):
"""A method for running a Docker container.
Requires a name for the container.
Args:
image_name: string: the name of the Docker image to run
can be local or in Docker Hub.
resulting_container_name: string: the name to set to the container
command: string: the command to run at startup: optional
volumes: dict: volumes to mount in the container: optional
Returns:
response: Python object: the container being run.
"""
response = self.client.containers.run(
image=image_name,
name=resulting_container_name,
command=command,
remove=True,
detach=True,
volumes=volumes)
return response
def exec_into_running_container(self, existing_container_name, command):
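"""A method for running a command inside a running Docker container.
Args:
existing_container_name: string: the name of the running container
command: string: the command to execute inside the container
Returns:
response: ExecResult: the exit code and output of the command
"""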
container = self.get_container_by_name(existing_container_name)
response = container.exec_run(command)
return response
def kill_container(self, existing_container_name):
"""A methond for stopping and removing a Docker container.
Args:
existing_container_name: string: the container to tear down
Returns:
0
"""
container = self.get_container_by_name(existing_container_name)
container.stop()
return 0
def list_images(self):
"""A method for listing all images on the system.
Args:
None
Returns:
image_list: List: a list of Python objects
representing all images on the system.
"""
image_list = self.client.images.list()
return image_list
def remove_unused_images(self):
"""A method for removing unused images.
Args:
None
Returns:
0
"""
self.client.images.prune()
return 0
def remove_all_images(self):
"""A method for removing ALL images.
Args:
None
Returns:
0
"""
image_list = self.list_images()
for image in image_list:
self.client.images.remove(image.id, force=True)
return 0
```
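The docstrings above cover the individual calls; a short usage sketch follows. It assumes a running local Docker daemon, that `docker_lite_python` is importable, and that `./myapp` contains a Dockerfile; the image and container names are made up.

```python
# Hypothetical end-to-end use of the DockerLite wrapper defined above.
from docker_lite_python import DockerLite

dl = DockerLite()
dl.build_image(path_to_dir='./myapp', resulting_image_name='myapp:latest')
container = dl.run_container(
    image_name='myapp:latest',
    resulting_container_name='myapp-demo',
    command='sleep infinity')
# Run a one-off command inside the running container.
result = dl.exec_into_running_container('myapp-demo', 'echo hello')
print(result.output)
# Stop the container; it was started with remove=True, so Docker removes it.
dl.kill_container('myapp-demo')
```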
#### File: examples/cross_platform_scripting/cross_platform_scripting.py
```python
from argparse import ArgumentParser
from docker_lite_python import DockerLite
class ExampleAdapter:
def __init__(self):
self.dl = DockerLite()
self.dfile_path = './'
def generate_container(self):
image = self.dl.build_image(
path_to_dir=self.dfile_path,
resulting_image_name='legacy-env')
container = self.dl.run_container(
image_name='legacy-env',
resulting_container_name='legacy-env',
command='sleep infinity')
return container
def pass_input(self, container_name, input):
intermediate_result = self.dl.exec_into_running_container(
existing_container_name=container_name,
command=input)
return intermediate_result.output.decode('utf-8')
def pass_output(self, intermediate_result):
adapted_result = intermediate_result + ' and some more stuff in Python or beyond!'
return adapted_result
def tidy_up(self):
self.dl.remove_all_images()
self.dl.kill_container('legacy-env')
def main():
ea = ExampleAdapter()
parser = ArgumentParser()
# g++ -o qq qq.cpp
parser.add_argument('example_input1', help='type: string: compile the "sub-code."')
# ./qq
parser.add_argument('example_input2', help='type: string: run the "sub-code."')
args = parser.parse_args()
container = ea.generate_container()
intermediate_result = ea.pass_input(container.name, args.example_input1)
intermediate_result = ea.pass_input(container.name, args.example_input2)
adapted_result = ea.pass_output(intermediate_result)
ea.tidy_up()
return adapted_result
if __name__ == '__main__':
main()
```
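The argparse help strings above point at the intended command-line usage; a hedged sketch of driving the script from Python follows. The script path is assumed, and the compile/run strings are taken from the inline comments for illustration.

```python
# Hypothetical invocation of the adapter script above.
import subprocess

subprocess.run([
    'python', 'cross_platform_scripting.py',
    'g++ -o qq qq.cpp',  # example_input1: compile the "sub-code"
    './qq',              # example_input2: run the "sub-code"
], check=True)
```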
|
{
"source": "jeff-vincent/orka-ansible-dynamic-inventory",
"score": 2
}
|
#### File: jeff-vincent/orka-ansible-dynamic-inventory/orka_inventory.py
```python
import argparse
import json
import os
import subprocess
class OrkaAnsibleInventory:
def __init__(self):
self.vm_data = None
self.filtered_data = None
self.inventory = {
'group': {'hosts': []},
'vars': [],
'_meta': {
'hostvars': {}
}
}
def get_current_vm_data(self):
"""Get current VM data related to the current CLI user.
Note
----
The user must be logged in to the Orka CLI.
"""
completed_process = subprocess.run(
['orka', 'vm', 'list', '--json'],
capture_output=True)
dict_string = completed_process.stdout.decode('utf-8')
data = json.loads(dict_string)
self.vm_data = data['virtual_machine_resources']
def get_deployed_vms(self):
"""Filter current VM data to isolate deployed VMs."""
self.filtered_data = \
[i for i in self.vm_data if i['vm_deployment_status'] == 'Deployed']
def get_vm_by_host_name(self, host_name):
"""Filter current VM data to isolate named VM.
Args:
host_name: string: the VM name to match.
"""
self.filtered_data = \
[i for i in self.vm_data if host_name == i['status'][0]['virtual_machine_name']]
def get_name_contains_vms(self, name_contains):
"""Filter current VM data to isolate VMs by partial name match.
Args:
name_contains: string: partial match sort key for deployed VMs.
"""
nc = name_contains.lower()
self.filtered_data = \
[i for i in self.filtered_data if nc in i['status'][0]['virtual_machine_name'].lower()]
# def _build_vars(self):
# """Build the vars dict to pass to Ansible"""
# ansible_ssh_user = os.environ.get('ANSIBLE_SSH_USER')
# ansible_ssh_pass = os.environ.get('ANSIBLE_SSH_PASS')
# ansible_connection = 'ssh'
# return {
# 'ansible_connection': ansible_connection,
# 'ansible_ssh_user': ansible_ssh_user,
# 'ansible_ssh_pass': <PASSWORD>_ssh_pass
# }
def create_inventory(self):
"""Create the inventory object to return to Ansible."""
hosts = []
ansible_ssh_user = os.environ.get('ANSIBLE_SSH_USER')
ansible_ssh_pass = os.environ.get('ANSIBLE_SSH_PASS')
for i in self.filtered_data:
ip_address = i['status'][0]['virtual_machine_ip']
hosts.append(ip_address)
self.inventory['_meta']['hostvars'][ip_address] = \
{'ansible_ssh_port': i['status'][0]['ssh_port'],
'ansible_ssh_user': ansible_ssh_user,
'ansible_ssh_pass': ansible_ssh_pass,
'ansible_connection': 'ssh'}
self.inventory['group']['hosts'] = hosts
# varss = self._build_vars()
# self.inventory['vars'] = varss
print(json.dumps(self.inventory))
return json.dumps(self.inventory)
def parse_args():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--list', help='list deployed VMs',
action='store_true')
group.add_argument('--host', help='get host by name', action='store',
dest='host_name')
return parser.parse_args()
def main(args, name_contains):
if args.host_name:
host_name = args.host_name
inventory_creator = OrkaAnsibleInventory()
inventory_creator.get_current_vm_data()
inventory_creator.get_vm_by_host_name(host_name)
inventory_creator.create_inventory()
elif args.list:
inventory_creator = OrkaAnsibleInventory()
inventory_creator.get_current_vm_data()
inventory_creator.get_deployed_vms()
if name_contains:
inventory_creator.get_name_contains_vms(name_contains)
inventory_creator.create_inventory()
else:
print('Warning: you must pass either `--list` or `--host <hostname>` argument.')
if __name__ == '__main__':
args = parse_args()
name_contains = os.environ.get('ANSIBLE_NAME_CONTAINS')
main(args, name_contains)
```
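For reference, the JSON that create_inventory() prints for Ansible has the shape sketched below. The IP address, port, and credentials are placeholders, not values from a real Orka environment.

```python
# Illustrative only: the inventory structure emitted by create_inventory().
example_inventory = {
    'group': {'hosts': ['127.0.0.1']},
    'vars': [],
    '_meta': {
        'hostvars': {
            '127.0.0.1': {
                'ansible_ssh_port': 8822,
                'ansible_ssh_user': 'example-user',
                'ansible_ssh_pass': 'example-pass',
                'ansible_connection': 'ssh',
            }
        }
    },
}
```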
|
{
"source": "jeff-vincent/orka-python-sdk",
"score": 3
}
|
#### File: lib/orka_sdk/result.py
```python
class Result:
def __init__(self, errors, data=None):
self.data = data
self.errors = errors
if self.errors:
self.success = False
else:
self.success = True
```
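A minimal illustration of branching on the Result wrapper above; the import path is assumed from the file location and the error string is made up.

```python
# Hypothetical use of the Result wrapper defined above.
from orka_sdk.result import Result  # assumed import path

ok = Result(errors=None, data={'vm_id': 'example-vm'})
failed = Result(errors=['something went wrong'])
for r in (ok, failed):
    if r.success:
        print('success:', r.data)
    else:
        print('failure:', r.errors)
```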
|
{
"source": "jeff-vincent/Slingshot",
"score": 2
}
|
#### File: Slingshot/backend/app.py
```python
from quart import Quart
from quart import request
from user_management import UserManagement
from sms import IncomingSMS
app = Quart(__name__)
@app.route('/', methods=['GET'])
async def index():
return 'index'
@app.route('/sign-up', methods=['GET', 'POST'])
async def sign_up():
if request.method == 'GET':
return 'Sign up here.'
elif request.method == 'POST':
request_dict = await request.form
print(request_dict)
user_management = UserManagement(request_dict)
sign_up = await user_management.sign_up()
print(sign_up)
return sign_up
else:
return 'Error. Sign up here.'
@app.route('/login', methods=['GET', 'POST'])
async def login():
if request.method == 'GET':
return 'Login here.'
elif request.method == 'POST':
user_management = UserManagement(request)
login = await user_management.login()
return login
else:
return 'Error. Sign up here.'
@app.route('/incoming-sms', methods=['POST'])
async def handle_incoming_sms():
incoming_sms = IncomingSMS(request)
handled_request = await incoming_sms.handle_sms_callback()
return handled_request.json()
@app.route('/set-prompt', methods=['POST'])
# set time/datestamped prompt on user
async def set_prompt():
pass
# TODO: @admin functionality
@app.route('/get-billable-users', methods=['GET'])
async def get_billable_users():
pass
if __name__ == '__main__':
app.run(debug=True)
```
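A hedged smoke test for the sign-up route above, using Quart's built-in test client. The form fields mirror what UserManagement reads; the values are placeholders, the `app` import path is assumed, and the Twilio/Mongo backends would need to be reachable or stubbed for the request to succeed end to end.

```python
# Hypothetical smoke test for the /sign-up route defined above.
import asyncio
from app import app  # assumed module path

async def smoke_test():
    client = app.test_client()
    response = await client.post('/sign-up', form={
        'username': 'example-user',
        'password': 'example-pass',
        'payment_type': 'cc',
        'cc_number': '0000000000000000',
        'area_code': '503',
    })
    print(response.status_code, await response.get_data())

if __name__ == '__main__':
    asyncio.run(smoke_test())
```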
#### File: backend/db/kafka_mongo.py
```python
from kafka import KafkaConsumer
from pymongo import MongoClient
from json import loads
from backend.config import MONGO_URI
class KafkaMongo:
def __init__(self):
self.message_consumer = KafkaConsumer(
'message',
bootstrap_servers=['localhost:9092'],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='my-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
self.user_consumer = KafkaConsumer(
'user',
bootstrap_servers=['localhost:9092'],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='my-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
self.client = MongoClient(MONGO_URI)
self.messages = self.client.db.messages
self.users = self.client.db.users
def run(self):
"""A method to run the Kafka message and user consumers, and
to write the comsumed data to Mongo.
Args:
self: an instance of the KafkaMongo class
"""
while True:
for message in self.message_consumer:
message = message.value
self.messages.insert_one(message)
print('{} added to {}'.format(message, self.messages))
for user in self.user_consumer:
user = user.value
self.users.insert_one(user)
print('{} added to {}'.format(user, self.users))
if __name__ == '__main__':
kafka_mongo = KafkaMongo()
kafka_mongo.run()
```
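KafkaMongo only consumes; for completeness, a sketch of the matching producer side is shown below. The topic names and serializer mirror the consumer configuration above, while the broker address and payloads are illustrative.

```python
# Hypothetical producer feeding the 'message' and 'user' topics consumed
# by KafkaMongo above. Assumes a broker on localhost:9092.
from json import dumps
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],
    value_serializer=lambda x: dumps(x).encode('utf-8'))
producer.send('message', {'body': 'hello', 'to': '+15555550123'})
producer.send('user', {'username': 'example-user'})
producer.flush()
```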
#### File: Slingshot/backend/user_management.py
```python
import asyncio
import datetime
from quart import session, jsonify
from werkzeug.security import generate_password_hash, check_password_hash
from twilio_account import CreateTwilioAccount
from db.db_connector import MotorQuery
class UserManagement:
def __init__(self, request_dict, users_collection=None):
self.request = request_dict
self.users_collection = users_collection
self.motor_query = MotorQuery()
# async def get_user(self, username):
# """A method for getting a user record by username.
# Args:
# self: an instance of the UserManagement class
# username: string
# """
# user = await self.users_collection.find_one({'username': username})
# return user
# async def _parse_sign_up_request(self):
# """A method for parsing sign up requests.
# Args:
# self: an instance of the UserManagement class
# """
# request_data = {}
# form = await self.request.form
# request_data['username'] = await form.get('username')
# request_data['password'] = await form.get('password')
# request_data['payment_type'] = await form.get('payment_type')
# request_data['cc_number'] = await form.get('cc_number')
# request_data['area_code'] = await form.get('area_code')
# # set area code to session for Twilio account creation
# session['area_code'] = request_data['area_code']
# return request_data
async def _strip_sensitive_fields(self, request_data):
"""A method for removing sensitive data from a request data dict.
Args:
request_data: a dict containing a proposed user's data
"""
self.request['payment_type'] = None
self.request['password'] = None
self.request['cc_number'] = None
self.request['area_code'] = None
self.request['sid'] = None
async def _augment_sign_up_data(self, request_data, twilio_user):
"""A method for augmenting a new user record with Twilio
and other peripheral data.
Args:
self: an instance of the UserManagement class
request_data: a dict containing a proposed user's data
twilio_user: a dict containing newly
created twilio subuser details
"""
hashed_password = generate_password_hash(self.request['password'])
self.request['password'] = hashed_password
self.request['date_joined'] = datetime.datetime.utcnow()
self.request['sms_number'] = twilio_user['sms_number']
self.request['sid'] = twilio_user['sid']
async def sign_up(self):
"""A method for signing up for the service.
Args:
self: an instance of the UserManagement class
"""
# # parse the request
# try:
# request_data = await self._parse_sign_up_request()
# except Exception as e:
# return 'There was a problem parsing your request.\
# Error message: {}'.format(str(e))
# check that requested namespace is available
print(self.request)
user = await self.motor_query.get_user(self.request['username'])
if user:
return 'Sorry, but that username is already taken.\
Please choose a different username.'
# create Twilio subaccount
twilio = CreateTwilioAccount(friendly_name=self.request['username'])
try:
twilio_user = await twilio.create_user_account()
except Exception as e:
print('TWILIO USER CREATE ERROR' + '-' * 50)
return 'There was a problem creating the Twilio subaccount.\
Error message: {}'.format(str(e))
# create system user and associate Twilio subaccount
await self._augment_sign_up_data(self.request, twilio_user)
user_id = await self.users_collection.insert(self.request)
return_data = await self._strip_sensitive_fields(self.request)
session['area_code'] = None
return jsonify(return_data)
async def login(self):
"""A method for logging in to the service.
Args:
self: an instance of the UserManagement class
"""
try:
form = await self.request.form
username = form.get('username')
password = form.get('password')
except Exception as e:
return 'There was a problem parsing your request.\
Error message: {}'.format(str(e))
try:
user = await self.motor_query.get_user(username)
if check_password_hash(user['password'], password):
session['sms_number'] = user['sms_number']
session['sid'] = user['sid']
return 'Logged in user: {}'.format(user['username'])
return 'Please log in.'
except Exception as e:
return 'Login failed: ' + (str(e))
async def recover_password(self):
pass
```
|
{
"source": "jeffvogelsang/eureka",
"score": 2
}
|
#### File: eureka/eureka/search.py
```python
import json
class LogglySearch(object):
"""Loggly search object. Makes the results of a Loggly search available as structured data."""
is_faceted = None
response = None
response_payload = None
context = None
data = None
numFound = None
gmt_offset = None
gap = None
def __init__(self, response, is_faceted):
self.is_faceted = is_faceted
self.response = response
self.response_payload = json.loads(response.text)
for key in self.response_payload.keys():
if key == 'context':
self.context = LogglySearchContext(self.response_payload['context'])
elif key == 'data':
data = self.response_payload[key]
self.data = []
if is_faceted:
for key in data:
# Facets are structured as the facetby key and the count of events in the facet.
self.data.append(LogglyFacet(key, data[key]))
else:
for item in data:
# Events are structured as the event text plus metadata about the event.
self.data.append(LogglyEvent(item))
else:
setattr(self, key, self.response_payload[key])
class LogglyEvent(object):
"""Loggly event. Makes Loggly events available as structured data."""
isjson = None
timestamp = None
inputname = None
inputid = None
ip = None
text = None
def __init__(self, event):
for key in event.keys():
setattr(self, key, event[key])
class LogglyFacet(object):
"""Loggly facet. Makes Loggly facets available as structured data."""
facet = None
count = None
def __init__(self, facet, count):
self.facet = facet
self.count = count
class LogglySearchContext(object):
"""Loggly search context. Makes Loggly search context available as structured data."""
rows = None
from_date = None
until_date = None
start = None
query = None
order = None
def __init__(self, context):
for key in context.keys():
if key == 'from':
self.from_date = context[key]
elif key == 'until':
self.until_date = context[key]
else:
setattr(self, key, context[key])
```
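A minimal usage sketch of `LogglySearch` follows (an illustrative addition, not part of the original module; the import path and the stubbed response object are assumptions — in the real library the response would come from the connection's search call):

```python
import json

from eureka.search import LogglySearch  # assumed import path for the module above


class FakeResponse(object):
    """Stand-in for a requests-style response carrying a Loggly search payload."""
    def __init__(self, payload):
        self.text = json.dumps(payload)


# Hypothetical payload shaped like the keys the class looks for.
payload = {
    "numFound": 1,
    "context": {"query": "error", "rows": 10, "from": "NOW-1DAY", "until": "NOW"},
    "data": [{"text": "something broke", "timestamp": "2013-01-01T00:00:00Z", "isjson": False}],
}

results = LogglySearch(FakeResponse(payload), is_faceted=False)
print(results.numFound)        # 1
print(results.context.query)   # "error" (the 'from'/'until' keys map to from_date/until_date)
print(results.data[0].text)    # "something broke"
```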
#### File: eureka/test/test_eureka.py
```python
import os
import random
from random import randrange
import json
import string
import unittest
import time
from eureka import connect_loggly
from connection import LogglyDevice
# Ensure that live tests only run if the LOGGLY_TEST_LIVE variable is present in the environment and set to 'True'
# Note: Live tests are designed to reasonably safely create and destroy Loggly inventory without affecting
# existing configuration through use of randomized strings and loop-back IP addresses.
enable_live_tests = os.environ.get('LOGGLY_TEST_LIVE')
if enable_live_tests is not None and enable_live_tests == 'True':
enable_live_tests = True
def rand_string(count=12):
"""Return random string of length count with letters and numbers, mixed case. Uses Python randomness."""
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(count))
def get_rand_private_ip():
"""Return a random IP based on the 127.x.x.x block."""
return "127.%s.%s.%s" % (randrange(0, 255, 1), randrange(0, 255, 1), randrange(0, 255, 1))
class TestLoggly(unittest.TestCase):
def setUp(self):
# Preserve environment settings, put them back when done.
self.env_username_save = os.environ.get('LOGGLY_USERNAME')
self.env_password_save = os.environ.get('LOGGLY_PASSWORD')
self.env_domain_save = os.environ.get('LOGGLY_DOMAIN')
self.env_protocol_save = os.environ.get('LOGGLY_PROTOCOL')
def tearDown(self):
def restore_environment(env_var, env_var_saved):
if env_var_saved is not None:
os.environ[env_var] = env_var_saved
else:
if os.environ.get(env_var) is not None:
del os.environ[env_var]
restore_environment('LOGGLY_USERNAME', self.env_username_save)
restore_environment('LOGGLY_PASSWORD', self.env_password_save)
restore_environment('LOGGLY_DOMAIN', self.env_domain_save)
restore_environment('LOGGLY_PROTOCOL', self.env_protocol_save)
def testConnCredsFromEnv(self):
os.environ['LOGGLY_USERNAME'] = 'env_username'
os.environ['LOGGLY_PASSWORD'] = 'env_password'
os.environ['LOGGLY_DOMAIN'] = 'env_domain'
conn = connect_loggly()
self.assertEquals('env_username', getattr(conn, 'username'))
self.assertEquals('env_password', getattr(conn, 'password'))
self.assertEquals('https://env_domain/api', getattr(conn, 'base_url'))
# Make sure we can override the HTTP default.
os.environ['LOGGLY_PROTOCOL'] = 'http'
conn = connect_loggly()
self.assertEquals('http', getattr(conn, 'protocol'))
self.assertEquals('http://env_domain/api', getattr(conn, 'base_url'))
def testConnCredsSupplied(self):
conn = connect_loggly('username', 'password', 'domain')
self.assertEquals('username', getattr(conn, 'username'))
self.assertEquals('password', getattr(conn, 'password'))
self.assertEquals('https://domain/api', getattr(conn, 'base_url'))
conn = connect_loggly('username', 'password', 'domain', 'http')
self.assertEquals('http', getattr(conn, 'protocol'))
self.assertEquals('http://domain/api', getattr(conn, 'base_url'))
def testConnCredsMissing(self):
if os.environ.get('LOGGLY_USERNAME') is not None:
del os.environ['LOGGLY_USERNAME']
if os.environ.get('LOGGLY_PASSWORD') is not None:
del os.environ['LOGGLY_PASSWORD']
if os.environ.get('LOGGLY_DOMAIN') is not None:
del os.environ['LOGGLY_DOMAIN']
self.assertRaises(AttributeError, connect_loggly)
def testConnRepr(self):
os.environ['LOGGLY_USERNAME'] = 'env_username'
os.environ['LOGGLY_PASSWORD'] = 'env_password'
os.environ['LOGGLY_DOMAIN'] = 'env_domain'
# Credentials from environment
conn = connect_loggly()
self.assertEqual("Connection:env_username@https://env_domain/api", "%s" % conn)
del os.environ['LOGGLY_USERNAME']
del os.environ['LOGGLY_PASSWORD']
del os.environ['LOGGLY_DOMAIN']
# Credentials supplied to constructor
conn = connect_loggly('username', 'password', 'domain')
self.assertEqual("Connection:username@https://domain/api", "%s" % conn)
@unittest.skipIf(not enable_live_tests, 'Live connection tests skipped.')
class TestLogglyLive(unittest.TestCase):
"""Live tests. Prove code works against live API.
Note: As these are live tests, running at an integration level, they are subject to environmental failures!
If you experience a failure, the tests may leave behind inputs and devices you'll want to clean up.
"""
def setUp(self):
"""Re-use a live connection to loggly for tests."""
self.conn = connect_loggly()
print "Using: %s" % self.conn
# Helper methods
def _create_input(self, input_type="syslogudp", input_format="text"):
"""Create and with a randomized name and description for testing purposes."""
input_name = "test-input-%s" % rand_string()
input_desc = "test-description-%s" % rand_string()
loggly_input = self.conn.create_input(input_name, input_type, input_format, input_desc)
print "Created input: %s, %s" % (loggly_input.id, loggly_input.name)
return loggly_input
def _create_syslog_input(self):
"""Create a syslog input with a randomized named and description for testing purposes."""
return self._create_input(input_type="syslogudp")
def _create_http_text_input(self):
"""Create a http text input with a randomized named and description for testing purposes."""
return self._create_input(input_type="http")
def _create_http_json_input(self):
"""Create a http json input with a randomized named and description for testing purposes."""
return self._create_input(input_type="http", input_format="json")
def _get_events(self, test_faceted=False, test_json=False):
"""Local method for testing retrieval methods: Facedted vs. not, JSON vs. not."""
submit_attempts = 10 # number of times to attempt submitting an event.
submit_attempt_delay = 30 # delay between attempts in seconds
search_attempts = 10 # number of times to attempt searching for an event.
search_attempt_delay = 30 # delay between attempts in seconds
# Create an input. Need an HTTP input.
if test_json:
loggly_input = self._create_http_json_input()
else:
loggly_input = self._create_http_text_input()
# Make a random string that we're certain won't be found.
event_string = rand_string(150)
if test_json:
event = json.dumps({
'event_string': event_string
})
else:
event = event_string
# Test submitting an event.
event_submitted = False
while not event_submitted and submit_attempts > 0:
try:
self.conn.submit_text_data(event, loggly_input.input_token)
print "Event submitted."
event_submitted = True
except Exception as e:
submit_attempts -= 1
print "Error submitting event: %s" % e.message
print "%s tries left. Will try again in %s seconds." % (submit_attempts, submit_attempt_delay)
time.sleep(submit_attempt_delay)
self.assertTrue(event_submitted, "Event not submitted.")
# Test retrieving event.
event_found = False
while not event_found and search_attempts > 0:
try:
if test_faceted:
if test_json:
print "Testing faceted JSON search."
events = self.conn.get_events_faceted_dict('date', 'json.event_string:"%s"' % event_string)
else:
print "Testing faceted Text search."
events = self.conn.get_events_faceted_dict('date', event_string)
else:
if test_json:
print "Testing JSON search."
events = self.conn.get_events_dict('json.event_string:"%s"' % event_string)
else:
print "Testing Text search."
events = self.conn.get_events_dict(event_string)
num_found = events['numFound']
if num_found > 0:
print "Event found."
event_found = True
else:
search_attempts -= 1
print "Event not found. %s tries left. Will try again in %s seconds." \
% (search_attempts, search_attempt_delay)
time.sleep(search_attempt_delay)
except Exception as e:
search_attempts -= 1
print "Error searching for event: %s" % e.message
print "%s tries left. Will try again in %s seconds." % (search_attempts, search_attempt_delay)
self.assertTrue(event_found, "Event not found.")
# Remove the input
self.conn.delete_input(loggly_input)
# Tests
def testCreateDeleteInput(self):
"""Create an input then delete it."""
loggly_input = self._create_syslog_input()
self.conn.delete_input(loggly_input)
def testCreateDeleteDevice(self):
"""Create a device then delete it.
This requires adding the device to an input, so we create and delete one of these as well.
"""
loggly_input = self._create_syslog_input()
min_loggly_device = LogglyDevice({'ip': get_rand_private_ip()}) # de minimus Loggly device
loggly_device = self.conn.add_device_to_input(min_loggly_device, loggly_input) # create actual device
self.conn.delete_device(loggly_device)
self.conn.delete_input(loggly_input)
def testCreateDeleteDeviceWithName(self):
"""Create a device then delete it.
This requires adding the device to an input, so we create and delete one of these as well.
"""
loggly_input = self._create_syslog_input()
min_loggly_device = LogglyDevice({'ip': get_rand_private_ip()})
loggly_device = self.conn.add_device_to_input(min_loggly_device, loggly_input, "test-name-%s" % rand_string())
self.conn.delete_device(loggly_device)
self.conn.delete_input(loggly_input)
def testCreateDeleteDeviceWithIP(self):
"""Create a device using an IP and Name, then delete it.
This requires adding the device to an input, so we create and delete one of these as well.
"""
loggly_input = self._create_syslog_input()
device_ip = get_rand_private_ip()
device_name = "test-name-%s" % rand_string()
loggly_device = self.conn.add_ip_to_input(device_ip, loggly_input, device_name) # create actual device
self.conn.delete_device(loggly_device)
self.conn.delete_input(loggly_input)
def testDeleteDeviceByIP(self):
"""Create a device using an IP and Name, then delete it using the IP only.
This requires adding the device to an input, so we create and delete one of these as well.
"""
loggly_input = self._create_syslog_input()
device_ip = get_rand_private_ip()
device_name = "test-name-%s" % rand_string()
loggly_device = self.conn.add_ip_to_input(device_ip, loggly_input, device_name) # create actual device
self.conn.delete_device_by_ip(device_ip)
self.conn.delete_input(loggly_input)
def testCreateDeleteDeviceWithIPAndNamedInput(self):
"""Create a device using an IP and Name, then delete it.
This requires adding the device to an input, so we create and delete one of these as well.
"""
loggly_input = self._create_syslog_input()
device_ip = get_rand_private_ip()
device_name = "test-name-%s" % rand_string()
loggly_device = self.conn.add_ip_to_input_by_name(device_ip, loggly_input.name, device_name)
self.conn.delete_device(loggly_device)
self.conn.delete_input(loggly_input)
def testCreateDeleteThisDevice(self):
"""Create a device based on the current IP that Loggly sees, then delete it.
This requires adding the device to an input, so we create and delete one of these as well.
"""
loggly_input = self._create_syslog_input()
loggly_device = self.conn.add_this_device_to_input(loggly_input)
self.conn.remove_this_device_from_input(loggly_input)
self.conn.delete_device(loggly_device)
def testGetAllInputs(self):
"""Get all inputs.
To make sure we're getting multiple inputs, create a few, get the list, then delete them.
"""
loggly_input1 = self._create_syslog_input()
loggly_input2 = self._create_syslog_input()
inputs = self.conn.get_all_inputs()
self.assertGreaterEqual(len(inputs), 2)
self.conn.delete_input(loggly_input1)
self.conn.delete_input(loggly_input2)
def testGetInputFromGetAllInputs(self):
"""Use get all inputs to get a specific input by name.
We create an input so we can test finding a specific input, then delete it.
"""
loggly_input1 = self._create_syslog_input()
loggly_input2 = self._create_syslog_input()
self.assertEqual(1, len(self.conn.get_all_inputs([loggly_input1.name])))
self.assertEqual(loggly_input1.id, self.conn.get_all_inputs([loggly_input1.name])[0].id)
self.assertEqual(2, len(self.conn.get_all_inputs([loggly_input1.name, loggly_input2.name])))
self.conn.delete_input(loggly_input1)
self.conn.delete_input(loggly_input2)
def testGetInputByName(self):
"""Create an input, and then find its ID using the input's name.
We create an input so we can test finding a specific input, then delete it.
"""
loggly_input_to_find = self._create_syslog_input()
loggly_input_found = self.conn.get_input_by_name(loggly_input_to_find.name)
self.assertEqual(loggly_input_found.id, loggly_input_to_find.id)
self.conn.delete_input(loggly_input_to_find)
def testGetInputByNameNotFoundErrors(self):
"""Ensure we get an exception if we search for an input that doesn't exist.
"""
self.assertRaises(Exception, self.conn.get_input_by_name, rand_string(32))
def testGetInputIdByName(self):
"""Create an input, and then find its ID using the input's name.
We create an input so we can test finding a specific input, then delete it.
"""
loggly_input_to_find = self._create_syslog_input()
loggly_input_found_id = self.conn.get_input_id_by_name(loggly_input_to_find.name)
self.assertEqual(loggly_input_found_id, loggly_input_to_find.id)
self.conn.delete_input(loggly_input_to_find)
def testGetInputIdByNameNotFoundErrors(self):
"""Ensure we get an exception if we search for an input that doesn't exist.
"""
self.assertRaises(Exception, self.conn.get_input_id_by_name, rand_string(32))
def testGetInput(self):
"""Get a single input by id.
We create an input so we can test finding a specific input, then delete it.
"""
loggly_input_to_find = self._create_syslog_input()
loggly_input_found = self.conn.get_input(loggly_input_to_find.id)
self.assertEqual(loggly_input_found.id, loggly_input_to_find.id)
self.conn.delete_input(loggly_input_found)
def testGetAllDevices(self):
"""Get all devices.
To make sure we're getting multiple devices, create a few attached to a new input, get the list,
then delete the input and the devices.
"""
loggly_input = self._create_syslog_input()
min_loggly_device1 = LogglyDevice({'ip': get_rand_private_ip()}) # de minimus Loggly device
min_loggly_device2 = LogglyDevice({'ip': get_rand_private_ip()})
loggly_device1 = self.conn.add_device_to_input(min_loggly_device1, loggly_input) # create actual devices
loggly_device2 = self.conn.add_device_to_input(min_loggly_device2, loggly_input)
devices = self.conn.get_all_devices()
self.assertGreaterEqual(len(devices), 2)
self.conn.delete_device(loggly_device1)
self.conn.delete_device(loggly_device2)
self.conn.delete_input(loggly_input)
def testGetDeviceFromGetAllDevices(self):
"""Use get all devices to get a specific device by IP.
We create an input and a device so we can test finding a specific device, then delete them.
"""
loggly_input = self._create_syslog_input()
min_loggly_device1 = LogglyDevice({'ip': get_rand_private_ip()}) # de minimus Loggly device
min_loggly_device2 = LogglyDevice({'ip': get_rand_private_ip()})
loggly_device1 = self.conn.add_device_to_input(min_loggly_device1, loggly_input) # create actual devices
loggly_device2 = self.conn.add_device_to_input(min_loggly_device2, loggly_input)
self.assertEqual(1, len(self.conn.get_all_devices([loggly_device1.ip])))
self.assertEqual(loggly_device1.id, self.conn.get_all_devices([loggly_device1.ip])[0].id)
self.assertEqual(2, len(self.conn.get_all_devices([loggly_device1.ip, loggly_device2.ip])))
self.conn.delete_device(loggly_device1)
self.conn.delete_device(loggly_device2)
self.conn.delete_input(loggly_input)
def testGetDevice(self):
""" Get a single device by id.
We create a device so we can test finding a specific device, then delete it.
"""
loggly_input = self._create_syslog_input()
min_loggly_device = LogglyDevice({'ip': get_rand_private_ip()}) # de minimus Loggly device
loggly_device_to_find = self.conn.add_device_to_input(min_loggly_device, loggly_input) # create actual devices
loggly_device_found = self.conn.get_device(loggly_device_to_find.id)
self.assertEqual(loggly_device_found.id, loggly_device_to_find.id)
self.conn.delete_device(loggly_device_found)
self.conn.delete_input(loggly_input)
def testGetDeviceByName(self):
"""Create an device, and then find it using the device's name.
We create a device so we can test finding a specific device, then delete it.
"""
loggly_input = self._create_syslog_input()
device_name = "test-name-%s" % rand_string()
min_loggly_device = LogglyDevice({'ip': get_rand_private_ip()}) # de minimus Loggly device
loggly_device_to_find = self.conn.add_device_to_input(min_loggly_device, loggly_input, device_name)
loggly_device_found = self.conn.get_device_by_name(device_name)
self.assertEqual(loggly_device_to_find.id, loggly_device_found.id)
self.conn.delete_device(loggly_device_to_find)
self.conn.delete_input(loggly_input)
def testGetDeviceByNameNotFound(self):
"""Ensure we get an exception if we search for a device that doesn't exist.
"""
self.assertRaises(Exception, self.conn.get_device_by_name, rand_string(32))
def testGetDeviceIdByName(self):
"""Create an device, and then find its ID using the device's name.
We create a device so we can test finding a specific device, then delete it.
"""
loggly_input = self._create_syslog_input()
device_name = "test-name-%s" % rand_string()
min_loggly_device = LogglyDevice({'ip': get_rand_private_ip()}) # de minimus Loggly device
loggly_device_to_find = self.conn.add_device_to_input(min_loggly_device, loggly_input, device_name)
loggly_device_found_id = self.conn.get_device_id_by_name(device_name)
self.assertEqual(loggly_device_to_find.id, loggly_device_found_id)
self.conn.delete_device(loggly_device_to_find)
self.conn.delete_input(loggly_input)
def testGetDeviceIdByNameNotFound(self):
"""Ensure we get an exception if we search for a device that doesn't exist.
"""
self.assertRaises(Exception, self.conn.get_device_id_by_name, rand_string(32))
def testGetDeviceByIp(self):
"""Create an device, and then find it using the device's name.
We create a device so we can test finding a specific device, then delete it.
"""
loggly_input = self._create_syslog_input()
device_ip = get_rand_private_ip()
min_loggly_device = LogglyDevice({'ip': device_ip}) # de minimus Loggly device
loggly_device_to_find = self.conn.add_device_to_input(min_loggly_device, loggly_input)
loggly_device_found = self.conn.get_device_by_ip(device_ip)
self.assertEqual(loggly_device_to_find.id, loggly_device_found.id)
self.conn.delete_device(loggly_device_to_find)
self.conn.delete_input(loggly_input)
def testGetDeviceByIpNotFound(self):
"""Ensure we get an exception if we search for a device that doesn't exist.
"""
self.assertRaises(Exception, self.conn.get_device_by_ip, get_rand_private_ip())
def testGetDeviceIdByIp(self):
"""Create an device, and then find its ID using the device's name.
We create a device so we can test finding a specific device, then delete it.
"""
loggly_input = self._create_syslog_input()
device_ip = get_rand_private_ip()
min_loggly_device = LogglyDevice({'ip': device_ip}) # de minimus Loggly device
loggly_device_to_find = self.conn.add_device_to_input(min_loggly_device, loggly_input)
loggly_device_found_id = self.conn.get_device_id_by_ip(device_ip)
self.assertEqual(loggly_device_to_find.id, loggly_device_found_id)
self.conn.delete_device(loggly_device_to_find)
self.conn.delete_input(loggly_input)
def testGetDeviceIdByIpNotFound(self):
"""Ensure we get an exception if we search for a device that doesn't exist.
"""
self.assertRaises(Exception, self.conn.get_device_id_by_ip, get_rand_private_ip())
def testSubmitAndRetrieveTextEvents(self):
"""Test submitting and retrieving Text events."""
self._get_events(test_faceted=False, test_json=False)
def testSubmitAndRetrieveJsonEvents(self):
"""Test submitting and retrieving JSON events."""
self._get_events(test_faceted=False, test_json=True)
def testSubmitAndRetrieveTextEventsFaceted(self):
"""Test submitting and retrieving faceted Text events."""
self._get_events(test_faceted=True, test_json=False)
def testSubmitAndRetrieveJsonEventsFaceted(self):
"""Test submitting and retrieving faceted JSON events."""
self._get_events(test_faceted=True, test_json=True)
def testLogglyExceptions(self):
# A device with a 12-character string id should cause a 400 status code and raise an exception.
bad_device = LogglyDevice({'id': rand_string()})
self.assertRaises(Exception, self.conn.delete_device, bad_device)
if __name__ == '__main__':
unittest.main(verbosity=2)
```
|
{
"source": "Jeffvos/license-tool",
"score": 3
}
|
#### File: license-tool/plugins/sonar.py
```python
import json
import requests
from clients.validation import Validate
try:
with open("./config/appconfig.json", 'r') as json_file:
APP_CONF = json.load(json_file)
except FileNotFoundError:
print('appconfig.json not found')
class CheckSonar:
def __init__(self):
user = APP_CONF['sonar']['user']
passw = APP_CONF['sonar']['pass']
self._license_url = APP_CONF['sonar']['license_url']
self._auth_user = (user, passw)
def _create_request(self):
url = self._license_url
req = requests.request("GET", url, auth=self._auth_user, verify=False)
json_data = req.json()
return Validate.check_validity(json_data)
def _check_license(self, json_data):
license_data = json_data['isExpired']
if not license_data:
Validate.check_validity(json_data)
else:
print('Expired')
def call(self):
return self._create_request()
```
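A hypothetical usage sketch for `CheckSonar` (an addition for illustration; the config values and the import path below are placeholders, and `./config/appconfig.json` must exist with the keys the class reads):

```python
# Example appconfig.json shape assumed by CheckSonar (values are placeholders):
# {"sonar": {"user": "admin", "pass": "secret",
#            "license_url": "https://sonar.example.com/license"}}
from plugins.sonar import CheckSonar  # assumed import path within license-tool

checker = CheckSonar()
result = checker.call()  # fetches the license JSON and passes it to Validate.check_validity
print(result)
```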
|
{
"source": "jeffvswanson/CodingPractice",
"score": 5
}
|
#### File: ProgrammingExercises/10_CubeClass/cubeProperties.py
```python
from cubeClass import Cube
def main():
edge = 0
while edge <= 0:
try:
edge = float(input("Please enter the edge length of the cube: "))
if edge <= 0:
print("You have to enter a number greater than zero.")
except(SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a number greater than zero.")
continue
cube = Cube(edge)
volume = cube.volume()
surfaceArea = cube.surfaceArea()
if volume == 1:
print("\nThe volume of the cube is {0:.2f} unit.".format(volume))
else:
print("\nThe volume of the cube is: {0:.2f} units.".format(volume))
if surfaceArea == 1:
print("\nThe surface area of the cube is: {0:.2f} unit."
.format(surfaceArea))
else:
print("\nThe surface area of the cube is: {0:.2f} units."
.format(surfaceArea))
main()
```
#### File: ProgrammingExercises/12_DisplayPlayingCards/playingCardClass.py
```python
class playingCard:
"""A playing card object. A card's rank is between 1 and 13 indicating
the ranks Ace-King, and suit is a single character "d", "c", "h", or
"s" indicating the suit (diamonds, clubs, hearts, or spades)."""
def __init__(self, rank, suit):
self.rank = int(rank)
self.suit = str(suit)
def getRank(self):
"""Returns the rank of the card."""
return self.rank
def getSuit(self):
"""Returns the suit of the card."""
return self.suit
def BJValue(self):
"""Returns the BlackJack value of a card. Aces count as one, face
cards count as 10."""
if self.rank > 10:
return 10
return self.rank
def __str__(self):
"""Returns a string that names the card. For example, "Ace of Spades",
c = Card(1,"s")
print c
Ace of Spades"""
ranks = ["Ace","Two","Three","Four","Five","Six","Seven","Eight", \
"Nine","Ten","Jack","Queen","King"]
suits = ["Clubs", "Diamonds", "Hearts", "Spades"]
cardRank = ranks[self.rank - 1]
for i in suits:
if i[0] == self.suit.upper():
cardSuit = i
cardName = "{0} of {1}".format(cardRank, cardSuit)
return cardName
def draw(self, win, center):
"""Draws a playing card."""
```
#### File: ProgrammingExercises/16_CannonballTarget/inputDialog.py
```python
from graphics import GraphWin, Entry, Text, Point
from button import Button
class InputDialog:
""" A custom window for getting simulation values (angle, velocity,
and height) from the user."""
def __init__(self, angle, vel, height):
""" Build and display the ingut window """
self.win = win = GraphWin("Initial Values", 200, 300)
win.setCoords(0, 4.5, 4, 0.5)
Text(Point(1, 1), "Angle").draw(win)
self.angle = Entry(Point(3, 1), 5).draw(win)
self.angle.setText(str(angle))
Text(Point(1, 2), "Velocity").draw(win)
self.vel = Entry(Point(3, 2), 5).draw(win)
self.vel.setText(str(vel))
Text(Point(1, 3), "Height").draw(win)
self.height = Entry(Point(3, 3), 5).draw(win)
self.height.setText(str(height))
self.fire = Button(win, Point(1, 4), 1.25, 0.5, "Fire!")
self.fire.activate()
self.quit = Button(win, Point(3, 4), 1.25, 0.5, "Quit")
self.quit.activate()
def interact(self):
""" wait for user to click Quit or Fire button
Returns a string indicating which button was clicked
"""
while True:
pt = self.win.getMouse()
if self.quit.clicked(pt):
return "Quit"
if self.fire.clicked(pt):
return "Fire!"
def getValues(self):
""" return input values """
a = float(self.angle.getText())
v = float(self.vel.getText())
h = float(self.height.getText())
return a, v, h
def close(self):
""" close the input window """
self.win.close()
```
#### File: ProgrammingExercises/9_SphereClass/sphereClass.py
```python
import math
class Sphere:
"""A sphere object. The radius is returned with getRadius(),
the sphere's surface area is calculated with surfaceArea(),
and the volume with volume()."""
def __init__(self, radius):
"""Creates a sphere of a given radius."""
self.radius = float(radius)
def getRadius(self):
return self.radius
def surfaceArea(self):
area = 4 * math.pi * self.radius ** 2
return area
def volume(self):
v = (4/3) * math.pi * self.radius ** 3
return v
```
#### File: ProgrammingExercises/9_SphereClass/sphereProperties3.py
```python
from sphereClass import Sphere
def main():
radius = 0
while radius <= 0:
try:
radius = float(input("Please enter the radius of the sphere: "))
if radius <= 0:
print("You have to enter a number greater than zero.")
except(SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a number greater than zero.")
continue
sphere = Sphere(radius)
volume = sphere.volume()
surfaceArea = sphere.surfaceArea()
print("\nThe volume of the sphere is {0:.2f} units.".format(volume))
print("\nThe surface area of the sphere is {0:.2f} units."
.format(surfaceArea))
main()
```
#### File: Section6_Widgets/DiceRoller/dieview.py
```python
from graphics import *
class DieView:
"""DieView is a widget that displays a graphical representation
of a standard six-sided die."""
def __init__(self, win, center, size):
"""Create a view of a die, e.g.:
d1 = Gdie(myWin, Point(40, 50) having sides
of length 20."""
# first define some standard values
self.win = win # save this for drawing pips later
self.background = "white" # color of die face
self.foreground = "black" # color of the pips
self.psize = 0.1 * size # radius of each pip
hsize = size / 2.0 # half the size of the die
offset = 0.6 * hsize # distance from center to outer pips
# create a square for the face of the die
cx, cy = center.getX(), center.getY()
p1 = Point(cx - hsize, cy - hsize)
p2 = Point(cx + hsize, cy + hsize)
rect = Rectangle(p1, p2)
rect.draw(win)
rect.setFill(self.background)
# Create 7 circles for standard pip locations
self.pip1 = self.__makePip(cx - offset, cy - offset)
self.pip2 = self.__makePip(cx - offset, cy)
self.pip3 = self.__makePip(cx - offset, cy + offset)
self.pip4 = self.__makePip(cx, cy)
self.pip5 = self.__makePip(cx + offset, cy - offset)
self.pip6 = self.__makePip(cx + offset, cy)
self.pip7 = self.__makePip(cx + offset, cy + offset)
# Draw an initial value
self.setValue(1)
def __makePip(self, x, y):
"Internal helper method to draw a pip at (x,y)"
pip = Circle(Point(x,y), self.psize)
pip.setFill(self.background)
pip.setOutline(self.background)
pip.draw(self.win)
return pip
def setValue(self, value):
"Set this die to display value."
self.value = value
# turn all pips off
self.pip1.setFill(self.background)
self.pip2.setFill(self.background)
self.pip3.setFill(self.background)
self.pip4.setFill(self.background)
self.pip5.setFill(self.background)
self.pip6.setFill(self.background)
self.pip7.setFill(self.background)
# turn correct pips on
if self.value == 1:
self.pip4.setFill(self.foreground)
elif self.value == 2:
self.pip1.setFill(self.foreground)
self.pip7.setFill(self.foreground)
elif self.value == 3:
self.pip1.setFill(self.foreground)
self.pip7.setFill(self.foreground)
self.pip4.setFill(self.foreground)
elif self.value == 4:
self.pip1.setFill(self.foreground)
self.pip3.setFill(self.foreground)
self.pip5.setFill(self.foreground)
self.pip7.setFill(self.foreground)
elif self.value == 5:
self.pip1.setFill(self.foreground)
self.pip3.setFill(self.foreground)
self.pip4.setFill(self.foreground)
self.pip5.setFill(self.foreground)
self.pip7.setFill(self.foreground)
else:
self.pip1.setFill(self.foreground)
self.pip2.setFill(self.foreground)
self.pip3.setFill(self.foreground)
self.pip5.setFill(self.foreground)
self.pip6.setFill(self.foreground)
self.pip7.setFill(self.foreground)
def setColor(self, color):
self.foreground = color
self.setValue(self.value)
```
#### File: ProgrammingExercises/1_ImprovedStatistics/statsClass.py
```python
from math import sqrt
class Statistics:
def __init__(self, nums):
self.nums = nums
def mean(self):
total = 0.0
for num in self.nums:
total = total + num
self.avg = total / len(self.nums)
return self.avg
def stdDev(self):
sumDevSq = 0.0
self.avg = self.mean()
for num in self.nums:
dev = num - self.avg
sumDevSq = sumDevSq + dev * dev
self.sigma = sqrt(sumDevSq / (len(self.nums) - 1))
return self.sigma
def meanStdDev(self):
self.avg = self.mean()
self.sigma = self.stdDev()
return self.avg, self.sigma
```
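A quick usage sketch of the `Statistics` class (an addition; it assumes the class above is importable from `statsClass` and that the standard deviation uses the sample formula sqrt(sumDevSq / (n - 1))):

```python
# Check the Statistics class against hand-computed values.
from statsClass import Statistics

scores = [2, 4, 4, 4, 5, 5, 7, 9]
stats = Statistics(scores)
avg, sigma = stats.meanStdDev()
print("mean = {0:.2f}".format(avg))      # 5.00
print("stdDev = {0:.2f}".format(sigma))  # about 2.14 with the n - 1 (sample) formula
```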
#### File: ProgrammingExercises/2_GPASort/GPASort.py
```python
from gpaClass import Student, makeStudent
def getFileName():
while True:
try:
filename = input("Enter the name of the data file: ")
except(SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a valid file path.")
continue
else:
break
return filename
def howToSort():
choices = [1, 2, 3]
while True:
try:
sortChoice = int(input("How do you want the data sorted? \
(1 = GPA, 2 = name, 3 = credits) "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a 1, 2, or 3.")
continue
if sortChoice in choices:
break
else:
print("You have to enter a 1, 2, or 3.")
continue
return sortChoice
def pickSort(sortChoice, filename):
if sortChoice == 1:
data = gpaSort(filename)
elif sortChoice == 2:
data = nameSort(filename)
else:
data = creditSort(filename)
return data
def readStudents(filename):
infile = open(filename, 'r')
students = []
for line in infile:
students.append(makeStudent(line))
infile.close()
return students
def writeStudents(students, filename):
outfile = open(filename, 'w')
for s in students:
print("{0}\t{1}\t{2}".
format(s.getName(), s.getHours(), s.getQPoints()),
file = outfile)
outfile.close()
def gpaSort(filename):
data = readStudents(filename)
data.sort(key=Student.gpa)
return data
def nameSort(filename):
data = readStudents(filename)
data.sort(key=Student.getName)
return data
def creditSort(filename):
data = readStudents(filename)
data.sort(key=Student.getHours)
return data
def main():
print("This program sorts student information by GPA, name, or credits.")
filename = getFileName()
sortChoice = howToSort()
data = pickSort(sortChoice, filename)
filename = input("Enter a name for the output file: ")
writeStudents(data, filename)
print("The data has been written to", filename)
if __name__ == '__main__':
main()
```
#### File: Chapter11_DataCollections/Section2_ApplyingLists/stats.py
```python
from math import sqrt
def getNumbers():
nums = [] # Start with an empty list
# Sentinel loop to get numbers
xStr = input("Enter a number (<Enter> to quit) >> ")
while xStr != "":
x = float(xStr)
nums.append(x) # Add this value to the list
xStr = input("Enter a number (<Enter> to quit) >> ")
return nums
def mean(nums):
total = 0.0
for num in nums:
total = total + num
return total / len(nums)
def stdDev(nums, xbar):
sumDevSq = 0.0
for num in nums:
dev = num - xbar
sumDevSq = sumDevSq + dev * dev
return sqrt(sumDevSq / (len(nums) - 1))
def median(nums):
nums.sort()
size = len(nums)
midPos = size // 2
if size % 2 == 0:
med = (nums[midPos] + nums[midPos-1]) / 2.0
else:
med = nums[midPos]
return med
def main():
print("This program computes mean, median, and standard deviation.")
data = getNumbers()
xbar = mean(data)
std = stdDev(data, xbar)
med = median(data)
print("\nThe mean is", xbar)
print("The standard deviation is", std)
print("The median is", med)
if __name__ == '__main__': main()
```
#### File: ProgrammingExercises/3_Chaos/chaos.py
```python
def main():
print("This program illustates a chaotic function")
x = eval(input("Enter a number between 0 and 1: "))
for i in range(10):
x = 2.0 * x * (1 - x)
print(x)
main()
"""Between the two programs the program from section 1.6 seems to continually
maintain an unstable state, whereas, this program seems to converge on 0.5. In
fact, if you put the value of 0.5 in this program never diverges and if you
choose a number above or below 0.5 the calculation converges to it or
extremely close within about 5 iterations."""
```
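To make the convergence observation above concrete, here is a small illustrative sketch (an addition, not part of the exercise): the map x = 2.0 * x * (1 - x) has an attracting fixed point at 0.5, so trajectories started anywhere strictly between 0 and 1 settle on 0.5 within a few iterations.

```python
# Iterate x -> 2.0 * x * (1 - x) from several starting values and watch
# every trajectory settle on the fixed point 0.5.
def iterate(x, steps=10):
    values = []
    for _ in range(steps):
        x = 2.0 * x * (1 - x)
        values.append(x)
    return values

for start in (0.1, 0.25, 0.5, 0.9):
    print(start, "->", ["{0:.5f}".format(v) for v in iterate(start)])
```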
#### File: 6_Chaos100Iterations/a_Chaos100Iterations/chaos.py
```python
def main():
print("This program illustates a chaotic function")
x = eval(input("Enter a number between 0 and 1: "))
for i in range(100):
x = 3.9 * x * (1 - x)
print(x)
main()
"""As the programs start the first value is the same, but in subsequent values
the three different programs begin to diverge with the results becoming more
disparate as the series runs. For example, at the end the values are:
0.95462, 0.33564, and 0.38084 on the last iteration. This is due to
the computer's interpretation of floating point arithmetic and the numbers
the processor has to drop resulting in wildly chaotic variations in the
results."""
```
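To illustrate the floating-point effect described above, here is a short sketch (an addition, not part of the exercise) that iterates the two algebraically equivalent update rules side by side. Exactly when they visibly separate depends on the platform's rounding, but with the chaotic multiplier 3.9 the tiny rounding differences are amplified until the two trajectories no longer agree.

```python
# Compare two algebraically equivalent forms of the chaotic update and
# watch rounding differences grow over 100 iterations.
x1 = x2 = 0.25
for i in range(1, 101):
    x1 = 3.9 * x1 * (1 - x1)       # factored form
    x2 = 3.9 * x2 - 3.9 * x2 * x2  # expanded form
    if i % 25 == 0:
        print("iteration {0:3d}: {1:.8f}  {2:.8f}  diff = {3:.2e}".format(
            i, x1, x2, abs(x1 - x2)))
```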
#### File: 6_Chaos100Iterations/c_Chaos100Iterations/chaos.py
```python
def main():
print("This program illustates a chaotic function")
x = eval(input("Enter a number between 0 and 1: "))
for i in range(100):
x = 3.9 * x - 3.9 * x * x
print(x)
main()
```
#### File: Chapter1_ComputersPrograms/Section6_MagicOfPython/chaos.py
```python
def main():
print("This program illustates a chaotic function")
x = eval(input("Enter a number between 0 and 1: "))
for i in range(10):
x = 3.9 * x * (1 - x)
print(x)
main()
```
#### File: ProgrammingExercises/4_Convert5Loops/convert.py
```python
def main():
print("This program converts Celsius temperatures into Fahrenheit")
for i in range(5):
celsius = eval(input("What is the Celsius temperature? "))
fahrenheit = 9/5 * celsius + 32
print("The temperature is", fahrenheit, "degrees Fahrenheit.")
main()
```
#### File: ProgrammingExercises/6_FutureValueUserInput/futval.py
```python
def main():
print("This program calculates the future value of an x-year investment.")
principal = eval(input("Enter the intial principal: "))
time = eval(input("Enter the total number of years of the investment: "))
apr = eval(input("Enter the annual interest rate as a decimal: "))
for i in range(time):
principal = principal * (1 + apr)
print("The value in", time, "years is: ${0:,.2f}".format(principal))
main()
```
#### File: ProgrammingExercises/13_SumSeries/sumSeries.py
```python
def main():
print("This program sums a series of numbers entered by the user.")
n = int(input("Please input the amount of numbers you wish to sum: "))
sum = 0
for i in range(n):
x = float(input("Please input the number you wish to add to the sum: "))
ans = sum + x
sum = ans
print("The sum from is {:.0f}.".format(ans))
main()
```
#### File: ProgrammingExercises/16_FibonacciSequence/fibonacciSequence.py
```python
def main():
print("This program sums a series of numbers to produce a \
Fibonacci sequence to a point specified by the user.")
n = int(input("Please input how many numbers you wish to proceed down the \
sequence: "))
ans = 1
sum = 0
for i in range(n):
print(ans)
ans = sum + ans
sum = ans - sum
main()
```
#### File: ProgrammingExercises/4_LightninDistance/lightningDistance.py
```python
import math
def main():
time = 0 # sec
speedOfSound = 1100 # ft/sec
mile = 5280 # feet
print("This program is used to calculate how far lightning is from an observer.")
print()
time = int(input("Please enter the time in seconds from when you saw the lightning to hearing the thunder. "))
distance = time * speedOfSound / mile # miles
print()
print("The lightning is {:0.1f} miles from the observer.".format(distance))
main()
```
#### File: ProgrammingExercises/5_StraightOfDice/straightOfDice.pyw
```python
from graphics import *
def main():
win = GraphWin("Straight of Dice", 500, 90)
win.setCoords(0.0, 0.0, 50.0, 9.0)
dice = Rectangle(Point(3,2), Point(8, 7))
dice.setOutline("black")
dice.setFill("white")
dice.draw(win)
dot = Circle(Point(5.5, 4.5), 0.5)
dot.setOutline("black")
dot.setFill("black")
dot.draw(win)
dice2 = dice.clone()
dice2.move(10, 0)
dice2.draw(win)
dot2a = dot.clone()
dot2a.move(10, -1)
dot2a.draw(win)
dot2b = dot2a.clone()
dot2b.move (0, 2)
dot2b.draw(win)
dice3 = dice.clone()
dice3.move(20, 0)
dice3.draw(win)
dot3a = dot.clone()
dot3a.move(19, -1)
dot3a.draw(win)
dot3b = dot3a.clone()
dot3b.move(1, 1)
dot3b.draw(win)
dot3c = dot3b.clone()
dot3c.move(1, 1)
dot3c.draw(win)
dice4 = dice.clone()
dice4.move(30, 0)
dice4.draw(win)
dot4a = dot.clone()
dot4a.move(29, -1)
dot4a.draw(win)
dot4b = dot4a.clone()
dot4b.move(0, 2)
dot4b.draw(win)
dot4c = dot4a.clone()
dot4c.move(2, 2)
dot4c.draw(win)
dot4d = dot4a.clone()
dot4d.move(2, 0)
dot4d.draw(win)
dice5 = dice.clone()
dice5.move(40, 0)
dice5.draw(win)
dot5a = dot.clone()
dot5a.move(39, -1)
dot5a.draw(win)
dot5b = dot5a.clone()
dot5b.move(0, 2)
dot5b.draw(win)
dot5c = dot5a.clone()
dot5c.move(2, 2)
dot5c.draw(win)
dot5d = dot5a.clone()
dot5d.move(1, 1)
dot5d.draw(win)
dot5e = dot5a.clone()
dot5e.move(2, 0)
dot5e.draw(win)
main()
```
#### File: ProgrammingExercises/8_LineSegmentInformation/lineSegmentInformation.py
```python
from graphics import *
import math
def main():
# Introduction
print("""This program allows the user to draw a line segment. The
program marks the midpoint in a different color and prints the length
and slope of the line.""")
# Create a graphics window to get the user generated line.
win = GraphWin("Line Segment Length and Slope", 400, 400)
win.setBackground("white")
win.setCoords(0.0, 0.0, 10.0, 10.0)
message = Text(Point(5.0, 0.5), "Click on two points.")
message.draw(win)
# Get and draw the user input for the line segment.
p1 = win.getMouse()
p1.draw(win)
p2 = win.getMouse()
p2.draw(win)
line = Line(p1, p2)
line.draw(win)
midpoint = line.getCenter()
midpoint.setOutline("cyan")
midpoint.draw(win)
# Calculate the change in x
dx = p2.getX() - p1.getX()
# Calculate the change in y
dy = p2.getY() - p1.getY()
# Calculate and display the slope
slope = dy/dx
print("The line segment slope, m, is {:0.2f}.".format(slope))
# Calculate and display the line segment length
length = math.sqrt(dx**2 + dy**2)
print("The line segment length, l, is {:0.2f}".format(length))
# Wait for another click to exit.
message.setText("Click anywhere to quit.")
win.getMouse()
win.close()
main()
```
#### File: ProgrammingExercises/11_ChaosPrettyPrinting/chaos2.py
```python
def main():
print("This program illustates a chaotic function")
x = float(input("Enter a number, x, between 0 and 1: "))
y = float(input("Please enter a second number, y, between 0 and 1: "))
indexString = "index {0:^12.2f} {1:^10.2f}".format(x, y)
print(indexString)
dashString = "-" * (1 + len(indexString))
print(dashString)
for i in range(1, 11):
x = 3.9 * x * (1 - x)
y = 3.9 * y * (1 - y)
print("{0:>3} {1:0.8f} {2:0.8f}".format(i, x, y))
main()
```
#### File: ProgrammingExercises/2_QuizScoring/quizScoring.py
```python
def main():
print("Quiz scoring is a program that accepts a quiz score as an input \
and prints out the corresponding grade.")
print("5-A, 4-B, 3-C, 2-D, 1-F, 0-F")
score = int(input("Please enter the quiz score: "))
grades = ["F", "F", "D", "C", "B", "A"]
grade = grades[score]
print("\nThe quiz grade is {0}".format(grade))
main()
```
#### File: ProgrammingExercises/3_ExamScoring/examScoring.py
```python
def main():
print("Quiz scoring is a program that accepts a quiz score as an input and \
prints out the corresponding grade.")
print("90-100:A, 80-89:B, 70-79:C, 60-69:D, <60:F")
score = eval(input("Please enter the quiz score: "))
grades = ["F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"F", "F", "F", "F", "F", "F", "F", "F", "F", "F",\
"D", "D", "D", "D", "D", "D", "D", "D", "D", "D",\
"C", "C", "C", "C", "C", "C", "C", "C", "C", "C",\
"B", "B", "B", "B", "B", "B", "B", "B", "B", "B",\
"A", "A", "A", "A", "A", "A", "A", "A", "A", "A", "A"]
grade = grades[score]
print("\nThe quiz grade is {0}".format(grade))
main()
```
#### File: ProgrammingExercises/4_AcronymCreator/acronymCreator.py
```python
def main():
print("This program accepts a string and then returns the acronym of the \
string.")
phrase = input("Please input the words you want to make an acronym: ")
acronym = ""
phraseSplit = phrase.split()
for f in phraseSplit:
acronym = acronym + f[0]
print("\nThe acronym is {0}.".format(acronym.upper()))
main()
```
#### File: ProgrammingExercises/8_CesarCipherImproved/cesarCipher.py
```python
def main():
print("This program converts a message to a substitution cipher")
print("with a key value chosen by the user.\n")
# Get the message to encode.
message = print("Please enter the message you want to encode")
message = input("(letters and spaces only): ")
message = message.lower()
key = int(input("Please enter the key number: "))
alphabet = "abcdefghijklmnopqrstuvwxyz "
alphabetList = list(alphabet)
# Loop through the string and build the ciphered message
# With a key of 1, "z" wraps around to a space " " and a space becomes "a".
chars = []
for ch in message:
## print(ch)
## print(alphabetList.index(ch))
## print(alphabetList.index(ch) + key)
## print((alphabetList.index(ch) + key) % (len(alphabetList)))
## print(alphabetList[(alphabetList.index(ch) + key) \
## % (len(alphabetList))])
chars.append(alphabetList[(alphabetList.index(ch) + key) \
% (len(alphabetList))])
cipher = "".join(chars)
print("\nThe encoded message is:", cipher)
main()
```
#### File: Chapter5_SequencesStringsListsFiles/Section4_StringRepresentationMessageEncoding/text2numbers.py
```python
def main():
print("This program converts a textual message into a sequence")
print("of numbers representing the Unicode encoding of the message.\n")
# Get the message to encode
message = input("Please enter the message to encode: ")
print("\nHere are the Unicode codes:")
# Loop through the message and print out the Unicode values
for ch in message:
print(ord(ch), end = " ")
print() # blank line before prompt
main()
```
#### File: ProgrammingExercises/13_ConvertStringToFloat/toNumbers.py
```python
def toNumbers(strList):
numList = []
for entry in strList.split():
num = float(entry)
numList.append(num)
return numList
def main():
print("This program asks for a list of numbers then onverts the string \
list to a type recognized as a number by python (int, float, etc.).\n")
strList = input("Please enter the list of numbers seperated by a space. ")
numList = toNumbers(strList)
print(numList)
print("The string list of numbers were converted to {0} in Python."
.format(type(numList[0])))
main()
```
#### File: ProgrammingExercises/3_SphereProperties/sphereProperties2.py
```python
import math
def sphereArea(radius):
area = 4 * math.pi * radius ** 2
return area
def sphereVolume(radius):
volume = (4/3) * math.pi * radius ** 3
return volume
def main():
print("Calculate a spheres surface area and volume.")
volume = 0
area = 0
radius = 0
radius = float(input("Please enter the radius of the sphere: "))
area = sphereArea(radius)
volume = sphereVolume(radius)
print("\nThe volume of the sphere is: {0:0.2f}".format(volume))
print("\nThe surface area of the sphere is: {0:0.2f}".format(area))
main()
```
#### File: ProgrammingExercises/10_EasterCalculator2/easterCalculator2.py
```python
def calcEaster(year):
a = year % 19
b = year % 4
c = year % 7
d = (19 * a + 24) % 30
e = (2 * b + 4 * c + 6 * d + 5) % 7
date = 22 + d + e
if year == 1954 or year == 1981 or year == 2049 or year == 2076:
date = date - 7
if date <= 31:
easterDay = date
month = "March"
else:
easterDay = date - 31
month = "April"
return easterDay, month
def main():
try:
print("This program calculates the date of Easter between the years \
1900-2099.")
year = int(input("Please enter the year you would like to know the \
date of Easter: "))
if year > 2099 or year < 1900:
print("You can only put in a date between 1900 and 2099.")
else:
easterDay, month = calcEaster(year)
print("Easter in {0} is on {1} {2}".format(year, easterDay, month))
except (ValueError, SyntaxError):
print("You must enter the year as a whole number. Exiting.")
main()
```
#### File: ProgrammingExercises/1_WeeksWages/weeksWages.py
```python
def main():
print("This program accepts the number of hours worked and the hourly \
rate.")
print("The program calculates the wages earned in a week with overtime \
at over 40 hours being paid time-and-a-half.\n")
try:
hours = float(input("Please enter the number of hours worked in the week: "))
wage = float(input("Please enter the hourly wage: "))
overtimePay = 1.5 * wage
if hours >= 40:
overtime = hours - 40
else:
overtime = 0
pay = wage * (hours - overtime) + overtimePay * overtime
print("The pay for this week is ${0:0.2f}.".format(pay))
except (ValueError, SyntaxError):
print("Please put in a number value. Exiting.")
main()
```
#### File: ProgrammingExercises/2_QuizScore/quizScore.py
```python
def main():
print("""This program accepts a quiz score as an input and calculates
the corresponding grade. (5-A, 4-B, 3-C, 2-D, 1-F, 0-F)""")
try:
score = int(input("\nPlease enter the quiz score (0-5): "))
# Assume this can be greater than 6 if CS professor gives extra credit
if score >= 5:
grade = "A"
elif score == 4:
grade = "B"
elif score == 3:
grade = "C"
elif score == 2:
grade = "D"
else:
grade = "F"
print("The grade for {0} is {1}.".format(score, grade))
except (ValueError, SyntaxError):
print("You need to enter an integer of 0 through 5. Exiting.")
main()
```
#### File: Chapter7_DecisionStructures/Section5_StudyInDesignMaxOfThree/maxn.py
```python
def main():
n = int(input("How many numbers are there? "))
# Set max to be the first value input
maxval = float(input("Enter a number >> "))
# Now compare the n-1 successive values
for i in range(n-1):
x = float(input("Enter a number >> "))
if x > maxval:
maxval = x
print("The largest value is", maxval)
main()
```
#### File: ProgrammingExercises/11_HeatingAndCooling/heatingAndCoolingDays.py
```python
def userInput():
tempList = []
while True:
temp = input("Please enter the average temperature for the \
day. <Enter to quit>: ")
if temp == "" and tempList == []:
print("Exiting the program")
quit(0)
elif temp == "":
break
else:
try:
tempList.append(float(temp))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a number.")
continue
return tempList
def calcDD(tempList):
# Initialize the heating degree days and cooling degree days
hdd, cdd = 0, 0
for temp in tempList:
if temp < 60:
hdd = hdd + (60 - temp)
elif temp > 80:
cdd = cdd + (temp - 80)
return hdd, cdd
def main():
print("This program determines the number of heating and cooling days.")
# Get user input to create a daily average temperature list (tempList).
tempList = userInput()
# Send the temperature list to get the heating degree days (hdd) and
# cooling degree days (cdd).
hdd, cdd = calcDD(tempList)
print("\nThe number value of heating degree days is {0:.1f}".format(hdd))
print("The number value of cooling degree days is {0:.1f}".format(cdd))
main()
```
#### File: ProgrammingExercises/12_HeatingAndCoolingFromFile/heatingAndCoolingDaysReadFromFile.py
```python
def getFile():
# Get the file name
while True:
try:
infileName = input("\nPlease enter your file name: ")
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a file name.")
continue
break
return infileName
def degreeDays(infileName):
infile = open(infileName, "r")
heatday = 0
coolday = 0
for line in infile:
temp = float(line)
if temp < 60:
heatday = heatday + (60 - temp)
elif temp > 80:
coolday = coolday + (temp - 80)
return heatday, coolday
def main():
print("This program determines the number of heating and cooling days from \
a file.")
infileName = getFile()
# Send the file to get the heating degree days (hdd) and cooling degree
# days (cdd).
hdd, cdd = degreeDays(infileName)
print("\nThe number value of heating degree days is {0:.1f}".format(hdd))
print("The number value of cooling degree days is {0:.1f}".format(cdd))
main()
```
#### File: ProgrammingExercises/13_RegressionLine/regressionLine.pyw
```python
from graphics import *
def createWindow():
win = GraphWin("Linear Regression Chart", 500, 550)
win.setCoords(0.0, 0.0, 50.0, 55.0)
win.setBackground("white")
yaxis = Line(Point(5, 5), Point(5, 50))
yaxis.draw(win)
Text(Point(4, 50), "y").draw(win)
xaxis = Line(Point(5, 5), Point(45, 5))
xaxis.draw(win)
Text(Point(46, 5), "x").draw(win)
quitbutton = Rectangle(Point(1, 4.5), Point(11, 0.5))
quitbutton.setFill("gray")
quitbutton.setOutline("black")
quitbutton.draw(win)
quitmessage = Text(Point(6, 2.5), "Done")
quitmessage.draw(win)
return win
def getPoints(win):
p = Point(0, 0)
n, sumx, sumy, sumxsquared, sumxy = 0, 0, 0, 0, 0
points = []
message = Text(Point(25, 27.5), "Please click inside the axis area").draw(win)
# Test if the click is in the "Done" box
while not((p.getX() >= 1 and p.getX() <= 11) and (p.getY() >= 0.5 and p.getY() <= 4.5)):
p = win.getMouse()
message.undraw()
if p.getX() < 5 or p.getX() > 45 or p.getY() < 5 or p.getY() > 50:
message.draw(win)
else:
message.undraw()
points.append(p)
n += 1
sumx = sumx + p.getX()
sumy = sumy + p.getY()
sumxsquared = sumxsquared + (p.getX()) ** 2
sumxy = sumxy + (p.getX() * p.getY())
p.draw(win)
return n, sumx, sumy, sumxsquared, sumxy, message
def regressionLine(win, n, x, y, xsquared, xy):
avgx = x / n
avgy = y / n
m = (xy - n * avgx * avgy)/(xsquared - n * avgx ** 2)
x1 = 5
x2 = 45
y1 = avgy + m * (x1 - avgx)
y2 = avgy + m * (x2 - avgx)
lineOfRegression = Line(Point(x1, y1), Point(x2, y2))
lineOfRegression.setOutline("orange")
lineOfRegression.draw(win)
return
def main():
# Create the graphics window
# Create a small rectangle labeled done.
# Ask for user input
# Create a decision loop for the user generated mouse clicks
# Record each click in a list and track sum of x, y, x^2, and xy and number of clicks
# If the mouseclick is in the "Done" box leave the loop and don't record the click
# Calculate the linear regression from the list
# Plot the linear regression
# Wait for a final mouseclick to close the window
win = createWindow()
n, sumx, sumy, sumxsquared, sumxy, message = getPoints(win)
# Undraw the last message from the while loop.
message.undraw()
regressionLine(win, n, sumx, sumy, sumxsquared, sumxy)
message = Text(Point(25, 52.5), "Click anywhere to quit.")
message.draw(win)
win.getMouse()
win.close()
main()
```
#### File: ProgrammingExercises/14_ConvertImageToGrayscale/convertImageGrayscale.py
```python
from graphics import *
def getPicture():
# Create a graphic window to get the file name.
win = GraphWin("Input File Name", 400, 400)
win.setBackground("white")
win.setCoords(0.0, 0.0, 4.0, 4.0)
# Introduction
message = Text(Point(2.0, 3.5), \
"""This program gets a color image
and converts it to grayscale.""").draw(win)
Text(Point(0.75, 3), "Input file name: ").draw(win)
infileEntry = Entry(Point(2.25, 3), 20)
infileEntry.draw(win)
button = Text(Point(2, 1.5), "Get the file")
button.draw(win)
Rectangle(Point(1, 1.0), Point(3, 2)).draw(win)
# Initialize p to immediately execute the while loop
p = Point(0, 0)
infileEntry.setText("")
while not((p.getX() >= 1 and p.getX() <= 3) and \
(p.getY() >= 1.0 and p.getY() <= 2)):
p = win.getMouse()
# Do nothing if the click is not in the "Get file" button
if not((p.getX() >= 1 and p.getX() <= 3) and \
(p.getY() >= 1.0 and p.getY() <= 2)):
pass
else:
try:
infileName = infileEntry.getText()
# Files have a period before the file type
if infileName == "" or not("." in infileName):
# Reset p to keep the window open.
p = Point(0, 0)
message.setText("You have to enter a valid file name.")
infileEntry.setText("")
continue
except (SyntaxError, NameError, TypeError, ValueError):
message.setText("You have to enter a valid file name.")
infileEntry.setText("")
# You don't want the window closing inadvertently, so reset p.
p = Point(0, 0)
continue
win.close()
return infileName
def showPhoto(infileName):
photo = Image(Point(0,0), infileName)
w = photo.getWidth()
h = photo.getHeight()
photo.move(w/2, h/2)
win = GraphWin("Grayscale Converter", w, h)
photo.draw(win)
message = Text(Point(w/2, h/3), """Click to start
grayscale conversion.""")
message.setSize(36)
message.setTextColor("Orange")
message.draw(win)
win.getMouse()
message.undraw()
grayscaleConverter(photo, win)
return photo, win
def grayscaleConverter(photo, win):
w = photo.getWidth()
h = photo.getHeight()
for row in range(0, h):
for col in range(0, w):
r, g, b = photo.getPixel(col, row)
brightness = int(round(0.299*r + 0.587*g + 0.114*b))
photo.setPixel(col, row, \
color_rgb(brightness, brightness, brightness))
# Update each row to show progress
win.update()
def savePhoto(photo):
# Create a graphic window to allow the user to save the photo.
win = GraphWin("Save Photo", 400, 400)
win.setBackground("white")
win.setCoords(0.0, 0.0, 4.0, 4.0)
# Introduction
message = Text(Point(2.0, 3.5), \
"""Do you want to save your photo? """).draw(win)
Text(Point(0.75, 3), "Save file as: ").draw(win)
saveFileEntry = Entry(Point(2.25, 3), 20)
saveFileEntry.draw(win)
button = Text(Point(2, 1.5), "Save the file")
button.draw(win)
Rectangle(Point(1, 1.0), Point(3, 2)).draw(win)
# Initialize p to immediately execute the while loop
p = Point(0, 0)
saveFileEntry.setText("")
while not((p.getX() >= 1 and p.getX() <= 3) and \
(p.getY() >= 1.0 and p.getY() <= 2)):
p = win.getMouse()
# Do nothing if the click is not in the "Save file" button
if not((p.getX() >= 1 and p.getX() <= 3) and \
(p.getY() >= 1.0 and p.getY() <= 2)):
pass
else:
try:
saveFileName = saveFileEntry.getText()
# Files have a period before the file type
if saveFileName == "" or not("." in saveFileName):
# Reset p to keep the window open.
p = Point(0, 0)
message.setText("You have to enter a valid file name.")
saveFileEntry.setText("")
continue
except (SyntaxError, NameError, TypeError, ValueError):
message.setText("You have to enter a valid file name.")
saveFileEntry.setText("")
# You don't want the window closing inadvertently, so reset p.
p = Point(0, 0)
continue
photo.save(saveFileName)
win.close()
def main():
# Get the input file from the user
infileName = getPicture()
# Display the file and convert the image to grayscale
photo, win = showPhoto(infileName)
# Open a new window to let the user save the photo
savePhoto(photo)
# Let the user admire the newly saved photo
main()
```
#### File: ProgrammingExercises/16_EventLoop4/event_loop4.py
```python
from graphics import *
def handleKey(k, win):
if k == "r":
win.setBackground("pink")
elif k == "w":
win.setBackground("white")
elif k == "g":
win.setBackground("lightgray")
elif k == "b":
win.setBackground("lightblue")
def handleClick(pt, win):
# Create an entry for user to type in
entry = Entry(pt, 10)
# Go modal: loop until user types <Enter> key
while True:
entry.draw(win)
key = win.getKey()
if key == "Escape":
entry.undraw()
break
if key == "Return": break
# Undraw the entry and create and draw Text()
entry.undraw()
typed = entry.getText()
Text(pt, typed).draw(win)
# Clear (ignore) any mouse click that occurred during text entry
win.checkMouse()
def main():
win = GraphWin("Color Window", 500, 500)
# Event Loop: handle key presses until user presses the 'q' key.
while True:
key = win.checkKey()
if key == "q": # loop exit
break
# Process the key
if key:
handleKey(key, win)
pt = win.checkMouse()
if pt:
handleClick(pt, win)
# exit program
win.close()
main()
```
#### File: ProgrammingExercises/1_FibonacciSequence/fibonacciSequence2.py
```python
def getInput():
while True:
try:
n = int(input("Please input how many numbers you wish to proceed \
down the sequence: "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a whole number.")
continue
return n
def fibonacciNumber(n):
ans = 1
sum = 0
for i in range(n-1):
print("n = {0}, {1}".format((i+1),ans))
ans = sum + ans
sum = ans - sum
print("n = {0}, {1}".format(n,ans))
return ans
def main():
print("This program sums a series of numbers to produce a Fibonacci \
sequence to a point specified by the user.")
n = getInput()
fibNum = fibonacciNumber(n)
print("The Fibonacci number for n = {0} is {1}.".format(n, fibNum))
main()
```
#### File: ProgrammingExercises/2_WindChillIndex/windchillIndex.py
```python
def listSetup():
velocity = []
temp = []
windchill = []
# Create a list of wind speeds from 0 to 50-mph in 5-mph increments
for v in range(0, 51, 5):
velocity.append(v)
# Create a list of temperatures from -20 to 60 degrees Fahrenheit
for t in range(-20, 61, 10):
temp.append(t)
# Iterate through the velocity list, then the temperature list to create
# sequences of temperatures for a given wind speed.
for v in velocity:
if v > 3:
for t in temp:
chill = 35.74 + 0.6215*t - 35.75 * (v ** 0.16) + 0.4275 * t \
* (v ** 0.16)
windchill.append(chill)
else:
for t in temp:
windchill.append(t)
return velocity, windchill
def main():
# Create lists for wind velocity, temperature range, and wind chill.
vel, chill = listSetup()
# Create the table header.
print("Wind Speed (mph)| -20F| -10F| 0F | 10F | 20F | 30F | 40F | 50F |\
60F |", end = "")
# Create the table. Each row has slices 9 values from the windchill list
a = 0
for v in vel:
print("\n{0:>16}|".format(v), end = "", flush = True)
for c in chill[a:(a+9)]:
print("{0:^5.0f}|".format(c), end = "", flush = True)
a = a + 1
main()
```
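A quick sanity check of the wind chill formula used above (the 2001 NWS formula): an air temperature of 0 F with a 10 mph wind should come out to roughly -16 F, which matches the published NWS chart. This is only an illustrative spot check, not part of the original exercise.
```python
# Spot check of the wind chill formula at t = 0 F, v = 10 mph.
t, v = 0, 10
chill = 35.74 + 0.6215 * t - 35.75 * (v ** 0.16) + 0.4275 * t * (v ** 0.16)
print(round(chill, 1))  # about -15.9, i.e. roughly -16 F
```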
#### File: ProgrammingExercises/4_SyracuseSequence/syracuseSequence.py
```python
def userInput():
while True:
try:
val = int(input("Please enter a positive whole number to start \
the sequence: "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a positive whole number to start the \
sequence.")
continue
if val > 0:
break
return val
def main():
print("This program prints a Syracuse sequence.")
val = userInput()
while val != 1:
print(val)
if val % 2 == 1:
val = 3 * val + 1
else:
val = val // 2
print(val)
main()
```
#### File: Section3_CommonLoopPatterns/a_average2/average2.py
```python
def main():
total = 0.0
count = 0
moredata = "yes"
while moredata[0] == "y":
x = float(input("Enter a number: "))
total = total + x
count = count + 1
moredata = input("Do you have more numbers? (yes/no) ")
print("\nThe average of the numbers is", total / count)
main()
```
#### File: Section3_CommonLoopPatterns/c_average4/average4.py
```python
def main():
total = 0.0
count = 0
xStr = input("Enter a number (<Enter> to quit): ")
while xStr != "":
x = float(xStr)
total = total + x
count = count + 1
xStr = input("Enter a number (<Enter> to quit): ")
print("\nThe average of the numbers is", total / count)
main()
```
#### File: ProgrammingExercises/5_VolleyballGameComparison/volleyballComparison.py
```python
from random import random
import volleyball
import volleyballRallyScoring
def main():
printIntro()
probA, probB, n = getInputs()
# Get the number of games won by rally scoring.
winsARally, winsBRally = volleyballRallyScoring.simNGames(n, probA, probB)
# Get the number of games won by traditional scoring.
winsATrad, winsBTrad = volleyball.simNGames(n, probA, probB)
printSummary(winsARally, winsBRally, winsATrad, winsBTrad, n)
def printIntro():
print("This program simulates a game of volleyball between two \
teams called 'A' and 'B'. The abilities of each team are indicated by a \
probability (a number between 0 and 1) that the team wins the point when \
serving. Team A always has the first serve.")
def getInputs():
# Returns the three simulation parameters.
a = b = 0
while a + b != 1:
print("Team A and Team B probability of winning added together \
must equal 1.")
while True:
try:
a = float(input("What is the probability Team A \
wins a serve? "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a decimal between 0 and 1.")
continue
if a > 1 or a < 0:
print("You have to enter a decimal between 0 and 1.")
continue
else: break
while True:
try:
b = float(input("What is the probability Team B \
wins a serve? "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a decimal between 0 and 1.")
continue
            if b > 1 or b < 0:
print("You have to enter a decimal between 0 and 1.")
continue
else: break
n = 0
while n < 1:
try:
n = int(input("How many games to simulate? "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a whole number.")
continue
if n < 1:
print("You have to simulate at least one game.")
continue
return a, b, n
def simNGames(n, probA, probB):
    # Simulates n games of volleyball between teams whose
    # abilities are represented by the probability of winning a serve.
# Returns number of wins for A and B
winsA = winsB = 0
for i in range(n):
scoreA, scoreB = simOneGame(probA, probB)
if scoreA > scoreB:
winsA = winsA + 1
else:
winsB = winsB + 1
return winsA, winsB
def simOneGame(probA, probB):
# Simulates a single game of volleyball between teams whose
# abilities are represented by the probability of winning a serve.
# Returns final scores for teams A and B
serving = "A"
scoreA = 0
scoreB = 0
while not gameOver(scoreA, scoreB):
if serving == "A":
if random() < probA:
scoreA = scoreA + 1
else:
serving = "B"
else:
if random() < probB:
scoreB = scoreB + 1
else:
serving = "A"
return scoreA, scoreB
def gameOver(a, b):
# a and b represent scores for a volleyball game
# Returns True if the game is over, otherwise False.
if a >= 15 and a > b + 2:
return True
elif b >= 15 and b > a + 2:
return True
else:
return False
def printSummary(winsARally, winsBRally, winsATrad, winsBTrad, n):
# Prints a summary of wins by game type
print("\nGames simulated:", n)
print("Rally scoring wins for team A: {0} ({1:0.1%})"\
.format(winsARally, winsARally/n))
print("Traditional scoring wins for team A: {0} ({1:0.1%})"\
.format(winsATrad, winsATrad/n))
print("\nRally scoring wins for team B: {0} ({1:0.1%})"\
.format(winsBRally, winsBRally/n))
print("Traditional scoring wins for team B: {0} ({1:0.1%})"\
.format(winsBTrad, winsBTrad/n))
if __name__ == '__main__': main()
```
#### File: ProgrammingExercises/9_BlackjackDealerBustProbability/blackjackGameDealerBustProbability.py
```python
from random import randrange
def main():
printIntro()
n = getInputs()
busts = simNHands(n)
printSummary(busts, n)
def printIntro():
print("This game simulates a dealer playing hands of Blackjack. The dealer \
must hold at a hand of 17 and busts at a hand over 21. Aces are treated as 11 \
if it brings the hand to a score between 17 and 21. Otherwise aces are worth \
one.")
def getInputs():
n = 0
while n < 1:
try:
n = int(input("How many hands of blackjack to simulate? "))
except (SyntaxError, NameError, TypeError, ValueError):
print("You have to enter a whole number.")
continue
if n < 1:
print("You have to simulate at least one game.")
continue
return n
def simNHands(n):
# Simulates n hands of blackjack at each card value
# Create a list to store the number of busts for each value a face up card can be.
busts = []
for j in range(1, 11):
numbusts = 0
faceUpCard = j
for i in range(n):
bust = simOneHand(faceUpCard)
if bust == True:
numbusts = numbusts + 1
busts.append(numbusts)
return busts
def simOneHand(faceUpCard):
# Simulates one hand of blackjack for the dealer.
# The deck will be treated as infinite.
# Use a list to keep track of drawn cards.
cards = []
# initial two card draw
cards.append(faceUpCard)
drawCard = randrange(1, 11)
cards.append(drawCard)
inHand = scoreHand(cards)
while not handOver(inHand):
# check for greater than or equal to 17 or over 21
# if less than 17, draw again
# else hold
        drawCard = randrange(1, 11)
cards.append(drawCard)
inHand = scoreHand(cards)
if inHand > 21:
bust = True
else:
bust = False
return bust
def scoreHand(cards):
    # Initialize the score of the hand and check for aces.
    # Treat every ace as 11 to start; the bust check below demotes
    # aces back to 1 as needed.
    score = 0
    hasAce = False
    for i in range(len(cards)):
        if cards[i] == 1 or cards[i] == 11:
            hasAce = True
            cards[i] = 11
# Score the hand treating ace as 11
for j in range(len(cards)):
score = score + cards[j]
    # Check for a bust and demote aces from 11 to 1 until the hand
    # no longer busts or there is no ace left to demote.
    while hasAce and score > 21 and 11 in cards:
        cards.remove(11)
        cards.append(1)
        # Rescore the hand
        score = 0
        for k in range(len(cards)):
            score = score + cards[k]
return score
def handOver(inHand):
# The dealer stops drawing if his hand scores over 17 regardless of busting
if inHand >= 17:
return True
else:
return False
def printSummary(busts, n):
# Prints a summary of busted hands and the percentage of busted hands
print("\nHands simulated per face-up card value:", n)
for i in range (1, 11):
if i == 1:
print("The dealer had {0} busted hands with a player win \
percentage of ({1:0.1%}) when an ace was facing up."\
.format(busts[i-1],busts[i-1]/n))
else:
print("The dealer had {0} busted hands with a player win \
percentage of ({1:0.1%}) when {2} was facing up."\
.format(busts[i-1], busts[i-1]/n, i))
if __name__ == '__main__': main()
```
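A small sanity check of the ace handling in scoreHand above. It assumes the file is importable under its module name blackjackGameDealerBustProbability (taken from the file path); the expected values are just a hand-worked illustration.
```python
from blackjackGameDealerBustProbability import scoreHand

# An ace with a five counts the ace as 11.
print(scoreHand([1, 5]))       # 16
# Drawing a ten forces the ace back down to 1 to avoid a bust.
print(scoreHand([1, 5, 10]))   # 16
# A hand with no ace is scored at face value, even past 21.
print(scoreHand([10, 10, 5]))  # 25
```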
|
{
"source": "jeffvswanson/DataStructuresAndAlgorithms",
"score": 4
}
|
#### File: 06_Selection/Deterministic/deterministic_selection_algorithm_example.py
```python
from random import randint
def main():
target_list = [7, 2, 17, 12, 13, 8, 20, 4, 6, 3, 19, 1, 9, 5, 16, 10, 15,
18, 14, 11]
k_order_statistic = randint(1, len(target_list))
_, target_value = quick_select(target_list, k_order_statistic)
last_digit = find_last_digit(k_order_statistic)
print("From the list")
print(sorted(target_list))
result = output(k_order_statistic, target_value, last_digit)
print(result)
def quick_select(target_list, k):
# Base case: A list of length 1 or 0 is by default sorted.
if len(target_list) < 2:
return target_list, target_list[0]
pivot_index = find_median_of_medians(target_list)
pivot_value = target_list[pivot_index]
# Swap the pivot value to the leftmost index position
target_list = swap(target_list, 0, pivot_index)
# Set up the pointers
# i is the index delineating the partition of all values less
# than or equal to the pivot.
# j is the index value of which all indices greater than j have not
# yet been compared against the pivot value.
i = j = 1
# Perform the sort
while j < len(target_list):
if target_list[j] <= pivot_value:
target_list = swap(target_list, i, j)
i += 1
j += 1
# Swap the pivot value into its rightful position
pivot_index = i - 1
target_list = swap(target_list, 0, pivot_index)
# Determine how to continue solving the problem.
# Remember, k is on a 1-based index and pivot_index is 0-based.
if k-1 == pivot_index:
return target_list, target_list[pivot_index]
elif k-1 < pivot_index:
return quick_select(target_list[:pivot_index], k)
else:
return quick_select(target_list[i:], k-i)
def find_median_of_medians(target_list):
"""Method to select the median of medians from a list."""
group_size = 5
# Base case: A list less than the group size is close enough.
if len(target_list) < group_size:
return len(target_list)//2
num_full_groups = len(target_list)//group_size
medians = []
median_indices = []
for i in range(0, num_full_groups*group_size, group_size):
target_list = selection_sort(target_list, i, i+5)
medians.append(target_list[i+2])
median_indices.append(i+2)
_, median_of_medians = quick_select(medians,
len(target_list)//(group_size*2))
for idx, potential_median in enumerate(medians):
if potential_median == median_of_medians:
median_of_medians_index = median_indices[idx]
return median_of_medians_index
def selection_sort(given_list, left_index, right_index):
"""Will always sort 5 elements. Used to determine median values."""
for idx in range(left_index, right_index):
min_value = given_list[idx]
min_index = idx
j = idx + 1
while j < right_index:
if given_list[j] < min_value:
min_value = given_list[j]
min_index = j
j += 1
given_list = swap(given_list, idx, min_index)
return given_list
def swap(L, i, j):
"""Swaps values at indices i and j in list L."""
L[i], L[j] = L[j], L[i]
return L
def find_last_digit(k):
"""Determines the last digit in a base 10 integer."""
return k%10
def output(k_order_statistic, target_value, last_digit):
if k_order_statistic != 11 and last_digit == 1:
result = "The {}st order statistic is {}.".format(k_order_statistic,
target_value)
elif k_order_statistic != 12 and last_digit == 2:
result = "The {}nd order statistic is {}.".format(k_order_statistic,
target_value)
elif k_order_statistic != 13 and last_digit == 3:
result = "The {}rd order statistic is {}.".format(k_order_statistic,
target_value)
else:
result = "The {}th order statistic is {}.".format(k_order_statistic,
target_value)
return result
if __name__ == "__main__":
main()
```
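A quick cross-check of quick_select against Python's built-in sort. The module name is taken from the file path above; quick_select mutates its input, hence the list(data) copy. This is a verification sketch, not part of the original file.
```python
import deterministic_selection_algorithm_example as selection

data = [7, 2, 17, 12, 13, 8, 20, 4, 6, 3, 19, 1, 9, 5, 16, 10, 15, 18, 14, 11]
expected = sorted(data)
for k in range(1, len(data) + 1):
    _, value = selection.quick_select(list(data), k)
    assert value == expected[k - 1], (k, value)
print("quick_select agrees with sorted() for every order statistic")
```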
#### File: 08_GraphSearch/BreadthFirstSearch/breadth_first_search.py
```python
from collections import defaultdict
def main():
print("This program demonstrates the shortest path characteristic \
of breadth first search (BFS) on an undirected graph.\n")
vertices, node_edges = setup()
starting_node = get_node_value("start")
ending_node = get_node_value("end")
shortest_path = breadth_first_search(starting_node, ending_node,
vertices, node_edges)
output = generate_output(shortest_path, starting_node, ending_node)
print(output)
def setup():
"""
Converts the kargerMinCut.txt file into a useable format.
"""
vertices = []
node_edges = defaultdict(list)
with open("kargerMinCut.txt") as f:
# Exclude the newline character, \n
for line in f:
# Remove the trailing tab and newline characters.
# Otherwise, there are issues on import.
line = line.rstrip('\t\n')
# Build the vertices list.
vertex = int(line.split('\t', 1)[0])
vertices.append(vertex)
# edges (relationships) is a list of tuples with the edge
# between two points, u and v, represented by a
# tuple, (u, v).
for v in line.split('\t'):
v = int(v)
# We don't want self-loops, that is, (1, 1).
if v != vertex:
node_edges[vertex].append(v)
return vertices, node_edges
def get_node_value(type_of_node):
"""
Gets user input to designate a starting or ending node.
"""
while True:
if type_of_node == "start":
try:
node = int(input("Please enter your starting node choice \
(whole number between 1 and 200): "))
if node < 1 or node > 200: raise ValueError
break
except (TypeError, ValueError, SyntaxError):
print("That is not a whole number between 1 and 200, please try again\n")
else:
try:
node = int(input("\nPlease enter your ending node choice \
(whole number between 1 and 200): "))
if node < 1 or node > 200: raise ValueError
break
except (TypeError, ValueError, SyntaxError):
print("That is not a whole number between 1 and 200, please try again\n")
return node
def breadth_first_search(starting_node, ending_node, vertices, node_edges):
"""
Function to explore the given graph for a connection between the
starting node and the ending node and return the shortest path
between the two nodes if one exists.
"""
# Short circuit if the starting_node equals the ending_node
if starting_node == ending_node:
return 0
# Set all nodes as unexplored
is_explored = defaultdict(str)
for vertex in vertices:
is_explored[vertex] = "Unexplored"
# Set starting_node as "Explored"
is_explored[starting_node] = "Explored"
# Let Q = queue data structure (First-in, First-out (FIFO)),
# initialized with starting_node
q = [starting_node]
# dist is a key-value pair representing how many edges we've
# traversed from the starting_node
distance_from_start_node = defaultdict(int)
distance_from_start_node[starting_node] = 0
while len(q) != 0:
v = q.pop(0)
# Explore the different edges v posseses (v, u)
for u in node_edges[v]:
if is_explored[u] == "Unexplored":
distance_from_start_node[u] = distance_from_start_node[v] + 1
is_explored[u] = "Explored"
q.append(u)
if is_explored[ending_node] == "Explored":
shortest_path = distance_from_start_node[ending_node]
else:
# The two nodes are not connected.
shortest_path = -1
return shortest_path
def generate_output(shortest_path, starting_node, ending_node):
"""
Generates the message to the user detailing the result of the
breadth first search to find the shortest path.
"""
if shortest_path < 0:
output_message = "Nodes {} and {} are not connected.".format(starting_node, ending_node)
elif shortest_path == 0:
output_message = "The shortest path is 0. If you're at home, you don't have to \
cross the street to get home!"
else:
output_message = "The shortest path between nodes {} and {} is {}.".format(starting_node, ending_node, shortest_path)
return output_message
if __name__ == "__main__":
main()
```
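One design note on the queue: q.pop(0) on a Python list is O(n) per dequeue. Below is a minimal, self-contained sketch of the same layer-by-layer distance computation using collections.deque; the toy adjacency dict stands in for the kargerMinCut.txt input, and the names are illustrative rather than taken from the file above.
```python
from collections import defaultdict, deque

def bfs_shortest_path(node_edges, start, end):
    """Return the number of edges on a shortest path, or -1 if unreachable."""
    if start == end:
        return 0
    distance = {start: 0}
    q = deque([start])
    while q:
        v = q.popleft()  # O(1) dequeue, unlike list.pop(0)
        for u in node_edges[v]:
            if u not in distance:
                distance[u] = distance[v] + 1
                if u == end:
                    return distance[u]
                q.append(u)
    return -1

# Toy undirected graph: 1-2, 2-3, 3-4, 1-4.
edges = defaultdict(list)
for a, b in [(1, 2), (2, 3), (3, 4), (1, 4)]:
    edges[a].append(b)
    edges[b].append(a)
print(bfs_shortest_path(edges, 1, 4))  # 1
print(bfs_shortest_path(edges, 2, 4))  # 2
```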
#### File: Stanford/10_BinarySearchTrees/red_black_node.py
```python
class Node:
"""
A class used to represent a Node in a red-black search tree.
Attributes:
key: The key is the value the node shall be sorted on. The key can be an integer,
float, string, anything capable of being sorted.
instances (int): The number of times the key for a node was inserted into the tree.
parent (node): The pointer to the parent of the node.
left (node): The pointer to the left child node.
right (node): The pointer to the right child node.
is_red (bool): The color attribute keeps track of whether a node is red or black.
"""
def __init__(self, key):
"""
Parameters:
key: The key is the value the node shall be sorted on. The key can be an integer,
float, string, anything capable of being sorted.
"""
self.key = key
self.instances = 1
self.parent = None
self.left = None
self.right = None
self.is_red = True
def recolor(self):
"""
Switches the color of a Node from red to black or black to red.
"""
if self.is_red:
self.is_red = False
else:
self.is_red = True
def add_instance(self):
"""
Allows for duplicates in a node by making it "fat" instead of
creating more nodes which would defeat the purpose of a self-
balancing tree.
"""
self.instances += 1
def remove_instance(self):
"""
Allows for removal of a single instance of a key from the
search tree rather than pruning an entire node from the tree.
"""
self.instances -= 1
def delete(self):
"""
Zeroes out a node for deletion.
"""
self.key = None
self.instances = 0
self.parent = None
self.left = None
self.right = None
self.is_red = False # Null nodes are, by default, black.
```
#### File: Stanford/10_BinarySearchTrees/red_black_tree.py
```python
import red_black_node as rbn
from typing import Tuple
class RedBlackTree:
"""
A class used to represent a red-black binary search tree.
Attributes:
Methods:
insert(key)
Inserts an element into the search tree.
_rebalance(node)
Rebalances the red-black search tree after a node is inserted.
_case1(node)
A helper function for rebalancing a specific case where the parent
of the node and the node's sibling are red.
_case2(node)
A helper function for rebalancing a specific case where the parent
of the node is red and the node's sibling is black or None.
_left_rotation(key)
Reorganizes a section of the search tree so the parent node, x,
becomes the left child of it's original right child, y, and y
becomes the parent of x.
_right_rotation(key)
Reorganizes a section of the search tree so the parent node, x,
becomes the right child of its original left child, y, and y
becomes the parent of x.
delete_instance(key)
Deletes an instance of the node from the search tree if the key
            exists. That is, if the node was inserted multiple times into the
tree it only removes one instance. If delete_instance is called
on a node with only one instance delete_instance will delete the
node from the red-black search tree.
delete(key)
Deletes a node from the search tree if the key exists.
_case3(node)
A helper function for determining how to arrange the red-black
tree after a node deletion. The node being deleted is black and
either has no children or two children.
traverse() -> list
Prints the keys of the search tree in ascending order, for example,
1, 2, 3, 4, ..., n.
successor(key) -> key
Provides the given key's closest node in value that is greater than
the key if it exists in the search tree.
predecessor(key) -> key
Provides the given key's closest node in value that is less than the
key if it exists in the search tree.
max() -> key
Provides the maximum value that exists in the search tree.
min() -> key
            Provides the minimum value that exists in the search tree.
contains(key) -> Tuple[node, node]
Checks if a value exists in the search tree.
"""
def __init__(self):
"""
Parameters:
None
"""
self.root = None
def insert(self, key):
"""
Inserts a node into the search tree.
Parameters:
key: The key of the node you wish to insert.
"""
if key == None:
return
new_node = rbn.Node(key)
# Check if there is nothing in the tree.
if self.root == None:
self.root = new_node
# Paint it black.
self.root.recolor()
return
# Find where the node should be inserted.
found_node, parent = self.contains(new_node.key)
if found_node != None:
found_node.add_instance()
elif new_node.key != parent.key:
new_node.parent = parent
if new_node.key < parent.key:
parent.left = new_node
else: # new_node.key > parent.key
parent.right = new_node
self._rebalance(new_node)
def _rebalance(self, node):
"""
Ensures the search tree remains balanced.
Parameters:
node: The node where rebalancing should start.
"""
# Easy case: node's parent is black.
if node != self.root and not node.parent.is_red:
return
# Now we have to keep propagating changes up the tree since
# node's parent is red and there cannot be two reds in a
# parent-child relationship.
while node.parent.is_red and node != self.root:
grandparent = node.parent.parent
# Determine the rebalancing case
if grandparent.right == None or grandparent.left == None:
self._case2(node)
elif grandparent.right.is_red and grandparent.left.is_red:
self._case1(node)
else:
self._case2(node)
            # Have to reassign grandparent due to rebalancing
grandparent = node.parent.parent
# Do not continue, the root does not have a grandparent.
if grandparent == self.root or grandparent == None:
break
else:
node = node.parent
# After propagating ensure the root of the tree remains black.
if self.root.is_red:
self.root.recolor()
def _case1(self, node):
"""
The parent of the node and the parent's sibling are red.
Leave node as red. The grandparent of red must be black since
the parent of node is originally red. Color the grandparent of
node red and the grandparent's left and right children black.
Parameters:
node: The node originating the first case of node reorganization.
"""
grandparent = node.parent.parent
grandparent.recolor()
grandparent.left.recolor()
grandparent.right.recolor()
def _case2(self, node):
"""
The parent of the node is red and the parent's sibling is black or None.
Rotate node's parent in the opposite direction of node so node
occupies the original parent's position. Then recolor node and
node's new parent.
Parameters:
node: The node originating the second case of node reorganization.
"""
grandparent = node.parent.parent
# Figure out which way to rotate.
if node.parent == grandparent.right:
if node == node.parent.right:
self._left_rotation(grandparent)
node.parent.recolor()
node.parent.left.recolor()
else:
self._right_rotation(node.parent)
self._case2(node.right)
else:
if node == node.parent.left:
self._right_rotation(grandparent)
node.parent.recolor()
node.parent.right.recolor()
else:
self._left_rotation(node.parent)
self._case2(node.left)
def _left_rotation(self, node):
"""
Conducts a left rotation causing the given node to move left down the
tree and brings its right child into the vacated position.
A left C
/\\ ----------> /
B C rotation A
of A /
B
Parameters:
node: The parent node to rotate out of position.
"""
# Adjust the child pointers for the nodes due to the rotation.
# The node's right child will become the node's parent with
# a left rotation
new_parent = node.right
# Since the new_parent is greater than node, the new_parent's
# left pointer will adjust to point to node and node's right
# pointer must be adjusted to point to the soon-to-be orphaned
# left node of new_parent.
node.right = new_parent.left
if new_parent.left != None:
new_parent.left.parent = node
# Adjust the parent pointers for the nodes due to the rotation.
if node.parent == None:
self.root = new_parent
# Paint it black
if self.root.is_red:
self.root.recolor()
else:
new_parent.parent = node.parent
if node == node.parent.left:
node.parent.left = new_parent
else:
node.parent.right = new_parent
new_parent.left = node
node.parent = new_parent
def _right_rotation(self, node):
"""
Conducts a right rotation causing the given node to move right down the
tree and brings its left child into the vacated position.
A right B
/\\ ----------> \\
B C rotation A
of A \\
C
Parameters:
node: The parent node to rotate out of position.
"""
# Adjust the child pointers for the nodes due to the rotation.
# The node's left child will become the node's parent with
# a right rotation.
new_parent = node.left
# Since the new_parent is less than node, the new_parent's
# right pointer will adjust to point to node and node's left
# pointer must be adjusted to point to the soon-to-be orphaned
# right node of new_parent.
node.left = new_parent.right
if new_parent.right != None:
new_parent.right.parent = node
# Adjust the parent pointers for the nodes due to the rotation.
if node.parent == None:
self.root = new_parent
# Paint it black
if self.root.is_red:
self.root.recolor()
else:
new_parent.parent = node.parent
if node == node.parent.left:
node.parent.left = new_parent
else:
node.parent.right = new_parent
new_parent.right = node
node.parent = new_parent
def delete_instance(self, key):
"""
Deletes an instance of a node in the red-black search tree. That is, if there
is more than one instance delete_instance decrements the number of instances
of the node. If this method is called when only one instance exists the
delete method gets called to completely remove the node from the search
tree.
Parameters:
key: The key of the node you wish to delete an instance of.
"""
node, _ = self.contains(key)
if node == None:
return
else:
node.remove_instance()
if node.instances < 1:
self.delete(key)
if node == self.root:
self.root = None
def delete(self, key):
"""
Completely removes a node from a red-black search tree regardless of the
number of instances the node possesses.
Parameters:
key: The key of the node you wish to delete from the search tree.
"""
node, parent = self.contains(key)
if node == None:
return
# Case 1: node being deleted is red with no children
if node.is_red and node.left == None and node.right == None:
if parent.left == node:
parent.left = None
else:
parent.right = None
# Case 2: node is black, node has only one child, and the child is red
# As a side note, there is no case for a node possessing only one
# black child as that would not be a valid tree structure.
elif node.left != None and node.left.is_red and node.right == None:
if parent == None:
self.root = node.left
                self.root.parent = None
self.root.recolor()
elif parent.left == node:
parent.left = node.left
node.left.parent = parent
parent.left.recolor()
else:
parent.right = node.left
node.left.parent = parent
parent.right.recolor()
elif node.right != None and node.right.is_red and node.left == None:
if parent == None:
self.root = node.right
                self.root.parent = None
self.root.recolor()
            elif parent.right == node:
parent.right = node.right
node.right.parent = parent
parent.right.recolor()
else:
parent.left = node.right
node.right.parent = parent
parent.left.recolor()
# Case 3: node is black
else:
self._case3(node)
if parent != None:
if parent.left == node:
parent.left = None
elif parent.right == node:
parent.right = None
node.delete()
def _case3(self, node):
"""
Case 3 occurs when the node we want to delete is a black node.
Since deleting a black node would alter the requirement to have
the same number of black nodes regardless of the path taken
the tree must be adjusted.
Parameters:
node: The node originating the case 3 deletion.
"""
# node is the root and does not need case 3.
if node.parent == None:
return
parent = node.parent
if parent.right == node:
s = parent.left
else:
s = parent.right
# Case 3.1: node's sibling, s, is red
if s.is_red:
parent.recolor()
s.recolor()
if s == parent.left:
self._right_rotation(parent)
else:
self._left_rotation(parent)
self._case3(node)
else: # node's sibling, s, is black
# Case 3.2: Both children of s are black
            if (s.left == None or not s.left.is_red) and (s.right == None or not s.right.is_red):
s.recolor() # s is now red
if parent.is_red:
parent.recolor()
return
else:
# Defaults to case 3.1 on the next pass-through.
self._case3(node)
# Case 3.3: s's left child is red
            elif s.left != None and s.left.is_red and (s.right == None or not s.right.is_red):
s.recolor() # s is red
                s.left.recolor() # s.left is black
self._right_rotation(s) # sets us up for case 3.4
# Case 3.4: s's right child is red
            if s.right != None and s.right.is_red:
s.right.recolor() # s.right is black
if parent.is_red:
s.parent.recolor() # s.parent is black
self._left_rotation(s.parent)
def traverse(self, node) -> list:
"""
Provides keys in increasing order.
Parameters:
node: The node the in-order traversal will start from.
Returns:
list: A list of the tree's keys in ascending order.
"""
tree = []
if node != None:
tree = self.traverse(node.left)
tree.append(node.key)
tree = tree + self.traverse(node.right)
return tree
def successor(self, key=None) -> rbn.Node:
"""
Computes the next greater value in the search tree. If no successor is found,
the key is a maximum.
Returns:
node.key: The successor node's value.
"""
# Easy Case: If the key in question's right subtree is not empty,
# return the min key in the right subtree.
key_node = self.contains(key)[0]
if key_node != None and key_node.right != None:
return self.min(key_node.right)
# Otherwise, Follow parent pointers of the key in question until you
# get to a key value greater than the original key. If you reach the
# root and have not found a key greater than the original key, then
# there is no successor in the search tree and the original key is
# the maximum key.
succ = key_node
while succ.parent != None:
if succ.parent == self.root and self.root.key < key_node.key:
return None
elif succ.parent.key > key_node.key:
return succ.parent
succ = succ.parent
def predecessor(self, key=None) -> rbn.Node:
"""
Computes the next least value in the search tree. If no predecessor is found,
the key is a minimum.
Returns:
node.key: The predecessor node's value.
"""
# Easy Case: If the key in question's left subtree is not empty, return
# the max key in the left subtree.
key_node = self.contains(key)[0]
if key_node != None and key_node.left != None:
            return self.max(key_node.left)
# Otherwise: Follow parent pointers of the key in question until you
# get to a key value less than the original key. If you reach the root
# and have not found a key less than the original key, then there is
# no predecessor in the search tree and the original key is the
# minimum key.
pred = key_node
while pred.parent != None:
if pred.parent == self.root and self.root.key > key_node.key:
return None
elif pred.parent.key < key_node.key:
return pred.parent
pred = pred.parent
    def max(self, max_node=None) -> rbn.Node:
        """
        Computes the maximum value in the search tree, or of the subtree
        rooted at max_node when one is given.
        Returns:
            node.key: The maximum node's value.
        """
        if max_node == None:
            max_node = self.root
        while max_node != None and max_node.right != None:
            max_node = max_node.right
        return max_node
def min(self, min_node=None) -> rbn.Node:
"""
Computes the minimum value in the search tree.
Returns:
node.key: The minimum node's value.
"""
# Working with predecessor, have the min function take as default a None node parameter
# If the node is none assign it as the root in the min
# Then we can do a min search from the node or continue on
if min_node == None:
min_node = self.root
while min_node != None and min_node.left != None:
min_node = min_node.left
return min_node
def contains(self, v) -> Tuple[rbn.Node, rbn.Node]:
"""
Checks if the given value is in the search tree. It returns the
last node accessed
Parameters:
v: The value you wish to check for.
Returns:
node: The node the search ended on whether a null node or
an actual node.
node: The last node accessed (parent or potential parent).
"""
current_node, parent = self.root, self.root
while current_node != None and v != current_node.key:
parent = current_node
if current_node.key != None and v < current_node.key:
current_node = current_node.left
else:
current_node = current_node.right
if current_node == self.root:
parent = None
return current_node, parent
```
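A short usage sketch of the two modules above, assuming red_black_node.py and red_black_tree.py sit next to each other as the file paths suggest. It only exercises the documented surface (insert, traverse, min, max, contains); the duplicate insert is stored as an extra instance on the existing node.
```python
import red_black_tree

tree = red_black_tree.RedBlackTree()
for key in [10, 4, 17, 1, 6, 12, 25, 6]:  # 6 is inserted twice
    tree.insert(key)

print(tree.traverse(tree.root))        # [1, 4, 6, 10, 12, 17, 25]
print(tree.min().key, tree.max().key)  # 1 25

node, _ = tree.contains(6)
print(node.key, node.instances)        # 6 2
```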
#### File: ProgrammingQuestion6_HeapsSearchTreesAndHashTables/Question2a_HeapMedianMaintenance/heap.py
```python
class Node:
"""Node represents where a value goes in the heap"""
def __init__(self, value):
self.node = value
class Heap:
"""
Heap represents the heap data structure.
Default heap kind is a min-heap, unless 'max' denoted.
"""
def __init__(self, kind="min", *args):
# Array to start the heap data structure.
# Actual nodes start at heap[1] as a heap is a 1-based index.
# The 0 is a placeholder.
self.heap = [0]
self.kind = kind
for node in args:
self.insert(node)
def insert(self, node):
"""Inserts a node into the heap."""
# The first element, whatever it is, gets put in the root node
# position.
if len(self.heap) < 2:
if self.kind == "max":
self.heap.append(-node)
else:
self.heap.append(node)
return
if self.kind == "max":
self.heap.append(-node)
else:
self.heap.append(node)
self.bubble_up(len(self.heap)-1)
return
def bubble_up(self, child_index):
"""
Push a child node up through the heap to maintain the heap property.
"""
parent_index = self.get_parent_index(child_index)
# The node in question is the root node
if parent_index == 0:
return
if self.heap[child_index] < self.heap[parent_index]:
self.heap[child_index], self.heap[parent_index] = \
self.heap[parent_index], self.heap[child_index]
self.bubble_up(parent_index)
return
def extract_root(self):
"""
Extracts and returns the max or min root value of the heap,
dependent on heap type.
Returns True or False if there is a root node to return. That
is, a heap without a root node (0-element heap) has a return
value of a 0 and a False.
"""
# Check if the heap only has the 0-element.
# False return because there is no root element.
if self.length() < 1:
return self.heap[0], False
if self.kind == "max":
root = -self.heap[1]
else:
root = self.heap[1]
# Swap the last index position into the root node
self.heap[1] = self.heap[-1]
# Pare down the list since the last node became the root
del self.heap[-1]
if self.length() > 1:
self.bubble_down(1)
return root, True
def bubble_down(self, parent_index):
"""Moves a misplaced node into its appropriate position."""
min_val = self.heap[parent_index]
min_index = self.get_left_child_index(parent_index)
# The parent node has no children
if min_index == 0:
return
# Find the smaller of the two children
right_child_index = self.get_right_child_index(parent_index)
if right_child_index == 0:
right_child_index = min_index
if self.heap[right_child_index] < self.heap[min_index]:
min_index = right_child_index
if self.heap[min_index] < min_val:
min_val = self.heap[min_index]
if min_val != self.heap[parent_index]:
self.heap[parent_index], self.heap[min_index] = self.heap[min_index], self.heap[parent_index]
self.bubble_down(min_index)
def get_parent_index(self, child_index):
"""
Get the index of the given node's parent given the index
of the child node.
"""
# The root of the tree is at index position 1
# and can have no parent
if child_index == 1:
return False
return child_index // 2
def get_left_child_index(self, parent_index):
"""
Get the index of the left child given the parent
node's index.
"""
# Remember this is a 1-base index.
if parent_index * 2 > self.length():
# There is no left-child
return False
return parent_index * 2
def get_right_child_index(self, parent_index):
"""
Get the index of the right child given the
parent node's index.
"""
# Remember, this is a 1-base index.
if parent_index * 2 + 1 > self.length():
# There is no right child
return False
return parent_index * 2 + 1
def peek(self):
"""
Returns the root of the heap without extracting the root.
Returns True or False if there is a root node to return. That
is, a heap without a root node (0-element heap) has a return
value of a 0 and a False.
"""
# Check if the heap only has the 0-element.
# False return because there is no root element.
if self.length() < 1:
return self.heap[0], False
if self.kind == "max":
root = -self.heap[1]
else:
root = self.heap[1]
return root, True
def length(self):
"""
Returns the length of the 1-based index heap.
"""
return len(self.heap) - 1
```
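The directory name (HeapMedianMaintenance) points at the classic two-heap median-maintenance pattern. Below is a minimal sketch built on the Heap class above, assuming the file is importable as heap per its path: keep the lower half of the numbers seen so far in a max-heap and the upper half in a min-heap, rebalance so their sizes differ by at most one, and read the median off the root of the larger half (the lower half on ties).
```python
from heap import Heap

def running_medians(stream):
    """Yield the median after each value from stream."""
    lower = Heap("max")  # largest value of the lower half sits at the root
    upper = Heap("min")  # smallest value of the upper half sits at the root
    for x in stream:
        lower_root, has_lower = lower.peek()
        if not has_lower or x <= lower_root:
            lower.insert(x)
        else:
            upper.insert(x)
        # Rebalance so the two halves differ in size by at most one.
        if lower.length() > upper.length() + 1:
            value, _ = lower.extract_root()
            upper.insert(value)
        elif upper.length() > lower.length() + 1:
            value, _ = upper.extract_root()
            lower.insert(value)
        # The median is the root of the larger half (lower half on ties).
        if lower.length() >= upper.length():
            median, _ = lower.peek()
        else:
            median, _ = upper.peek()
        yield median

print(list(running_medians([5, 15, 1, 3])))  # [5, 5, 5, 3]
```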
|
{
"source": "jeffvswanson/LeetCode",
"score": 4
}
|
#### File: 0001_TwoSum/python/two_sum.py
```python
def brute_force(*, nums: list[int], target: int) -> list[int]:
indices = set()
for i, num in enumerate(nums):
start = i + 1
nums2 = nums[start:]
for j, num2 in enumerate(nums2):
if num + num2 == target:
indices.add(i)
indices.add(j + start)
return list(indices)
```
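The brute force above is O(n^2). A minimal one-pass sketch with a dictionary of previously seen values runs in O(n); the function name hash_map_pass is mine, not part of the original file, but it keeps the same keyword-only signature.
```python
def hash_map_pass(*, nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index where it was first seen
    for i, num in enumerate(nums):
        complement = target - num
        if complement in seen:
            return [seen[complement], i]
        seen[num] = i
    return []

# Example: hash_map_pass(nums=[2, 7, 11, 15], target=9) returns [0, 1].
```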
#### File: 0014_LongestCommonPrefix/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"strs,expected",
[
(["flower", "flow", "flight"], "fl"),
(["dog", "racecar", "car"], ""),
(["flight", "slight", "right"], ""),
],
)
def test_initial_pass(strs, expected):
got = solution.initial_pass(strs)
assert got == expected
@pytest.mark.parametrize(
"strs,expected",
[
(["flower", "flow", "flight"], "fl"),
(["dog", "racecar", "car"], ""),
(["flight", "slight", "right"], ""),
],
)
def test_faster(strs, expected):
got = solution.faster(strs)
assert got == expected
```
#### File: 0019_RemoveNthNodeFromEndOfList/python/test_solution.py
```python
import pytest
import solution
def create_linked_list(raw_list) -> solution.ListNode:
for i, val in enumerate(raw_list):
if i == 0:
node = solution.ListNode(val=val)
head = node
if i + 1 < len(raw_list):
node.next = solution.ListNode(val=raw_list[i+1])
node = node.next
return head
@pytest.mark.parametrize(
"raw_list,n,expected",
[
([1, 2, 3, 4, 5], 2, [1, 2, 3, 5]),
([1], 1, None),
([1, 2], 1, [1]),
]
)
def test_initial_pass(raw_list, n, expected):
head = create_linked_list(raw_list)
got = solution.initial_pass(head, n)
if expected:
expected = create_linked_list(expected)
if got and expected:
while got.val and expected.val:
assert got.val == expected.val
got = got.next
expected = expected.next
if got is None:
assert expected is None
break
else:
assert got is None
```
#### File: 0026_RemoveDuplicatesFromSortedArray/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"nums,expected_list,expected_k",
[
([1, 1, 2], [1, 2, 0], 2),
([0, 0, 1, 1, 1, 2, 2, 3, 3, 4], [0, 1, 2, 3, 4, 0, 0, 0, 0, 0], 5),
([], [], 0),
([1], [1], 1),
([1, 1], [1, 1], 1),
([1, 2], [1, 2], 2),
([1, 1, 1], [1, 1, 1], 1),
([-1, 0, 0, 0, 0, 3, 3], [-1, 0, 3], 3),
],
)
def test_initial_pass(nums, expected_list, expected_k):
got = solution.initial_pass(nums)
assert got == expected_k
for i in range(expected_k):
assert nums[i] == expected_list[i]
@pytest.mark.parametrize(
"nums,expected_list,expected_k",
[
([1, 1, 2], [1, 2, 0], 2),
([0, 0, 1, 1, 1, 2, 2, 3, 3, 4], [0, 1, 2, 3, 4, 0, 0, 0, 0, 0], 5),
([], [], 0),
([1], [1], 1),
([1, 1], [1, 1], 1),
([1, 2], [1, 2], 2),
([1, 1, 1], [1, 1, 1], 1),
([-1, 0, 0, 0, 0, 3, 3], [-1, 0, 3], 3),
],
)
def test_iterative_pass(nums, expected_list, expected_k):
got = solution.iterative_pass(nums)
assert got == expected_k
for i in range(expected_k):
assert nums[i] == expected_list[i]
```
#### File: 0070_ClimbingStairs/python/solution.py
```python
def initial_pass(n: int) -> int:
    # Number of ways to climb n stairs taking 1 or 2 steps at a time:
    # ways(n) = ways(n - 1) + ways(n - 2), a Fibonacci-style recurrence.
    one_back, two_back = 1, 1
    for _ in range(n - 1):
        one_back, two_back = one_back + two_back, one_back
    return one_back
```
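Other problems in this repo pair solution.py with a pytest file; here is a minimal sketch of such a test for the implementation above. The test file contents and cases are my own, following that convention.
```python
import pytest
import solution

@pytest.mark.parametrize("n,expected", [(1, 1), (2, 2), (3, 3), (4, 5), (5, 8)])
def test_initial_pass(n, expected):
    assert solution.initial_pass(n) == expected
```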
#### File: 0077_Combinations/python/solution.py
```python
def initial_pass(n: int, k: int) -> list[list[int]]:
    from itertools import combinations
    # Convert each combination tuple to a list to match the annotated return type.
    return [list(c) for c in combinations(range(1, n + 1), k)]
```
#### File: 0104_MaximumDepthOfBinaryTree/python/test_solution.py
```python
from typing import Optional
import pytest
from solution import Solution, TreeNode
def build_tree(tree: list[int]) -> Optional[TreeNode]:
tree = [TreeNode(val=v) for v in tree]
tree_size = len(tree)
for i, parent in enumerate(tree):
left_child = 2*i +1
right_child = 2*i + 2
if left_child < tree_size:
parent.left = tree[left_child] if tree[left_child] else None
if right_child < tree_size:
parent.right = tree[right_child] if tree[right_child] else None
if tree:
return tree[0]
else:
return None
test_cases = [
([3, 9, 20, None, None, 15, 7], 3),
([1, None, 2], 2),
([], 0),
]
@pytest.mark.parametrize("tree,expected", test_cases)
def test_maxDepth(tree, expected):
root = build_tree(tree)
got = Solution().maxDepth(root)
assert got == expected
@pytest.mark.parametrize("tree,expected", test_cases)
def test_bfs_traversal(tree, expected):
root = build_tree(tree)
got = Solution().bfs_traversal(root)
assert got == expected
```
#### File: 0116_PopulatingNextRightPointersInEachNode/python/solution.py
```python
class Node:
def __init__(self, val: int=0, left: 'Node'=None, right: 'Node'=None, next: 'Node'=None):
self.val = val
self.left = left
self.right = right
self.next = next
def first_pass(root: Node) -> Node:
if not root:
return None
node = root
while node and node.left:
left = node.left
while node:
node.left.next = node.right
node.right.next = node.next.left if node.next else None
node = node.next
node = left
return root
```
#### File: 0189_RotateArray/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"nums,k,expected",
[
([1, 2, 3, 4, 5, 6, 7], 3, [5, 6, 7, 1, 2, 3, 4]),
([-1, -100, 3, 99], 2, [3, 99, -1, -100]),
([5, 4, 3, 2, 1], 0, [5, 4, 3, 2, 1]),
([1, 2, 3, 4], 6, [3, 4, 1, 2]),
([1], 4, [1]),
],
)
def test_initial_pass(nums, k, expected):
solution.initial_pass(nums, k)
assert nums == expected
```
#### File: 0278_FirstBadVersion/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize("num_versions,bad_version", [(10, 1), (10, 8), (1, 1), (10, 10), (10, 5)])
def test_initial_pass(num_versions, bad_version):
got = solution.initial_pass(num_versions, bad_version)
assert got == bad_version
@pytest.mark.parametrize("num_versions,bad_version", [(10, 1), (10, 8), (1, 1), (10, 10), (10, 5)])
def test_optimized_pass(num_versions, bad_version):
got = solution.optimized_pass(num_versions, bad_version)
assert got == bad_version
```
#### File: 0382_LinkedListRandomNode/python/solution.py
```python
import random
from typing import Optional
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
"""
Your Solution object will be instantiated and called as such:
obj = Solution(head)
param_1 = obj.getRandom()
"""
def __init__(self, head: Optional[ListNode]):
node_vals = []
node = head
while node:
node_vals.append(node.val)
node = node.next
self.list_len = len(node_vals)
self.node_vals = node_vals
def getRandom(self) -> int:
return self.node_vals[random.randint(0, self.list_len - 1)]
```
#### File: 0542_01Matrix/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"mat,expected",
[
([[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
([[0, 0, 0], [0, 1, 0], [1, 1, 1]], [[0, 0, 0], [0, 1, 0], [1, 2, 1]]),
],
)
def test_first_pass(mat, expected):
got = solution.first_pass(mat)
assert got == expected
```
#### File: 0557_ReverseWordsInAString3/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"s,expected",
[
("Let's take LeetCode contest", "s'teL ekat edoCteeL tsetnoc"),
("God Ding", "doG gniD"),
("h", "h"),
],
)
def test_initial_solution(s, expected):
got = solution.initial_solution(s)
assert got == expected
@pytest.mark.parametrize(
"s,expected",
[
("Let's take LeetCode contest", "s'teL ekat edoCteeL tsetnoc"),
("God Ding", "doG gniD"),
("h", "h"),
],
)
def test_faster_solution(s, expected):
got = solution.faster_solution(s)
assert got == expected
```
#### File: 0567_PermutationInString/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"s1,s2,expected",
[
("ab", "eidbaooo", True),
("ab", "eidboaoo", False),
("a", "b", False),
("a", "a", True),
("abba", "yabbadabba", True),
("abba", "baab", True),
("abba", "zabbzbabzabzab", False),
("abba", "ab", False),
]
)
def test_initial_pass(s1, s2, expected):
got = solution.initial_pass(s1, s2)
assert got is expected
```
#### File: 0617_MergeTwoBinaryTrees/python/test_solution.py
```python
import pytest
import solution
def root_1a() -> solution.TreeNode:
root = solution.TreeNode(val=1)
root.left = solution.TreeNode(val=3)
root.left.left = solution.TreeNode(val=5)
root.right = solution.TreeNode(val=2)
return root
def root_1b() -> solution.TreeNode:
root = solution.TreeNode(val=2)
root.left = solution.TreeNode(val=1)
    root.left.right = solution.TreeNode(val=4)
root.right = solution.TreeNode(val=3)
root.right.right = solution.TreeNode(val=7)
return root
def expected_1() -> solution.TreeNode:
root = solution.TreeNode(val=3)
root.left = solution.TreeNode(val=4)
root.left.left = solution.TreeNode(val=5)
root.left.right = solution.TreeNode(val=4)
root.right = solution.TreeNode(val=5)
root.right.right = solution.TreeNode(val=7)
return root
def root_2a() -> solution.TreeNode:
root = solution.TreeNode(val=1)
return root
def root_2b() -> solution.TreeNode:
root = solution.TreeNode(val=1)
root.left = solution.TreeNode(val=2)
return root
def expected_2() -> solution.TreeNode:
root = solution.TreeNode(val=2)
root.left = solution.TreeNode(val=2)
return root
@pytest.mark.parametrize(
"root1,root2,expected",
[
(root_1a(), root_1b(), expected_1()),
(root_2a(), root_2b(), expected_2()),
],
)
def test_first_pass(root1, root2, expected):
got = solution.first_pass(root1, root2)
assert got.val == expected.val
```
#### File: 0695_MaxAreaOfIsland/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"grid,expected",
[
(
[
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
],
6,
),
([[0, 0, 0, 0, 0, 0, 0, 0]], 0),
([[1]], 1),
],
)
def test_initial_pass(grid, expected):
got = solution.initial_pass(grid)
assert got == expected
```
#### File: 0733_FloodFill/python/test_solution.py
```python
import pytest
import solution
@pytest.mark.parametrize(
"image,starting_row_index,starting_column_index,new_color,expected",
[
([[1, 1, 1], [1, 1, 0], [1, 0, 1]], 1, 1, 2, [[2, 2, 2], [2, 2, 0], [2, 0, 1]]),
([[0, 0, 0], [0, 0, 0]], 0, 0, 2, [[2, 2, 2], [2, 2, 2]]),
([[1]], 0, 0, 2, [[2]]),
([[1, 1], [1, 0]], 0, 0, 1, [[1, 1], [1, 0]]),
],
)
def test_initial_solution(image, starting_row_index, starting_column_index, expected, new_color):
got = solution.initial_pass(image, starting_row_index, starting_column_index, new_color)
assert got == expected
```
#### File: 0876_MiddleOfTheLinkedList/python/test_solution.py
```python
import pytest
import solution
def create_linked_list(raw_list: list) -> solution.ListNode:
for i, val in enumerate(raw_list):
if not i:
node = solution.ListNode(val=val)
head = node
if i + 1 < len(raw_list):
node.next = solution.ListNode(raw_list[i+1])
node = node.next
return head
@pytest.mark.parametrize(
"raw_list,expected",
[
([1,2,3,4,5],[3,4,5]),
([1,2,3,4,5,6],[4,5,6]),
],
)
def test_initial_pass(raw_list,expected):
head = create_linked_list(raw_list)
got = solution.initial_pass(head)
expected = create_linked_list(expected)
while got.next and expected.next:
assert got.val == expected.val
got = got.next
expected = expected.next
@pytest.mark.parametrize(
"raw_list,expected",
[
([1,2,3,4,5],[3,4,5]),
([1,2,3,4,5,6],[4,5,6]),
],
)
def test_faster_solution(raw_list,expected):
head = create_linked_list(raw_list)
got = solution.faster_solution(head)
expected = create_linked_list(expected)
while got.next and expected.next:
assert got.val == expected.val
got = got.next
expected = expected.next
```
#### File: 1306_JumpGameIII/python/solution.py
```python
def initial_pass(arr: list[int], start: int) -> bool:
if 0 not in arr:
return False
elif start < 0 or start >= len(arr):
return False
elif arr[start] == 0:
return True
# We've visited the position
elif arr[start] < 0:
return False
# Add or subtract the value at the index position to see if we reach the index
# position containing a 0.
# Continue breaking down the problem looking for values that sum to a possible value
# Prevent infinite recursion: If the addition reaches an index with the same value
# as the recently added value, then the subtraction attempt will point back to the
# index we just came from. This will also occur with the subtraction attempt.
# Solution: Keep track of the visited index positions.
val_at_start = arr[start]
arr[start] = -1
add_attempt = initial_pass(arr, start + val_at_start)
subtract_attempt = initial_pass(arr, start - val_at_start)
return add_attempt or subtract_attempt
```
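The recursive solution above marks visited indices by negating them in place. A minimal iterative alternative sketch (my own, not from the file) does the same reachability check with an explicit queue and a visited set, which avoids mutating the input and deep recursion.
```python
from collections import deque

def bfs_pass(arr: list[int], start: int) -> bool:
    # Breadth-first search over reachable indices; stop when a 0 is found.
    if not 0 <= start < len(arr):
        return False
    seen = {start}
    queue = deque([start])
    while queue:
        i = queue.popleft()
        if arr[i] == 0:
            return True
        for j in (i + arr[i], i - arr[i]):
            if 0 <= j < len(arr) and j not in seen:
                seen.add(j)
                queue.append(j)
    return False

# Example: bfs_pass([4, 2, 3, 0, 3, 1, 2], start=5) is True.
```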
#### File: 1446_ConsecutiveCharacters/python/test_solution.py
```python
import pytest
import solution
test_cases = [
("leetcode", 2),
("abbcccddddeeeeedcba", 5),
("triplepillooooow", 5),
("hooraaaaaaaaaaay", 11),
("tourist", 1),
("cc", 2),
]
@pytest.mark.parametrize("s,expected", test_cases)
def test_initial_pass(s, expected):
got = solution.initial_pass(s)
assert got == expected
@pytest.mark.parametrize("s,expected", test_cases)
def test_optimized_pass(s, expected):
got = solution.optimized_pass(s)
assert got == expected
```
|
{
"source": "Jeffwan/aws-eks-deep-learning-benchmark",
"score": 2
}
|
#### File: benchmark/test/install_addon.py
```python
import argparse
import logging
import yaml
import datetime
import time
import urllib
import os
from kubernetes import client as k8s_client
from kubernetes.client import rest
from benchmark.test import deploy_utils
from kubeflow.testing import util
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--namespace", default='default', type=str, help=("The namespace to use."))
parser.add_argument(
"--base_dir",
default=None,
type=str,
help=("The source directory of all repositories."))
parser.add_argument(
"--github_secret_name",
default="github-token",
type=str,
help=("The github token to be created."))
parser.add_argument(
"--kubeflow_registry",
default="github.com/jeffwan/kubeflow/tree/master/kubeflow",
type=str,
help=("The github token to be created."))
parser.add_argument(
"--kubebench_registry",
default="github.com/kubeflow/kubebench/tree/master/kubebench",
type=str,
help=("The github token to be created."))
args, _ = parser.parse_known_args()
return args
def install_gpu_drivers(api_client):
"""Install GPU drivers on the cluster.
Return:
ds: Daemonset for the GPU installer
"""
logging.info("Install GPU Drivers.")
# Fetch the daemonset to install the drivers.
link = "https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v1.11/nvidia-device-plugin.yml" # pylint: disable=line-too-long
logging.info("Using daemonset file: %s", link)
f = urllib.urlopen(link)
daemonset_spec = yaml.load(f)
ext_client = k8s_client.ExtensionsV1beta1Api(api_client)
try:
namespace = daemonset_spec["metadata"]["namespace"]
ext_client.create_namespaced_daemon_set(namespace, daemonset_spec)
except rest.ApiException as e:
# Status appears to be a string.
if e.status == 409:
logging.info("GPU driver daemon set has already been installed")
else:
raise
def wait_for_gpu_driver_install(api_client,
timeout=datetime.timedelta(minutes=10)):
"""Wait until some nodes are available with GPUs."""
end_time = datetime.datetime.now() + timeout
api = k8s_client.CoreV1Api(api_client)
while datetime.datetime.now() <= end_time:
nodes = api.list_node()
for n in nodes.items:
if n.status.capacity.get("nvidia.com/gpu", 0) > 0:
logging.info("GPUs are available.")
return
logging.info("Waiting for GPUs to be ready.")
time.sleep(15)
logging.error("Timeout waiting for GPU nodes to be ready.")
raise TimeoutError("Timeout waiting for GPU nodes to be ready.")
def install_kubeflow(api_client, app_dir, namespace):
"""Deploy required kubeflow packages to run benchmark"""
util.run(["ks", "generate", "argo", "argo"], cwd=app_dir)
util.run(["ks", "generate", "tf-job-operator", "tf-job-operator"], cwd=app_dir)
util.run(["ks", "generate", "mpi-operator", "mpi-operator"], cwd=app_dir)
if namespace != 'default':
cmd = "ks param set tf-job-operator namespace " + namespace
util.run(cmd.split(), cwd=app_dir)
cmd = "ks param set mpi-operator namespace " + namespace
util.run(cmd.split(), cwd=app_dir)
cmd = "ks param set argo namespace " + namespace
util.run(cmd.split(), cwd=app_dir)
apply_command = ["ks", "apply", "default", "-c", "argo",
"-c", "tf-job-operator", "-c", "mpi-operator"]
util.run(apply_command, cwd=app_dir)
def wait_for_kubeflow_install(api_client, namespace):
"""Wait until kubeflow components are up."""
# Verify that the Argo operator is deployed.
argo_deployment_name = "workflow-controller"
logging.info("Verifying Argo controller started.")
util.wait_for_deployment(api_client, namespace, argo_deployment_name)
# Verify that the TfJob operator is actually deployed.
tf_job_deployment_name = "tf-job-operator"
logging.info("Verifying TfJob controller started.")
util.wait_for_deployment(api_client, namespace, tf_job_deployment_name)
# Verify that the Argo operator is deployed.
mpi_job_deployment_name = "mpi-operator"
logging.info("Verifying MPIJob controller started.")
util.wait_for_deployment(api_client, namespace, mpi_job_deployment_name)
def install_kubebench_nfs(api_client, app_dir, namespace):
"""Deploy required kubeflow packages to run benchmark"""
util.run(["ks", "pkg", "install", "kubebench/kubebench-quickstarter"], cwd=app_dir)
util.run(["ks", "generate", "kubebench-quickstarter-service", "kubebench-quickstarter-service"], cwd=app_dir)
util.run(["ks", "generate", "kubebench-quickstarter-volume", "kubebench-quickstarter-volume"], cwd=app_dir)
util.run(["ks", "param", "set", "kubebench-quickstarter-service", "namespace", namespace], cwd=app_dir)
util.run(["ks", "param", "set", "kubebench-quickstarter-volume", "namespace", namespace], cwd=app_dir)
apply_command = ["ks", "apply", "default", "-c", "kubebench-quickstarter-service"]
util.run(apply_command, cwd=app_dir)
kubebench_nfs_deployment_name = "kubebench-nfs-deploy"
kubebench_nfs_service_name = "kubebench-nfs-svc"
logging.info("Verifying NFS deployment started")
util.wait_for_deployment(api_client, namespace, kubebench_nfs_deployment_name)
service = get_k8s_service(api_client, namespace, kubebench_nfs_service_name)
util.run(["ks", "param", "set", "kubebench-quickstarter-volume", "nfsServiceIP", service.spec.cluster_ip], cwd=app_dir)
apply_command = ["ks", "apply", "default", "-c", "kubebench-quickstarter-volume"]
util.run(apply_command, cwd=app_dir)
def get_k8s_service(api_client, namespace, service_name):
"""Get service cluster IP.
Args:
api_client: K8s api client to use.
namespace: The name space for the service.
name: The name of the service.
Returns:
service: The deploy object describing the service.
Raises:
TimeoutError: If timeout waiting for service to be ready.
"""
end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
api_client = k8s_client.CoreV1Api(api_client)
while datetime.datetime.now() <= end_time:
service = api_client.read_namespaced_service(service_name, namespace)
if not service.spec or not service.spec.cluster_ip:
logging.info("Waiting for service to be ready.")
time.sleep(15)
continue
logging.info("Service %s is available.", service_name)
return service
logging.error("Timeout waiting for service %s to be ready.", service_name)
raise TimeoutError("Timeout waiting for service %s to be ready.", service_name)
def install_addon():
"""Install Benchmark Addons."""
logging.basicConfig(level=logging.INFO,
format=('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
args = parse_args()
namespace = args.namespace
base_dir = args.base_dir
kubeconfig_path = str(os.environ['KUBECONFIG'])
api_client = deploy_utils.create_k8s_client(kubeconfig_path)
# Setup GPU Device Plugin
install_gpu_drivers(api_client)
wait_for_gpu_driver_install(api_client)
# Setup ksonnet application
app_dir = deploy_utils.setup_ks_app(base_dir, namespace, api_client, args.kubeflow_registry, args.kubebench_registry)
# Deploy Kubeflow
install_kubeflow(api_client, app_dir, namespace)
wait_for_kubeflow_install(api_client, namespace)
# change the namespace to default to set up nfs-volume and nfs-server
namespace = "default"
# Deploy NFS for kubebench
install_kubebench_nfs(api_client, app_dir, namespace)
# Deploy Github Secret
github_token = str(os.environ['GITHUB_TOKEN'])
install_github_secret(api_client, namespace, args.github_secret_name, github_token)
def install_github_secret(api_client, namespace, secret_name, github_token):
"""Install Github secret on the cluster.
Return:
secret: Secret for Github token
"""
logging.info("Install Github secret.")
corev1_api = k8s_client.CoreV1Api(api_client)
try:
    secret = k8s_client.V1Secret()
    secret.metadata = k8s_client.V1ObjectMeta(name=secret_name)
secret.type = "Opaque"
secret.data = {"GITHUB_TOKEN": github_token}
corev1_api.create_namespaced_secret(namespace, secret)
except rest.ApiException as e:
# Status appears to be a string.
if e.status == 409:
logging.info("GPU driver daemon set has already been installed")
else:
raise
if __name__ == "__main__":
install_addon()
```
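The helpers above are normally driven end-to-end by `install_addon()`, but `get_k8s_service` and `install_github_secret` can be exercised on their own. The sketch below is hypothetical: it assumes a valid `KUBECONFIG`, a reachable cluster, and that it runs alongside the functions defined above; the namespace, service, and secret names are placeholders.

```python
# Hypothetical usage sketch; assumes get_k8s_service / install_github_secret
# from the module above are in scope, and a working kubeconfig.
import os
from kubernetes import client as k8s_client, config as k8s_config

k8s_config.load_kube_config(os.environ["KUBECONFIG"])
api_client = k8s_client.ApiClient()

# Poll until the NFS service has a cluster IP, as install_kubebench_nfs does.
service = get_k8s_service(api_client, "default", "kubebench-nfs-svc")
print(service.spec.cluster_ip)

# Store a GitHub token as an Opaque secret in the same namespace.
install_github_secret(api_client, "default", "github-token", os.environ["GITHUB_TOKEN"])
```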
|
{
"source": "Jeffwan/tfx",
"score": 2
}
|
#### File: orchestration/portable/runtime_parameter_utils.py
```python
from typing import cast, Mapping, Optional
from google.protobuf import descriptor
from google.protobuf import message
from tfx import types
from tfx.orchestration.portable.mlmd import common_utils
from tfx.proto.orchestration import pipeline_pb2
def _is_type_match(v_type, v):
if isinstance(v, int):
return v_type == pipeline_pb2.RuntimeParameter.INT
elif isinstance(v, float):
return v_type == pipeline_pb2.RuntimeParameter.DOUBLE
elif isinstance(v, str):
return v_type == pipeline_pb2.RuntimeParameter.STRING
else:
raise RuntimeError('Unexpected binding value type: %s' % type(v))
def _get_runtime_parameter_value(
runtime_parameter: pipeline_pb2.RuntimeParameter,
parameter_bindings: Mapping[str,
types.Property]) -> Optional[types.Property]:
"""Populates the value for a RuntimeParameter when possible.
If no external parameter binding is found, try to use the default value.
Args:
runtime_parameter: RuntimeParameter as the template.
parameter_bindings: Parameter bindings to substitute runtime parameter
placeholders in the RuntimeParameter.
Returns:
Resolved value for the RuntimeParameter if available. Returns None if the
RuntimeParameter cannot be resolved.
Raises:
RuntimeError: When the provided binding value type does not match the
RuntimeParameter type requirement.
"""
# If no external parameter bindings for this runtime parameter, try to use its
# default value.
if runtime_parameter.name not in parameter_bindings:
if runtime_parameter.HasField('default_value'):
default_value = getattr(
runtime_parameter.default_value,
runtime_parameter.default_value.WhichOneof('value'))
if _is_type_match(runtime_parameter.type, default_value):
return default_value
else:
raise RuntimeError('Runtime parameter type %s does not match with %s.' %
(type(default_value), runtime_parameter))
else:
return None
# External parameter binding is found, try to use it.
binding_value = parameter_bindings[runtime_parameter.name]
if _is_type_match(runtime_parameter.type, binding_value):
return binding_value
else:
raise RuntimeError('Runtime parameter type %s does not match with %s.' %
(type(binding_value), runtime_parameter))
def _get_structural_runtime_parameter_value(
structural_runtime_parameter: pipeline_pb2.StructuralRuntimeParameter,
parameter_bindings: Mapping[str, types.Property]) -> Optional[str]:
"""Populates the value for a StructuralRuntimeParameter when possible.
Only populates the value when all parts in the structural runtime parameter
are resolved to values.
Args:
structural_runtime_parameter: The StructuralRuntimeParameter message as the
template.
parameter_bindings: Parameter bindings to substitute runtime parameter
placeholders in the StructuralRuntimeParameter.
Returns:
A string if all parts are resolved. Returns None otherwise.
"""
parts = []
for part in structural_runtime_parameter.parts:
if part.WhichOneof('value') == 'constant_value':
parts.append(part.constant_value)
else:
part_value = _get_runtime_parameter_value(part.runtime_parameter,
parameter_bindings)
if part_value is None:
return None
parts.append(part_value)
# If we reach here, all parts are resolved to strings; concatenate them
# and use the result as the final value.
return ''.join(parts)
def substitute_runtime_parameter(
msg: message.Message, parameter_bindings: Mapping[str,
types.Property]) -> None:
"""Utility function to substitute runtime parameter placeholders with values.
Args:
msg: The original message to change. Only messages defined under
pipeline_pb2 will be supported. Other types will result in no-op.
parameter_bindings: A dict of parameter keys to parameter values that will
be used to substitute the runtime parameter placeholder.
Returns:
None
"""
# If the message is a pipeline_pb2.Value instance, try to substitute it
# using the runtime parameter bindings.
if isinstance(msg, pipeline_pb2.Value):
value = cast(pipeline_pb2.Value, msg)
which = value.WhichOneof('value')
if which == 'runtime_parameter':
real_value = _get_runtime_parameter_value(value.runtime_parameter,
parameter_bindings)
if real_value is None:
return
value.Clear()
common_utils.set_metadata_value(
metadata_value=value.field_value, value=real_value)
if which == 'structural_runtime_parameter':
real_value = _get_structural_runtime_parameter_value(
value.structural_runtime_parameter, parameter_bindings)
if real_value is None:
return
value.Clear()
common_utils.set_metadata_value(
metadata_value=value.field_value, value=real_value)
return
# For other cases, recursively call into sub-messages if any.
fields = msg.ListFields()
for field, sub_message in fields:
# No-op for non-message types.
if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:
continue
# Evaluates every value in a map.
elif (field.message_type.has_options and
field.message_type.GetOptions().map_entry):
for key in sub_message:
substitute_runtime_parameter(sub_message[key], parameter_bindings)
# Evaluates every entry in a list.
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in sub_message:
substitute_runtime_parameter(element, parameter_bindings)
# Evaluates sub-message.
else:
substitute_runtime_parameter(sub_message, parameter_bindings)
```
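For orientation, the sketch below shows how `substitute_runtime_parameter` is intended to be driven. It is illustrative only: the field names mirror the `pipeline_pb2` usage in the module above, while the parameter name and binding value are made-up assumptions.

```python
# Illustrative sketch; the parameter name and binding value are assumptions.
from tfx.proto.orchestration import pipeline_pb2
from tfx.orchestration.portable import runtime_parameter_utils

value = pipeline_pb2.Value()
value.runtime_parameter.name = 'num_steps'
value.runtime_parameter.type = pipeline_pb2.RuntimeParameter.INT

# Resolve the placeholder in place against an external binding.
runtime_parameter_utils.substitute_runtime_parameter(value, {'num_steps': 100})

# After substitution the oneof switches to field_value holding the bound int.
print(value.field_value.int_value)
```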
|
{
"source": "jeffwdoak/pycalphad",
"score": 3
}
|
#### File: pycalphad/core/lower_convex_hull.py
```python
from __future__ import print_function
from pycalphad.core.cartesian import cartesian
from pycalphad.core.constants import MIN_SITE_FRACTION
from .hyperplane import hyperplane
import numpy as np
# The energetic difference, in J/mol-atom, below which is considered 'zero'
DRIVING_FORCE_TOLERANCE = 1e-8
def lower_convex_hull(global_grid, result_array):
"""
Find the simplices on the lower convex hull satisfying the specified
conditions in the result array.
Parameters
----------
global_grid : Dataset
A sample of the energy surface of the system.
result_array : Dataset
This object will be modified!
Coordinates correspond to conditions axes.
Returns
-------
None. Results are written to result_array.
Notes
-----
This routine will not check if any simplex is degenerate.
Degenerate simplices will manifest with duplicate or NaN indices.
Examples
--------
None yet.
"""
indep_conds = sorted([x for x in sorted(result_array.coords.keys()) if x in ['T', 'P']])
comp_conds = sorted([x for x in sorted(result_array.coords.keys()) if x.startswith('X_')])
pot_conds = sorted([x for x in sorted(result_array.coords.keys()) if x.startswith('MU_')])
# Determine starting combinations of chemical potentials and compositions
# TODO: Check Gibbs phase rule compliance
if len(pot_conds) > 0:
raise NotImplementedError('Chemical potential conditions are not yet supported')
# FIRST CASE: Only composition conditions specified
# We only need to compute the dependent composition value directly
# Initialize trial points as lowest energy point in the system
if (len(comp_conds) > 0) and (len(pot_conds) == 0):
comp_values = cartesian([result_array.coords[cond] for cond in comp_conds])
# Insert dependent composition value
# TODO: Handle W(comp) as well as X(comp) here
specified_components = {x[2:] for x in comp_conds}
dependent_component = set(result_array.coords['component'].values) - specified_components
dependent_component = list(dependent_component)
if len(dependent_component) != 1:
raise ValueError('Number of dependent components is different from one')
insert_idx = sorted(result_array.coords['component'].values).index(dependent_component[0])
comp_values = np.concatenate((comp_values[..., :insert_idx],
1 - np.sum(comp_values, keepdims=True, axis=-1),
comp_values[..., insert_idx:]),
axis=-1)
# Prevent compositions near an edge from going negative
comp_values[np.nonzero(comp_values < MIN_SITE_FRACTION)] = MIN_SITE_FRACTION*10
# TODO: Assumes N=1
comp_values /= comp_values.sum(axis=-1, keepdims=True)
#print(comp_values)
# SECOND CASE: Only chemical potential conditions specified
# TODO: Implementation of chemical potential
# THIRD CASE: Mixture of composition and chemical potential conditions
# TODO: Implementation of mixed conditions
# factored out via profiling
result_array_GM_values = result_array.GM.values
result_array_points_values = result_array.points.values
result_array_MU_values = result_array.MU.values
result_array_NP_values = result_array.NP.values
result_array_X_values = result_array.X.values
result_array_Y_values = result_array.Y.values
result_array_Phase_values = result_array.Phase.values
global_grid_GM_values = global_grid.GM.values
global_grid_X_values = global_grid.X.values
it = np.nditer(result_array_GM_values, flags=['multi_index'])
comp_coord_shape = tuple(len(result_array.coords[cond]) for cond in comp_conds)
while not it.finished:
indep_idx = it.multi_index[:len(indep_conds)]
if len(comp_conds) > 0:
comp_idx = np.ravel_multi_index(it.multi_index[len(indep_conds):], comp_coord_shape)
idx_comp_values = comp_values[comp_idx]
else:
idx_comp_values = np.atleast_1d(1.)
idx_global_grid_X_values = global_grid_X_values[indep_idx]
idx_global_grid_GM_values = global_grid_GM_values[indep_idx]
idx_result_array_MU_values = result_array_MU_values[it.multi_index]
idx_result_array_NP_values = result_array_NP_values[it.multi_index]
idx_result_array_GM_values = result_array_GM_values[it.multi_index]
idx_result_array_points_values = result_array_points_values[it.multi_index]
result_array_GM_values[it.multi_index] = \
hyperplane(idx_global_grid_X_values, idx_global_grid_GM_values,
idx_comp_values, idx_result_array_MU_values,
idx_result_array_NP_values, idx_result_array_points_values)
# Copy phase values out
points = result_array_points_values[it.multi_index]
result_array_Phase_values[it.multi_index] = global_grid.Phase.values[indep_idx].take(points, axis=0)
result_array_X_values[it.multi_index] = global_grid.X.values[indep_idx].take(points, axis=0)
result_array_Y_values[it.multi_index] = global_grid.Y.values[indep_idx].take(points, axis=0)
# Special case: Sometimes fictitious points slip into the result
# This can happen when we calculate stoichiometric phases by themselves
if '_FAKE_' in result_array_Phase_values[it.multi_index]:
# Chemical potentials are meaningless in this case
idx_result_array_MU_values[...] = 0
new_energy = 0.
molesum = 0.
for idx in range(len(result_array_Phase_values[it.multi_index])):
midx = it.multi_index + (idx,)
if result_array_Phase_values[midx] == '_FAKE_':
result_array_Phase_values[midx] = ''
result_array_X_values[midx] = np.nan
result_array_Y_values[midx] = np.nan
idx_result_array_NP_values[idx] = np.nan
else:
new_energy += idx_result_array_NP_values[idx] * global_grid.GM.values[np.index_exp[indep_idx + (points[idx],)]]
molesum += idx_result_array_NP_values[idx]
result_array_GM_values[it.multi_index] = new_energy / molesum
it.iternext()
del result_array['points']
return result_array
```
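The dependent-composition handling in the first case above is easy to miss, so the standalone NumPy sketch below reproduces just that step. The `MIN_SITE_FRACTION` value and the two-component condition grid are assumptions for illustration, not taken from a real calculation.

```python
# Standalone illustration of the dependent-composition insertion; values are assumptions.
import numpy as np

MIN_SITE_FRACTION = 1e-12  # stand-in for pycalphad.core.constants.MIN_SITE_FRACTION
comp_values = np.array([[0.1, 0.2], [0.3, 0.7]])  # X_A and X_B condition grid
insert_idx = 2  # the dependent component sorts last in this example

comp_values = np.concatenate(
    (comp_values[..., :insert_idx],
     1 - np.sum(comp_values, keepdims=True, axis=-1),
     comp_values[..., insert_idx:]),
    axis=-1)

# Keep compositions strictly positive, then renormalize so each row sums to 1.
comp_values[comp_values < MIN_SITE_FRACTION] = MIN_SITE_FRACTION * 10
comp_values /= comp_values.sum(axis=-1, keepdims=True)
print(comp_values)
```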
|
{
"source": "jeffwecan/hvac",
"score": 3
}
|
#### File: api/auth_methods/cert.py
```python
from hvac.api.vault_api_base import VaultApiBase
from hvac.utils import validate_pem_format
from hvac import exceptions, utils
import os
class Cert(VaultApiBase):
"""Cert Auth Method (API).
Reference: https://www.vaultproject.io/api/auth/cert/index.html
"""
def create_ca_certificate_role(self, name, certificate, allowed_common_names="", allowed_dns_sans="", allowed_email_sans="",
allowed_uri_sans="", allowed_organizational_units="", required_extensions="", display_name="", token_ttl=0,
token_max_ttl=0, token_policies=[], token_bound_cidrs=[], token_explicit_max_ttl=0,
token_no_default_policy=False, token_num_uses=0, token_period=0, token_type="", mount_point="cert"):
"""
Create CA Certificate Role
Sets a CA cert and associated parameters in a role name.
Supported methods:
POST: /auth/<mount point>/certs/:name. Produces: 204 (empty body)
:param name: The name of the certificate role.
:type name: str
:param certificate: The PEM-format CA certificate.
:type certificate: str
:param allowed_common_names: Constrain the Common Names in the client certificate with a globbed pattern. Value is a comma-separated list of patterns. Authentication requires at least one Name matching at least one pattern. If not set, defaults to allowing all names.
:type allowed_common_names: str | list
:param allowed_dns_sans: Constrain the Alternative Names in the client certificate with a globbed pattern. Value is a comma-separated list of patterns. Authentication requires at least one DNS matching at least one pattern. If not set, defaults to allowing all dns.
:type allowed_dns_sans: str | list
:param allowed_email_sans: Constrain the Alternative Names in the client certificate with a globbed pattern. Value is a comma-separated list of patterns. Authentication requires at least one Email matching at least one pattern. If not set, defaults to allowing all emails.
:type allowed_email_sans: str | list
:param allowed_uri_sans: Constrain the Alternative Names in the client certificate with a globbed pattern. Value is a comma-separated list of URI patterns. Authentication requires at least one URI matching at least one pattern. If not set, defaults to allowing all URIs.
:type allowed_uri_sans: str | list
:param allowed_organizational_units: Constrain the Organizational Units (OU) in the client certificate with a globbed pattern. Value is a comma-separated list of OU patterns. Authentication requires at least one OU matching at least one pattern. If not set, defaults to allowing all OUs.
:type allowed_organizational_units: str | list
:param required_extensions: Require specific Custom Extension OIDs to exist and match the pattern. Value is a comma separated string or array of oid:value. Expects the extension value to be some type of ASN1 encoded string. All conditions must be met. Supports globbing on value.
:type required_extensions: str | list
:param display_name: The display_name to set on tokens issued when authenticating against this CA certificate. If not set, defaults to the name of the role.
:type display_name: str | unicode
:param token_ttl: The incremental lifetime for generated tokens. The current value of this will be referenced at renewal time.
:type token_ttl: int | str
:param token_max_ttl: The maximum lifetime for generated tokens. The current value of this will be referenced at renewal time.
:type token_max_ttl: int | str
:param token_policies: List of policies to encode onto generated tokens. Depending on the auth method, this list may be supplemented by user/group/other values.
:type token_policies: list | str
:param token_bound_cidrs: List of CIDR blocks; if set, specifies blocks of IP addresses which can authenticate successfully, and ties the resulting token to these blocks as well.
:type token_bound_cidrs: list | str
:param token_explicit_max_ttl: If set, will encode an explicit max TTL onto the token. This is a hard cap even if token_ttl and token_max_ttl would otherwise allow a renewal.
:type token_explicit_max_ttl: int | str
:param token_no_default_policy: If set, the default policy will not be set on generated tokens; otherwise it will be added to the policies set in token_policies.
:type token_no_default_policy: bool
:param token_num_uses: The maximum number of times a generated token may be used (within its lifetime); 0 means unlimited. If you require the token to have the ability to create child tokens, you will need to set this value to 0.
:type token_num_uses: int
:param token_period: The period, if any, to set on the token.
:type token_period: int | str
:param token_type: The type of token that should be generated. Can be service, batch, or default to use the mount's tuned default (which unless changed will be service tokens). For token store roles, there are two additional possibilities: default-service and default-batch which specify the type to return unless the client requests a different type at generation time.
:type token_type: str
:param mount_point:
:type mount_point:
"""
try:
with open(certificate, 'r') as f_cert:
cert = f_cert.read()
except FileNotFoundError:
cert = certificate
params = utils.remove_nones(
{
"name": name,
"certificate": cert,
"allowed_common_names": allowed_common_names,
"allowed_dns_sans": allowed_dns_sans,
"allowed_email_sans": allowed_email_sans,
"allowed_uri_sans": allowed_uri_sans,
"allowed_organizational_units": allowed_organizational_units,
"required_extensions": required_extensions,
"display_name": display_name,
"token_ttl": token_ttl,
"token_max_ttl": token_max_ttl,
"token_policies": token_policies,
"token_bound_cidrs": token_bound_cidrs,
"token_explicit_max_ttl": token_explicit_max_ttl,
"token_no_default_policy": token_no_default_policy,
"token_num_uses": token_num_uses,
"token_period": token_period,
"token_type": token_type,
}
)
api_path = '/v1/auth/{mount_point}/certs/{name}'.format(mount_point=mount_point, name=name)
return self._adapter.post(
url=api_path,
json=params,
)
def read_ca_certificate_role(self, name, mount_point='cert'):
"""
Gets information associated with the named role.
Supported methods:
GET: /auth/<mount point>/certs/{name}. Produces: 200 application/json
:param name: The name of the certificate role
:type name: str | unicode
:param mount_point:
:type mount_point:
:return: The JSON response of the read_ca_certificate_role request.
:rtype: dict
"""
params = {
'name': name,
}
api_path = '/v1/auth/{mount_point}/certs/{name}'.format(mount_point=mount_point, name=name)
return self._adapter.get(
url=api_path,
json=params,
)
def list_certificate_roles(self, mount_point='cert'):
"""
Lists configured certificate names.
Supported methods:
LIST: /auth/<mount point>/certs. Produces: 200 application/json
:param mount_point:
:type mount_point:
:return: The response of the list_certificate request.
:rtype: requests.Response
"""
api_path = '/v1/auth/{mount_point}/certs'.format(mount_point=mount_point)
return self._adapter.list(
url=api_path
)
def delete_certificate_role(self, name, mount_point='cert'):
"""
Delete the named certificate role.
Supported methods:
DELETE: /auth/<mount point>/certs/{name}. Produces: 204 (empty body)
:param name: The name of the certificate role.
:type name: str | unicode
:param mount_point:
:type mount_point:
"""
api_path = '/v1/auth/{mount_point}/certs/{name}'.format(mount_point=mount_point, name=name)
return self._adapter.delete(
url=api_path,
)
def configure_tls_certificate(self, mount_point='cert', disable_binding=False):
"""
Configure options for the method.
Supported methods:
POST: /auth/<mount point>/config. Produces: 204 (empty body)
:param disable_binding: If set, during renewal, skips the matching of presented client identity with the client
identity used during login.
:type disable_binding: bool
:param mount_point:
:type mount_point:
"""
params = {
'disable_binding': disable_binding,
}
api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
)
def login(self, name="", cacert=False, cert_pem="", key_pem="", mount_point='cert', use_token=True):
"""
Log in and fetch a token. If there is a valid chain to a CA configured in the method and all role constraints
are matched, a token will be issued. If the certificate has DNS SANs in it, each of those will be verified.
If Common Name is required to be verified, then it should be a fully qualified DNS domain name and must be
duplicated as a DNS SAN
Supported methods:
POST: /auth/<mount point>/login Produces: 200 application/json
:param name: Authenticate against only the named certificate role, returning its policy list if successful. If
not set, defaults to trying all certificate roles and returning any one that matches.
:type name: str | unicode
:param cacert: The value used here is for the Vault TLS Listener CA certificate, not the CA that issued the
client authentication certificate. This can be omitted if the CA used to issue the Vault server certificate
is trusted by the local system executing this command.
:type cacert: str | bool
:param cert_pem: Location of the cert.pem used to authenticate the host.
:type cert_pem: str | unicode
:param key_pem: Location of the key.pem used to authenticate the host.
:type key_pem: str | unicode
:param mount_point:
:type mount_point:
:param use_token: Whether the returned token is stored on the client.
:type use_token: bool
:return: The response of the login request.
:rtype: requests.Response
"""
params = {}
if name != "":
params['name'] = name
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
# Must have cert checking or a CA cert. This is caught lower down but harder to grok
if not cacert:
# If a cacert is not provided, try to drop down to the adapter and get the cert there.
# If the cacert is not set on the adapter either, login will fail as well.
if not self._adapter._kwargs.get('verify'):
raise self.CertificateAuthError("cacert must be True, a file_path, or valid CA Certificate.")
else:
cacert = self._adapter._kwargs.get('verify')
else:
validate_pem_format(cacert, "verify")
# If cert_pem is a PEM string, it's ready to be used and either has the key with it or the key is provided as an arg.
try:
if validate_pem_format(cert_pem, "cert_pem"):
tls_update = True
except exceptions.VaultError as e:
if isinstance(e, type(exceptions.ParamValidationError())):
tls_update = {}
if not (os.path.exists(cert_pem) or self._adapter._kwargs.get('cert')):
raise FileNotFoundError("Can't find the certificate.")
try:
for tls_part, value in {'cert_pem': cert_pem, 'key_pem': key_pem}.items():
if value != "":
tls_update[tls_part] = value
except ValueError:
tls_update = True
else:
raise e
additional_request_kwargs = {}
if tls_update:
additional_request_kwargs = {
"verify": cacert,
# need to define dict as cert is a tuple
"cert": tuple([cert_pem, key_pem]),
}
return self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
**additional_request_kwargs
)
class CertificateAuthError(Exception):
pass
```
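In practice the class above is reached through an `hvac.Client` instance rather than instantiated directly. The sketch below is hedged: it assumes a reachable Vault server, the default `cert` mount path, and placeholder role names and file paths.

```python
# Hedged usage sketch; the Vault URL, role name, and certificate path are placeholders.
import hvac

client = hvac.Client(url='https://vault.example.com:8200')

# create_ca_certificate_role accepts either a file path (read via open() above) or a PEM string.
client.auth.cert.create_ca_certificate_role(
    name='web-servers',
    certificate='/path/to/ca.pem',
)
print(client.auth.cert.list_certificate_roles())
```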
#### File: api/auth_methods/ldap.py
```python
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase
DEFAULT_MOUNT_POINT = 'ldap'
class Ldap(VaultApiBase):
"""LDAP Auth Method (API).
Reference: https://www.vaultproject.io/api/auth/ldap/index.html
"""
def configure(self, user_dn=None, group_dn=None, url=None, case_sensitive_names=None, starttls=None,
tls_min_version=None, tls_max_version=None, insecure_tls=None, certificate=None, bind_dn=None,
bind_pass=None, user_attr=None, discover_dn=None, deny_null_bind=True, upn_domain=None,
group_filter=None, group_attr=None, use_token_groups=None, token_ttl=None, token_max_ttl=None,
mount_point=DEFAULT_MOUNT_POINT):
"""
Configure the LDAP auth method.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param user_dn: Base DN under which to perform user search. Example: ou=Users,dc=example,dc=com
:type user_dn: str | unicode
:param group_dn: LDAP search base to use for group membership search. This can be the root containing either
groups or users. Example: ou=Groups,dc=example,dc=com
:type group_dn: str | unicode
:param url: The LDAP server to connect to. Examples: ldap://ldap.myorg.com, ldaps://ldap.myorg.com:636.
Multiple URLs can be specified with commas, e.g. ldap://ldap.myorg.com,ldap://ldap2.myorg.com; these will be
tried in-order.
:type url: str | unicode
:param case_sensitive_names: If set, user and group names assigned to policies within the backend will be case
sensitive. Otherwise, names will be normalized to lower case. Case will still be preserved when sending the
username to the LDAP server at login time; this is only for matching local user/group definitions.
:type case_sensitive_names: bool
:param starttls: If true, issues a StartTLS command after establishing an unencrypted connection.
:type starttls: bool
:param tls_min_version: Minimum TLS version to use. Accepted values are tls10, tls11 or tls12.
:type tls_min_version: str | unicode
:param tls_max_version: Maximum TLS version to use. Accepted values are tls10, tls11 or tls12.
:type tls_max_version: str | unicode
:param insecure_tls: If true, skips LDAP server SSL certificate verification - insecure, use with caution!
:type insecure_tls: bool
:param certificate: CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded.
:type certificate: str | unicode
:param bind_dn: Distinguished name of object to bind when performing user search. Example:
cn=vault,ou=Users,dc=example,dc=com
:type bind_dn: str | unicode
:param bind_pass: Password to use along with binddn when performing user search.
:type bind_pass: str | unicode
:param user_attr: Attribute on user attribute object matching the username passed when authenticating. Examples:
sAMAccountName, cn, uid
:type user_attr: str | unicode
:param discover_dn: Use anonymous bind to discover the bind DN of a user.
:type discover_dn: bool
:param deny_null_bind: This option prevents users from bypassing authentication when providing an empty password.
:type deny_null_bind: bool
:param upn_domain: The userPrincipalDomain used to construct the UPN string for the authenticating user. The
constructed UPN will appear as [username]@UPNDomain. Example: example.com, which will cause vault to bind as
[username]@example.com.
:type upn_domain: str | unicode
:param group_filter: Go template used when constructing the group membership query. The template can access the
following context variables: [UserDN, Username]. The default is
`(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`, which is compatible with several
common directory schemas. To support nested group resolution for Active Directory, instead use the following
query: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}})).
:type group_filter: str | unicode
:param group_attr: LDAP attribute to follow on objects returned by groupfilter in order to enumerate user group
membership. Examples: for groupfilter queries returning group objects, use: cn. For queries returning user
objects, use: memberOf. The default is cn.
:type group_attr: str | unicode
:param use_token_groups: If true, groups are resolved through Active Directory tokens. This may speed up nested
group membership resolution in large directories.
:type use_token_groups: bool
:param token_ttl: The incremental lifetime for generated tokens.
:type token_ttl: str | unicode
:param token_max_ttl: The maximum lifetime for generated tokens.
:type token_max_ttl: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure request.
:rtype: requests.Response
"""
params = utils.remove_nones({
'url': url,
'userdn': user_dn,
'groupdn': group_dn,
'case_sensitive_names': case_sensitive_names,
'starttls': starttls,
'tls_min_version': tls_min_version,
'tls_max_version': tls_max_version,
'insecure_tls': insecure_tls,
'certificate': certificate,
'userattr': user_attr,
'discoverdn': discover_dn,
'deny_null_bind': deny_null_bind,
'groupfilter': group_filter,
'groupattr': group_attr,
'upndomain': upn_domain,
'binddn': bind_dn,
'bindpass': bind_pass,
'use_token_groups': use_token_groups,
'token_ttl': token_ttl,
'token_max_ttl': token_max_ttl
})
api_path = utils.format_url('/v1/auth/{mount_point}/config', mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
)
def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT):
"""
Retrieve the LDAP configuration for the auth method.
Supported methods:
GET: /auth/{mount_point}/config. Produces: 200 application/json
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the read_configuration request.
:rtype: dict
"""
api_path = utils.format_url('/v1/auth/{mount_point}/config', mount_point=mount_point)
return self._adapter.get(
url=api_path,
)
def create_or_update_group(self, name, policies=None, mount_point=DEFAULT_MOUNT_POINT):
"""
Create or update LDAP group policies.
Supported methods:
POST: /auth/{mount_point}/groups/{name}. Produces: 204 (empty body)
:param name: The name of the LDAP group
:type name: str | unicode
:param policies: List of policies associated with the group. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_group request.
:rtype: requests.Response
"""
if policies is not None and not isinstance(policies, list):
error_msg = '"policies" argument must be an instance of list or None, "{policies_type}" provided.'.format(
policies_type=type(policies),
)
raise exceptions.ParamValidationError(error_msg)
params = {}
if policies is not None:
params['policies'] = ','.join(policies)
api_path = utils.format_url(
'/v1/auth/{mount_point}/groups/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
json=params,
)
def list_groups(self, mount_point=DEFAULT_MOUNT_POINT):
"""
List existing LDAP groups that have been created in this auth method.
Supported methods:
LIST: /auth/{mount_point}/groups. Produces: 200 application/json
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the list_groups request.
:rtype: dict
"""
api_path = utils.format_url('/v1/auth/{mount_point}/groups', mount_point=mount_point)
return self._adapter.list(
url=api_path,
)
def read_group(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""
Read policies associated with a LDAP group.
Supported methods:
GET: /auth/{mount_point}/groups/{name}. Produces: 200 application/json
:param name: The name of the LDAP group
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the read_group request.
:rtype: dict
"""
params = {
'name': name,
}
api_path = utils.format_url(
'/v1/auth/{mount_point}/groups/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.get(
url=api_path,
json=params,
)
def delete_group(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""
Delete a LDAP group and policy association.
Supported methods:
DELETE: /auth/{mount_point}/groups/{name}. Produces: 204 (empty body)
:param name: The name of the LDAP group
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_group request.
:rtype: requests.Response
"""
api_path = utils.format_url(
'/v1/auth/{mount_point}/groups/{name}',
mount_point=mount_point,
name=name,
)
return self._adapter.delete(
url=api_path,
)
def create_or_update_user(self, username, policies=None, groups=None, mount_point=DEFAULT_MOUNT_POINT):
"""
Create or update LDAP users policies and group associations.
Supported methods:
POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username of the LDAP user
:type username: str | unicode
:param policies: List of policies associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type policies: list
:param groups: List of groups associated with the user. This parameter is transformed to a comma-delimited
string before being passed to Vault.
:type groups: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the create_or_update_user request.
:rtype: requests.Response
"""
list_required_params = {
'policies': policies,
'groups': groups,
}
for param_name, param_arg in list_required_params.items():
if param_arg is not None and not isinstance(param_arg, list):
error_msg = '"{param_name}" argument must be an instance of list or None, "{param_type}" provided.'.format(
param_name=param_name,
param_type=type(param_arg),
)
raise exceptions.ParamValidationError(error_msg)
params = {}
if policies is not None:
params['policies'] = ','.join(policies)
if groups is not None:
params['groups'] = ','.join(groups)
api_path = utils.format_url(
'/v1/auth/{mount_point}/users/{username}',
mount_point=mount_point,
username=username,
)
return self._adapter.post(
url=api_path,
json=params,
)
def list_users(self, mount_point=DEFAULT_MOUNT_POINT):
"""
List existing users in the method.
Supported methods:
LIST: /auth/{mount_point}/users. Produces: 200 application/json
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the list_users request.
:rtype: dict
"""
api_path = utils.format_url('/v1/auth/{mount_point}/users', mount_point=mount_point)
return self._adapter.list(
url=api_path,
)
def read_user(self, username, mount_point=DEFAULT_MOUNT_POINT):
"""
Read policies associated with a LDAP user.
Supported methods:
GET: /auth/{mount_point}/users/{username}. Produces: 200 application/json
:param username: The username of the LDAP user
:type username: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the read_user request.
:rtype: dict
"""
api_path = utils.format_url(
'/v1/auth/{mount_point}/users/{username}',
mount_point=mount_point,
username=username,
)
return self._adapter.get(
url=api_path,
)
def delete_user(self, username, mount_point=DEFAULT_MOUNT_POINT):
"""
Delete a LDAP user and policy association.
Supported methods:
DELETE: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username of the LDAP user
:type username: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_user request.
:rtype: requests.Response
"""
api_path = utils.format_url(
'/v1/auth/{mount_point}/users/{username}',
mount_point=mount_point,
username=username,
)
return self._adapter.delete(
url=api_path,
)
def login(self, username, password, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
"""
Log in with LDAP credentials.
Supported methods:
POST: /auth/{mount_point}/login/{username}. Produces: 200 application/json
:param username: The username of the LDAP user
:type username: str | unicode
:param password: The password for the LDAP user
:type password: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the login request.
:rtype: requests.Response
"""
params = {
'password': password,
}
api_path = utils.format_url(
'/v1/auth/{mount_point}/login/{username}',
mount_point=mount_point,
username=username,
)
return self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
)
```
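A typical end-to-end flow for the LDAP method combines `configure`, group mapping, and `login`. The sketch below is illustrative only; the server addresses, DNs, policy names, and credentials are placeholders, and the client is assumed to hold a token permitted to configure the auth method.

```python
# Hedged usage sketch; hostnames, DNs, policies, and credentials are placeholders.
import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='example-root-token')

client.auth.ldap.configure(
    user_dn='ou=Users,dc=example,dc=com',
    group_dn='ou=Groups,dc=example,dc=com',
    url='ldaps://ldap.example.com:636',
    bind_dn='cn=vault,ou=Users,dc=example,dc=com',
    bind_pass='example-bind-password',
)
client.auth.ldap.create_or_update_group(name='admins', policies=['admin-policy'])

login_response = client.auth.ldap.login(username='someuser', password='example-password')
print(login_response['auth']['client_token'])
```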
|
{
"source": "jeffwecan/pre-commit-hooks",
"score": 2
}
|
#### File: pre-commit-hooks/pre_commit_hooks/check_terraform.py
```python
import argparse
import subprocess
from typing import Optional, Sequence
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
retval = 0
for filename in args.filenames:
try:
# '-check' and '-diff' are flags of the `terraform fmt` subcommand.
subprocess.check_call(('terraform', 'fmt', '-check', '-diff', filename))
except subprocess.CalledProcessError as exc:
print(exc)
retval = 1
return retval
if __name__ == '__main__':
exit(main())
```
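Because the hook is just an argparse entry point, it can be smoke-tested locally without pre-commit by calling `main` with a list of filenames. The file names in the sketch below are assumptions.

```python
# Local smoke test of the hook's entry point; file names are assumptions.
from pre_commit_hooks.check_terraform import main

exit_code = main(['main.tf', 'variables.tf'])
print('formatting issues found' if exit_code else 'all files formatted')
```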
|
{
"source": "jeffweng8/pyrh",
"score": 2
}
|
#### File: pyrh/pyrh/urls.py
```python
from typing import Optional
from yarl import URL
# TODO: All url construction should happen here, not in robinhood.py
# Base
API_BASE = URL("https://api.robinhood.com")
# General
ACCOUNTS = API_BASE / "accounts/"
ACH_BASE = API_BASE / "ach/" # not implemented
APPLICATIONS = API_BASE / "applications/" # not implemented
DIVIDENDS = API_BASE / "dividends/"
DOCUMENTS = API_BASE / "documents/" # not implemented
DOCUMENT_REQUESTS = API_BASE / "upload/document_requests/" # not implemented
FUNDAMENTALS_BASE = API_BASE / "fundamentals/"
INSTRUMENTS_BASE = API_BASE / "instruments/"
MARGIN_UPGRADES = API_BASE / "margin/upgrades/" # not implemented
MARKETS = API_BASE / "markets/" # not implemented
MARKET_DATA_BASE = API_BASE / "marketdata/options/"
NEWS_BASE = API_BASE / "midlands/news/"
NOTIFICATIONS = API_BASE / "notifications/" # not implemented
ORDERS_BASE = API_BASE / "orders/"
PORTFOLIOS = API_BASE / "portfolios/"
POSITIONS = API_BASE / "positions/"
TAGS_BASE = API_BASE / "midlands/tags/tag/"
WATCHLISTS = API_BASE / "watchlists/" # not implemented
# Options
OPTIONS_BASE = API_BASE / "options/"
OPTIONS_EVENT = OPTIONS_BASE / "events/"
OPTIONS_ORDER = OPTIONS_BASE / "orders/"
OPTIONS_CHAIN_BASE = OPTIONS_BASE / "chains/"
OPTIONS_INSTRUMENTS_BASE = OPTIONS_BASE / "instruments/"
# User
USER = API_BASE / "user/"
INVESTMENT_PROFILE = USER / "investment_profile/"
# Quotes
QUOTES = API_BASE / "quotes/"
HISTORICALS = QUOTES / "historicals/"
# Auth
OAUTH_BASE: URL = API_BASE / "oauth2/"
OAUTH: URL = OAUTH_BASE / "token/"
OAUTH_REVOKE: URL = OAUTH_BASE / "revoke_token/"
MIGRATE_TOKEN: URL = OAUTH_BASE / "migrate_token/" # not implemented
PASSWORD_RESET: URL = API_BASE / "password_reset/request/" # not implemented
def build_challenge(challenge_id: str) -> URL:
"""Build challenge response url.
Args:
challenge_id: the id of the challenge passed in the oauth request flow.
Returns:
The constructed URL with the challenge_id embedded in the url path.
"""
return API_BASE / f"challenge/{challenge_id}/respond/"
def build_ach(option: str) -> URL:
"""
Combination of 3 ACH endpoints. Options include:
* iav
* relationships
* transfers
"""
return ACH_BASE / "iav/auth/" if option == "iav" else ACH_BASE / f"{option}/"
def instruments(
symbol: Optional[str] = None, query: Optional[str] = None, id_: Optional[str] = None
) -> URL:
"""Construct urls that query instruments.
Note:
Each of the arguments are mutually exclusive.
Args:
symbol: A stock ticker symbol.
query: Keyword to search for an instrument. (might be in name or ticker)
id_: The UUID that represents the instrument.
Returns:
A constructed URL with the embedded query parameter
"""
# Note:
# INSTRUMENTS_BASE/{instrument_id}/splits will not be implemented since the url is
# embedded in the results of an individual instrument result. The same logic applies
# for INSTRUMENTS_BASE/{instrument_id}/splits/{split_id}/
if symbol is not None:
return INSTRUMENTS_BASE.with_query(symbol=symbol)
elif query is not None:
return INSTRUMENTS_BASE.with_query(query=query)
elif id_ is not None:
return INSTRUMENTS_BASE / f"{id_}/"
def build_orders(order_id: str = None) -> URL:
"""Build endpoint to place orders."
Args:
order_id: the id of the order
Returns:
A constructed URL for a particular order or the base URL for all orders.
"""
if order_id is not None:
# yarl forbids appending a path segment that starts with "/"
return ORDERS_BASE / f"{order_id}/"
else:
return ORDERS_BASE
def build_news(stock: str) -> URL:
"""Build news endpoint for a particular stock
Args:
stock: The stock ticker to build the URL
Returns:
A constructed URL for the input stock ticker.
"""
return NEWS_BASE / f"{stock}/"
def build_fundamentals(stock: str) -> URL:
"""Build fundamentals endpoint for a particular stock
Args:
stock: The stock ticker to build the URL
Returns:
A constructed URL of the fundamentals for the input stock ticker.
"""
return FUNDAMENTALS_BASE / f"{stock}/"
def build_tags(tag: str) -> URL:
"""Build endpoints for tickers with a particular tag.
Args:
tag: The tag to search for.
Returns:
A constructed URL for a particular tag.
"""
return TAGS_BASE / f"{tag}/"
def build_chain(instrument_id: str) -> URL:
"""Build the query for a particular options chain.
# TODO: this isn't best practice
# (query construction should be a separate function)
Args:
instrument_id: The instrument in question.
Returns:
A constructed URL for the particular options chain search.
"""
# OPTIONS_CHAIN_BASE already ends with a trailing slash, so the query can be
# attached directly; appending "/" with yarl's "/" operator would raise ValueError.
return OPTIONS_CHAIN_BASE.with_query(equity_instrument_ids=f"{instrument_id}")
def build_options(chain_id: str, dates: str, option_type: str) -> URL:
"""Build options search endpoint.
# TODO: this really isn't best practice.
Args:
chain_id: The id for a particular options chain.
dates: The range of dates to procure # TODO: document the format of the dates
option_type: The type of the option # TODO: document the types
"""
return OPTIONS_INSTRUMENTS_BASE.with_query(
chain_id=f"{chain_id}",
expiration_dates=f"{dates}",
state="active",
tradability="tradable",
type=f"{option_type}",
)
def build_market_data(option_id: Optional[str] = None) -> URL:
"""Build market data endpoint.
Args:
option_id: the id of the option.
Returns:
A constructed URL for market data for a particular `option_id`.
"""
if option_id is not None:
return MARKET_DATA_BASE / f"{option_id}/"
else:
return MARKET_DATA_BASE
```
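Since all of the helpers return `yarl.URL` objects, they compose cleanly and can be printed directly. The sketch below shows the expected shape of a few of them; the ticker symbol and challenge id are made up.

```python
# Illustrative sketch; ticker symbol and challenge id are assumptions.
from pyrh import urls

print(urls.instruments(symbol="AAPL"))
# e.g. https://api.robinhood.com/instruments/?symbol=AAPL
print(urls.build_fundamentals("AAPL"))
# e.g. https://api.robinhood.com/fundamentals/AAPL/
print(urls.build_challenge("some-challenge-id"))
# e.g. https://api.robinhood.com/challenge/some-challenge-id/respond/
```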
|
{
"source": "jeffw-github/autoprotocol-python",
"score": 2
}
|
#### File: autoprotocol/liquid_handle/transfer.py
```python
from .liquid_handle_method import LiquidHandleMethod
from ..instruction import LiquidHandle
from ..unit import Unit
from ..util import parse_unit
# pylint: disable=unused-argument,too-many-instance-attributes,protected-access
class Transfer(LiquidHandleMethod):
"""LiquidHandleMethod for generating transfers between pairs of wells
LiquidHandleMethod for transferring volume from one well to another.
Attributes
----------
_source_liquid : LiquidClass
used to determine calibration, flowrates, and sensing thresholds
_destination_liquid : LiquidClass
used to determine calibration, flowrates, and sensing thresholds
Notes
-----
The primary entry points that for this class are:
- _aspirate_transports : generates transports for a source location
- _dispense_transports : generates transports for a destination location
See Also
--------
LiquidHandleMethod : base LiquidHandleMethod with reused functionality
Protocol.transfer : the standard interface for interacting with Transfer
"""
def __init__(
self,
tip_type=None,
blowout=True,
prime=True,
transit=True,
mix_before=False,
mix_after=True,
aspirate_z=None,
dispense_z=None,
):
"""
Parameters
----------
tip_type : str, optional
tip_type to be used for the LiquidHandlingMethod
blowout : bool or dict, optional
whether to execute a blowout step or the parameters for one.
this generates a pair of operations: an initial air aspiration
before entering any wells and a corresponding air dispense after the
last operation that involves liquid
See Also LiquidHandle.builders.blowout
prime : bool or Unit, optional
whether to execute a prime step or the parameters for one.
this generates a pair of aspirate/dispense operations around the
aspiration step in the sequence:
aspirate_prime -> aspirate_target_volume -> dispense_prime
transit : bool or Unit, optional
whether to execute a transit step or the parameters for one.
this generates a pair of operations wherein air is aspirated just
before leaving the source location and dispensed immediately
after reaching the destination location
mix_before : bool or dict, optional
whether to execute a mix_before step or the parameters for one.
this generates a series of aspirate and dispense steps within the
source location before aspirating the target volume
See Also LiquidHandle.builders.mix
mix_after : bool or dict, optional
whether to execute a mix_after step or the parameters for one.
this generates a series of aspirate and dispense steps within the
destination location after dispensing the target volume
See Also LiquidHandle.builders.mix
aspirate_z : dict, optional
the position that the tip should move to prior to aspirating, if the
position references the `liquid_surface` then aspirate movements
will track the surface with the defined offset.
See Also LiquidHandle.builders.position_z
dispense_z : dict, optional
the position that the tip should move to prior to dispensing, if the
position references the `liquid_surface` then dispense
will track the surface with the defined offset.
See Also LiquidHandle.builders.position_z
"""
super(Transfer, self).__init__(tip_type=tip_type, blowout=blowout)
# parameters for required behavior
self.aspirate_z = aspirate_z
self.dispense_z = dispense_z
# parameters for optional behavior
self.prime = prime
self.transit = transit
self.mix_before = mix_before
self.mix_after = mix_after
# LiquidHandle parameters that are generated and modified at runtime
self._source_liquid = None
self._destination_liquid = None
def _has_calibration(self):
liquids = [self._source_liquid, self._destination_liquid]
return any(_ and _._has_calibration() for _ in liquids)
def _calculate_overage_volume(self, volume):
calibration_overage = (
self._estimate_calibrated_volume(volume, self._source_liquid, self.tip_type)
- volume
)
# handle whichever is larger, prime or transit volume
if self.prime is True:
prime = self.default_prime(volume)
elif self.prime is False:
prime = Unit(0, "uL")
else:
prime = self.prime
if self.transit is True:
transit = self.default_transit(volume)
elif self.transit is False:
transit = Unit(0, "uL")
else:
transit = self.transit
prime_or_transit = max([prime, transit])
return calibration_overage + prime_or_transit
def default_blowout(self, volume):
if self._is_single_channel():
if volume < Unit("10:ul"):
blowout_vol = Unit("5:ul")
elif volume < Unit("25:ul"):
blowout_vol = Unit("10:ul")
elif volume < Unit("75:ul"):
blowout_vol = Unit("15:ul")
elif volume < Unit("100:ul"):
blowout_vol = Unit("20:ul")
else:
blowout_vol = Unit("25:ul")
else:
blowout_vol = Unit("5:ul")
return LiquidHandle.builders.blowout(
volume=blowout_vol,
initial_z=self.default_well_top_position_z(),
flowrate=None,
)
def _aspirate_transports(self, volume, density):
"""Generates source well transports
Generates and returns all of the transports that should happen within
the source well of a transfer operation.
Calls a series of _transport_`y` helper methods that each query the `y`
parameter and default_`y` method to decide on a set of behavior and use
that to define transports that are appended to the _transports list.
Parameters
----------
volume : Unit
Return
------
list
source well transports corresponding to the aspirate operation
Notes
-----
This method defines what lower level transport-generating methods are
called and in what order. It can be overwritten when adding an
entirely new set of transport-generating behavior.
See Also
--------
_dispense_transports : corresponding destination well method
"""
self._transports = []
volume = parse_unit(Unit(volume), "ul")
# No transports if no volume specified
if volume == Unit("0:ul"):
return []
self._transport_pre_buffer(volume)
self._transport_mix_before(volume)
self._transport_aspirate_target_volume(volume, density)
self._transport_aspirate_transit(volume)
return self._transports
def _dispense_transports(self, volume, density):
"""Generates destination well transports
Generates and returns all of the transports that should happen within
the destination well of a transfer operation.
Calls a series of _transport_`y` helper methods that each query the `y`
parameter and default_`y` method to decide on a set of behavior and use
that to define transports that are appended to the _transports list.
Parameters
----------
volume : Unit
Return
------
list
destination well transports corresponding to the dispense operation
Notes
-----
This method defines what lower level transport-generating methods are
called and in what order. It can be overwritten when adding an
entirely new set of transport-generating behavior.
See Also
--------
_aspirate_transports : corresponding source well method
"""
self._transports = []
volume = parse_unit(volume, "ul")
# No transports if no volume specified
if volume == Unit("0:ul"):
return []
self._transport_dispense_transit(volume)
self._transport_dispense_target_volume(volume, density)
self._transport_mix_after(volume)
self._transport_blowout(volume)
return self._transports
def _transport_mix_before(self, volume):
"""Mixes volume in the source well before aspirating
Parameters
----------
volume : Unit
See Also
--------
mix_before : holds any user defined mix_before parameters
default_mix_before : specifies default mix_before parameters
_mix : lower level helper that generates the mix_before transports
"""
if self.mix_before is True:
mix_before = self.default_mix_before(volume)
elif self.mix_before is False:
mix_before = False
else:
mix_before = self.mix_before
if mix_before is not False:
mix_before = LiquidHandle.builders.mix(**mix_before)
self._mix(
delay_time=self._source_liquid.delay_time,
liquid_class=self._source_liquid.name,
**mix_before
)
def default_mix_before(self, volume):
"""Default mix_before parameters
Parameters
----------
volume : Unit
Returns
-------
dict
mix_before params
See Also
--------
mix_before : holds any user defined mix_before parameters
_transport_mix : generates the actual mix_before transports
"""
if self._is_single_channel():
mix_z = self.default_lld_position_z(liquid=self._source_liquid)
else:
mix_z = self.default_well_bottom_position_z()
return LiquidHandle.builders.mix(
volume=volume,
repetitions=10,
initial_z=mix_z,
asp_flowrate=self._source_liquid._get_aspirate_flowrate(
volume, self.tip_type
),
dsp_flowrate=self._source_liquid._get_dispense_flowrate(
volume, self.tip_type
),
)
def _transport_aspirate_target_volume(self, volume, density):
"""Aspirates the target volume from the source location
Parameters
----------
volume : Unit
density : Unit
See Also
--------
aspirate_z : holds any user defined aspirate_z parameters
default_aspirate_z : specifies default aspirate_z parameters
prime : holds any user defined prime volume
default_prime : specifies default prime volume
_aspirate_simple : lower level helper that generates aspirate transports
_aspirate_with_prime : lower level helper for aspirating with priming
"""
aspirate_z = self.aspirate_z or self.default_aspirate_z(volume)
if self.prime is True:
prime = self.default_prime(volume)
elif self.prime is False:
prime = False
else:
prime = self.prime
aspirate_z = LiquidHandle.builders.position_z(**aspirate_z)
if prime is not False:
prime = parse_unit(prime, "uL")
self._aspirate_with_prime(
volume=volume,
prime_vol=prime,
calibrated_vol=self._source_liquid._get_calibrated_volume(
volume, self.tip_type
),
initial_z=aspirate_z,
asp_flowrate=self._source_liquid._get_aspirate_flowrate(
volume, self.tip_type
),
dsp_flowrate=self._source_liquid._get_dispense_flowrate(
volume, self.tip_type
),
delay_time=self._source_liquid.delay_time,
liquid_class=self._source_liquid.name,
density=density,
)
else:
self._aspirate_simple(
volume=volume,
calibrated_vol=self._source_liquid._get_calibrated_volume(
volume, self.tip_type
),
initial_z=aspirate_z,
flowrate=self._source_liquid._get_aspirate_flowrate(
volume, self.tip_type
),
delay_time=self._source_liquid.delay_time,
liquid_class=self._source_liquid.name,
density=density,
)
def default_aspirate_z(self, volume):
"""Default aspirate_z parameters
Parameters
----------
volume : Unit
Returns
-------
dict
aspirate position_z
See Also
--------
aspirate_z : holds any user defined aspirate_z parameters
_transport_aspirate_target_volume : generates actual aspirate transports
"""
if self._is_single_channel():
aspirate_z = self.default_lld_position_z(liquid=self._source_liquid)
else:
aspirate_z = self.default_well_bottom_position_z()
return aspirate_z
# pylint: disable=no-self-use
def default_prime(self, volume):
"""Default prime volume
Parameters
----------
volume : Unit
Returns
-------
Unit
priming volume
See Also
--------
prime : holds any user defined prime volume
_transport_aspirate_target_volume : generates actual aspirate transports
"""
return Unit(5, "ul")
def _transport_aspirate_transit(self, volume):
"""Aspirates air above the source before moving to the destination
Parameters
----------
volume : Unit
See Also
--------
transit : holds any user defined transit volume
default_transit : specifies default transit volume
_transport_dispense_transit : the corresponding air dispense step
"""
if self.transit is True:
transit = self.default_transit(volume)
elif self.transit is False:
transit = False
else:
transit = self.transit
if transit is not False:
transit_vol = parse_unit(transit, "uL")
self._aspirate_simple(
volume=transit_vol,
initial_z=self.default_well_top_position_z(),
liquid_class="air",
)
def _transport_dispense_transit(self, volume):
"""Dispenses air above the destination after moving from the source
Parameters
----------
volume : Unit
See Also
--------
transit : holds any user defined transit volume
default_transit : specifies default transit volume
_transport_aspirate_transit : the corresponding air aspirate step
"""
if self.transit is True:
transit = self.default_transit(volume)
elif self.transit is False:
transit = False
else:
transit = self.transit
if transit is not False:
transit_vol = parse_unit(transit, "uL")
self._dispense_simple(
volume=transit_vol,
initial_z=self.default_well_top_position_z(),
liquid_class="air",
)
def default_transit(self, volume):
"""Default transit volume
Parameters
----------
volume : Unit
Returns
-------
Unit
transit volume
See Also
--------
transit : holds any user defined transit volume
_transport_aspirate_transit : generates the actual transit transports
_transport_dispense_transit : generates the actual transit transports
"""
if self._is_single_channel():
transit_vol = Unit("2:ul")
else:
transit_vol = Unit("1:ul")
return transit_vol
def _transport_dispense_target_volume(self, volume, density):
"""Dispenses the target volume into the destination location
Parameters
----------
volume : Unit
density : Unit
See Also
--------
dispense_z : holds any user defined dispense_z parameters
default_dispense_z : specifies default dispense_z parameters
_dispense_simple : lower level helper that generates dispense transports
"""
dispense_z = self.dispense_z or self.default_dispense_z(volume)
dispense_z = LiquidHandle.builders.position_z(**dispense_z)
self._dispense_simple(
volume=volume,
calibrated_vol=self._source_liquid._get_calibrated_volume(
volume, self.tip_type
),
initial_z=dispense_z,
flowrate=self._source_liquid._get_dispense_flowrate(volume, self.tip_type),
delay_time=self._source_liquid.delay_time,
liquid_class=self._source_liquid.name,
density=density,
)
def default_dispense_z(self, volume):
"""Default aspirate_z parameters
Parameters
----------
volume : Unit
Returns
-------
dict
dispense position_z
See Also
--------
dispense_z : holds any user defined dispense_z parameters
_transport_dispense_target_volume : generates actual dispense transports
"""
if self._is_single_channel():
dispense_z = self.default_lld_position_z(liquid=self._destination_liquid)
else:
dispense_z = self.default_tracked_position_z()
return dispense_z
def _transport_mix_after(self, volume):
"""Mixes volume in the destination well after dispensing
Parameters
----------
volume : Unit
See Also
--------
mix_after : holds any user defined mix_after parameters
default_mix_after : specifies default mix_after parameters
_mix : lower level helper that generates the mix_after transports
"""
if self.mix_after is True:
mix_after = self.default_mix_after(volume)
elif self.mix_after is False:
mix_after = False
else:
mix_after = self.mix_after
if mix_after is not False:
mix_after = LiquidHandle.builders.mix(**mix_after)
self._mix(
delay_time=self._source_liquid.delay_time,
liquid_class=self._source_liquid.name,
**mix_after
)
def default_mix_after(self, volume):
"""Default mix_after parameters
Parameters
----------
volume : Unit
Returns
-------
dict
mix_after params
See Also
--------
mix_after : holds any user defined mix_after parameters
_transport_mix : generates the actual mix_after transports
"""
if self._is_single_channel():
mix_z = self.default_lld_position_z(liquid=self._destination_liquid)
else:
mix_z = self.default_well_bottom_position_z()
return LiquidHandle.builders.mix(
volume=volume,
repetitions=10,
initial_z=mix_z,
asp_flowrate=self._source_liquid._get_aspirate_flowrate(
volume, self.tip_type
),
dsp_flowrate=self._source_liquid._get_dispense_flowrate(
volume, self.tip_type
),
)
class DryWellTransfer(Transfer):
"""Dispenses while tracking liquid without mix_after"""
def __init__(
self,
tip_type=None,
blowout=True,
prime=True,
transit=True,
mix_before=False,
mix_after=False,
aspirate_z=None,
dispense_z=None,
):
super(DryWellTransfer, self).__init__(
tip_type=tip_type,
blowout=blowout,
prime=prime,
transit=transit,
mix_before=mix_before,
mix_after=mix_after,
aspirate_z=aspirate_z,
dispense_z=dispense_z,
)
def default_dispense_z(self, volume):
return self.default_tracked_position_z()
class PreMixBlowoutTransfer(Transfer):
"""Adds an additional blowout before the mix_after step"""
def __init__(
self,
tip_type=None,
blowout=True,
prime=True,
transit=True,
mix_before=False,
mix_after=True,
aspirate_z=None,
dispense_z=None,
pre_mix_blowout=True,
):
super(PreMixBlowoutTransfer, self).__init__(
tip_type=tip_type,
blowout=blowout,
prime=prime,
transit=transit,
mix_before=mix_before,
mix_after=mix_after,
aspirate_z=aspirate_z,
dispense_z=dispense_z,
)
self.pre_mix_blowout = pre_mix_blowout
def _dispense_transports(self, volume=None, density=None):
self._transports = []
volume = parse_unit(volume, "ul")
# No transports if no volume specified
if volume == Unit("0:ul"):
return []
self._transport_dispense_transit(volume)
self._transport_dispense_target_volume(volume, density)
self._transport_pre_mix_blowout(volume)
self._transport_mix_after(volume)
self._transport_blowout(volume)
return self._transports
def _calculate_pre_buffer(self, volume):
if self.blowout is True:
blowout = self.default_blowout(volume)
elif self.blowout is False:
blowout = {}
else:
blowout = self.blowout
if self.pre_mix_blowout is True:
secondary_blowout = self.default_pre_mix_blowout(volume)
elif self.pre_mix_blowout is False:
secondary_blowout = {}
else:
secondary_blowout = self.pre_mix_blowout
blowout_vol = parse_unit(blowout.get("volume", Unit("0:uL")), "uL")
secondary_blowout_vol = parse_unit(
secondary_blowout.get("volume", Unit("0:uL")), "uL"
)
return blowout_vol + secondary_blowout_vol
def _transport_pre_mix_blowout(self, volume):
"""Dispenses a secondary air volume befiore the mix_after step
Notes
-----
For some liquid classes this has resulted in more complete dispensing of
the target volume than just a single blowout.
Parameters
----------
volume : Unit
See Also
--------
pre_mix_blowout : holds any user defined pre_mix_blowout parameters
default_pre_mix_blowout : specifies default pre_mix_blowout parameters
"""
if self.pre_mix_blowout is True:
pre_mix_blowout = self.default_pre_mix_blowout(volume)
elif self.pre_mix_blowout is False:
pre_mix_blowout = False
else:
pre_mix_blowout = self.pre_mix_blowout
if pre_mix_blowout is not False:
pre_mix_blowout = LiquidHandle.builders.blowout(**pre_mix_blowout)
self._dispense_simple(liquid_class="air", **pre_mix_blowout)
def default_pre_mix_blowout(self, volume):
"""Default pre_mix_blowout parameters
Parameters
----------
volume : Unit
Returns
-------
dict
pre_mix_blowout params
See Also
--------
pre_mix_blowout : holds any user defined pre_mix_blowout parameters
_transport_pre_mix_blowout : generates the actual blowout transports
"""
return LiquidHandle.builders.blowout(
volume=Unit(5, "ul"),
initial_z=self.default_well_top_position_z(),
flowrate=None,
)
```
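The `mix_after`, `blowout`, and `pre_mix_blowout` options above share a tri-state convention: `True` selects the class defaults, `False` disables the step, and any other value is treated as user-supplied parameters. A minimal, self-contained sketch of that resolution pattern, using hypothetical names rather than the autoprotocol builders:
```python
from typing import Optional, Union

# Hypothetical default, standing in for what default_mix_after() would return.
DEFAULT_MIX = {"repetitions": 10, "volume": "5:microliter"}

def resolve_tristate(option: Union[bool, dict], default: dict) -> Optional[dict]:
    """True -> use defaults, False -> skip the step (None), dict -> user override."""
    if option is True:
        return dict(default)
    if option is False:
        return None
    return dict(option)

# Mirrors how _transport_mix_after and _transport_pre_mix_blowout decide
# whether to emit any transports at all.
assert resolve_tristate(True, DEFAULT_MIX) == DEFAULT_MIX
assert resolve_tristate(False, DEFAULT_MIX) is None
assert resolve_tristate({"repetitions": 3}, DEFAULT_MIX) == {"repetitions": 3}
```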
#### File: autoprotocol-python/test/test_util.py
```python
import json
class TestUtils:
@staticmethod
def read_json_file(file_path: str):
file = open("./test/data/{0}".format(file_path))
data = json.load(file)
return json.dumps(data, indent=2, sort_keys=True)
```
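`read_json_file` normalizes fixtures by re-serializing them with a fixed indent and sorted keys, so comparisons in tests are insensitive to key order. A small standard-library sketch of the same normalization (hypothetical usage, not taken from the repository):
```python
import json

def normalize(obj) -> str:
    # Same normalization as TestUtils.read_json_file: fixed indent, sorted keys.
    return json.dumps(obj, indent=2, sort_keys=True)

# Key order no longer affects equality checks.
assert normalize({"b": 1, "a": [2, 3]}) == normalize({"a": [2, 3], "b": 1})
```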
|
{
"source": "jeffwhite530/d2yabt",
"score": 3
}
|
#### File: d2yabt/dcos/bundle.py
```python
import sys
import os
import re
import pandas
import d2yabt
def extract_diag(bundle_name):
"""Expand the DC/OS bundle into a directory.
"""
bundle_name = d2yabt.util.relocate_bundle(bundle_name)
bundle_dir = d2yabt.util.get_bundle_dir(bundle_name)
print("Extracting DC/OS diagnostic bundle to", bundle_dir)
d2yabt.util.unzip(bundle_name, bundle_dir)
return bundle_dir
def extract_oneliner(bundle_name):
"""Expand the oneliner bundle into a directory.
"""
bundle_name = d2yabt.util.relocate_bundle(bundle_name)
bundle_dir = d2yabt.util.get_bundle_dir(bundle_name)
print("Extracting DC/OS oneliner bundle to", bundle_dir)
d2yabt.util.untar(bundle_name, bundle_dir)
return bundle_dir
def get_nodes(bundle_dir, bundle_type):
"""Get the list of nodes and create an object for each.
"""
print("Obtaining list of nodes")
node_objs = list()
if bundle_type == "dcos_diag":
for node_dir in os.listdir(bundle_dir):
if not os.path.isdir(os.path.join(bundle_dir, node_dir)):
continue
node_obj = d2yabt.Node()
node_obj.dir = os.path.join(bundle_dir, node_dir)
if node_dir.endswith("_master"):
node_obj.type = "master"
elif node_dir.endswith("_agent"):
node_obj.type = "priv_agent"
elif node_dir.endswith("_agent_public"):
node_obj.type = "pub_agent"
node_obj.ip = node_dir.split("_")[0]
node_objs.append(node_obj)
elif bundle_type == "dcos_oneliner":
node_obj = d2yabt.Node()
node_obj.dir = bundle_dir
node_obj.ip = "unknown"
if os.path.exists(os.path.join(bundle_dir, "dcos-mesos-master.service.log")):
node_obj.type = "master"
elif os.path.exists(os.path.join(bundle_dir, "dcos-mesos-slave.service.log")):
node_obj.type = "priv_agent"
elif os.path.exists(os.path.join(bundle_dir, "dcos-mesos-slave-public.service.log")):
node_obj.type = "pub_agent"
node_objs.append(node_obj)
if not node_objs:
print("Failed to find any nodes in the bundle directory", file=sys.stderr)
sys.exit(1)
return node_objs
def get_node_info(node_objs):
"""Gather information about DC/OS nodes.
"""
for node_obj in node_objs:
# Get the Docker version
if node_obj.type == "master":
node_obj.docker_version = "n/a"
else:
if os.path.exists(os.path.join(node_obj.dir, "docker_--version.output")):
docker_version_text = open(os.path.join(node_obj.dir, "docker_--version.output"), "r").read()
docker_version = re.search(r"Docker version (.*),", docker_version_text).group(1)
node_obj.docker_version = docker_version
else:
node_obj.docker_version = "unknown"
# Get the OS
if os.path.exists(os.path.join(node_obj.dir, "binsh_-c_cat etc*-release.output")):
os_file_text = open(os.path.join(node_obj.dir, "binsh_-c_cat etc*-release.output"), "r").read()
node_os = re.search(r'ID="(.*)"', os_file_text).group(1)
node_obj.os = node_os
else:
node_obj.os = "unkown"
def print_nodes(node_objs):
"""Prints a table of nodes.
"""
node_table = pandas.DataFrame(data={
"IP": [o.ip for o in node_objs],
"Type": [o.type for o in node_objs],
"OS": [o.os for o in node_objs],
"Docker": [o.docker_version for o in node_objs],
}
)
node_table.sort_values("Type", inplace=True)
node_table.reset_index(inplace=True, drop=True)
node_table.index += 1
print(node_table)
```
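A hedged driver sketch showing how the functions above compose for a DC/OS bundle; the bundle file name is hypothetical and it assumes `d2yabt` (and its pandas dependency) is installed:
```python
import sys
import d2yabt

bundle_name = "dcos-diag-bundle.zip"  # hypothetical file name
bundle_type = d2yabt.util.get_bundle_type(bundle_name)

if bundle_type == "dcos_diag":
    bundle_dir = d2yabt.dcos.bundle.extract_diag(bundle_name)
elif bundle_type == "dcos_oneliner":
    bundle_dir = d2yabt.dcos.bundle.extract_oneliner(bundle_name)
else:
    sys.exit("not a DC/OS bundle: " + bundle_type)

node_objs = d2yabt.dcos.bundle.get_nodes(bundle_dir, bundle_type)
d2yabt.dcos.bundle.get_node_info(node_objs)
d2yabt.dcos.bundle.print_nodes(node_objs)
```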
#### File: lib/d2yabt/__init__.py
```python
import operator
import d2yabt.util
import d2yabt.dcos.bundle
import d2yabt.dcos.check
import d2yabt.service.bundle
import d2yabt.service.check
import d2yabt.konvoy.bundle
import d2yabt.konvoy.check
__version__ = "1.0.5"
class Node:
"""This class holds information about a DC/OS or Konvoy node.
"""
def __init__(self):
self.ip = ""
self.type = ""
self.dir = ""
self.os = ""
self.dcos_version = ""
self.docker_version = ""
self.zk_fsync_warning_count = 0
self._zk_longest_fsyncs = list()
self.oom_invoked_count = 0
self._oom_procs = dict()
def add_zk_fsync(self, zk_fsync: int):
"""Add an ZK fsync time entry.
"""
self._zk_longest_fsyncs.append(zk_fsync)
self._zk_longest_fsyncs.sort(reverse=True)
self._zk_longest_fsyncs = self._zk_longest_fsyncs[:5]
def get_longest_zk_fsyncs(self):
"""Returns a list of the top 5 longest ZK fsync times.
"""
return self._zk_longest_fsyncs
def add_oom_proc(self, oom_proc):
"""Add a process to the list of ones which invoked oom-killer.
"""
if oom_proc not in self._oom_procs:
self._oom_procs[oom_proc] = 1
else:
self._oom_procs[oom_proc] += 1
def get_top_oom_procs(self):
"""Returns a dict of the top 5 processes which invoked oom-killer.
"""
return sorted(self._oom_procs.items(), key=operator.itemgetter(1), reverse=True)[0:5]
```
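`Node` keeps only bounded summaries (the five longest ZooKeeper fsync times and the five most frequent oom-killer offenders) instead of full logs. A short behavioral sketch of those accumulators, assuming the package is installed:
```python
import d2yabt

node = d2yabt.Node()
for fsync_ms in (12, 900, 45, 3000, 7, 88, 410):
    node.add_zk_fsync(fsync_ms)
assert node.get_longest_zk_fsyncs() == [3000, 900, 410, 88, 45]  # top 5, descending

for proc in ("java", "java", "dockerd", "java", "mesos-agent"):
    node.add_oom_proc(proc)
assert node.get_top_oom_procs()[0] == ("java", 3)  # most frequent offender first
```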
#### File: lib/d2yabt/util.py
```python
import sys
import os
import gzip
import shutil
import json
import zipfile
import tarfile
import subprocess
def untar(tar_file, output_dir):
"""Untar a gzipped tar file to a given directory.
"""
tarfile_obj = tarfile.open(tar_file, "r:gz")
tarfile_obj.extractall(output_dir)
tarfile_obj.close()
def unzip(zip_file, output_dir):
"""Unzip a file to a given directory.
"""
os.mkdir(output_dir)
try:
zip_ref = zipfile.ZipFile(zip_file, "r")
zip_ref.extractall(output_dir)
zip_ref.close()
except zipfile.BadZipFile:
print("Failed to extract file, corrupt zip? Attempting to extract with 7zip", file=sys.stderr)
zip7_command = shutil.which("7z")
if zip7_command is None:
print("7zip command (7z) not found. Please install 7zip.", file=sys.stderr)
sys.exit(1)
zip7_process = subprocess.Popen([zip7_command, "x", "-o" + output_dir, "-y", zip_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
zip7_process.wait()
# Note that we're not checking if 7zip was successful because it will exit non-zero even if it was able to partially extract the zip.
# If the extracted files are within a directory, move the contents of that directory up one
output_dir_contents = os.listdir(output_dir)
if len(output_dir_contents) == 1:
for each in os.listdir(os.path.join(output_dir, output_dir_contents[0])):
os.rename(os.path.join(output_dir, output_dir_contents[0], each), os.path.join(output_dir, each))
os.rmdir(os.path.join(output_dir, output_dir_contents[0]))
def decompress_gzip_files(start_dir):
"""Walk a directory tree and decompress all gzip files found.
"""
print("Expanding bundle files")
for root, _dirs, files in os.walk(start_dir):
for each_file in files:
if not each_file.endswith(".gz"):
continue
gzipfile_with_path = os.path.join(root, each_file)
gzipfile_with_path_no_ext = gzipfile_with_path[:-3]
try:
with gzip.open(gzipfile_with_path, "rb") as f_in:
with open(gzipfile_with_path_no_ext, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
except EOFError:
print("Failed to expand", gzipfile_with_path, "EOF reached, incomplete file?")
except OSError:
print("Failed to expand", gzipfile_with_path + ", not a gzip file?")
else:
os.remove(gzipfile_with_path)
def format_json(bundle_dir):
"""Format the JSON files into a human-readable form.
"""
print("Formatting JSON files")
for root, _dirs, files in os.walk(bundle_dir):
for each_file in files:
if not each_file.endswith(".json"):
continue
# This file always fails to parse, just skip it
if each_file == "443-licensing_v1_audit_decrypt_1.json":
continue
file_with_path = os.path.join(root, each_file)
with open(file_with_path, "r+") as json_file_handle:
try:
json_data = json.load(json_file_handle)
json_file_handle.seek(0)
json_file_handle.write(json.dumps(json_data, indent=2, sort_keys=True))
json_file_handle.write("\n")
except (json.decoder.JSONDecodeError, UnicodeDecodeError):
print("Failed to parse JSON:", file_with_path, file=sys.stderr)
def get_bundle_type(bundle_name):
"""Determine the type of bundle given and return a string of either:
* dcos_diag
* dcos_oneliner
* service_diag
* konvoy_diag
"""
bundle_file_types = {
"dcos-mesos-master.service": "dcos_diag",
"dcos-mesos-master.service.gz": "dcos_diag",
"dcos-mesos-master.service.log": "dcos_oneliner",
"dcos-mesos-slave.service.log": "dcos_oneliner",
"dcos-mesos-slave-public.service.log": "dcos_oneliner",
"dcos_services.json": "service_diag",
"bundles": "konvoy_diag"
}
bundle_contents = list()
if os.path.isdir(bundle_name):
for _root, dirs, files in os.walk(bundle_name):
for each_file in files:
bundle_contents.append(each_file)
for each_dir in dirs:
bundle_contents.append(each_dir)
elif bundle_name.endswith(".tgz") or bundle_name.endswith(".tar.gz"):
mytar = tarfile.open(bundle_name, "r:gz")
for each_entry in mytar.getnames():
for each in os.path.split(each_entry):
bundle_contents.append(each)
elif bundle_name.endswith(".zip"):
try:
myzip = zipfile.ZipFile(bundle_name, "r")
for each_entry in myzip.namelist():
for each in os.path.split(each_entry):
bundle_contents.append(each)
except zipfile.BadZipFile:
print("Failed to list archive contents, corrupt zip? Attempting to list contents with 7zip", file=sys.stderr)
zip7_command = shutil.which("7z")
if zip7_command is None:
print("7zip command (7z) not found. Please install 7zip.", file=sys.stderr)
sys.exit(1)
zip7_process = subprocess.Popen([zip7_command, "-ba", "l", bundle_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in zip7_process.stdout:
line = line.decode("UTF-8").rstrip()
each_entry = line.split()[-1]
for each in os.path.split(each_entry):
bundle_contents.append(each)
for bundle_content in bundle_contents:
if bundle_content in bundle_file_types:
return bundle_file_types[bundle_content]
print("Unable to determine bundle type", file=sys.stderr)
sys.exit(1)
def get_bundle_dir(bundle_name):
"""Parse the bundle directory from the bundle name.
"""
if os.path.isdir(bundle_name):
return bundle_name
if bundle_name.endswith(".tgz") or bundle_name.endswith(".zip"):
return bundle_name[:-4]
if bundle_name.endswith(".tar.gz"):
return bundle_name[:-7]
print("Unable to parse bundle name", file=sys.stderr)
sys.exit(1)
def is_bundle_extracted(bundle_name):
"""Checks if the named bundle is already extracted.
If yes: return True
If no: return False
"""
bundle_dir = get_bundle_dir(bundle_name)
if os.path.exists(bundle_dir):
return True
return False
def relocate_bundle(bundle_name):
"""Moves the bundle file to the current working directory if it isn't already there.
Returns the new bundle name without path.
"""
bundle_name_base = os.path.basename(bundle_name)
if not bundle_name == bundle_name_base:
os.rename(bundle_name, bundle_name_base)
print("Moved", bundle_name, "to the current working directory")
return bundle_name_base
return bundle_name
```
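`get_bundle_dir` is pure string handling, which makes its behavior easy to pin down. A small sketch, assuming the package is installed and no directories with these names exist in the working directory:
```python
from d2yabt.util import get_bundle_dir

assert get_bundle_dir("diag-bundle.zip") == "diag-bundle"
assert get_bundle_dir("diag-bundle.tgz") == "diag-bundle"
assert get_bundle_dir("diag-bundle.tar.gz") == "diag-bundle"
# An existing directory is returned unchanged; any other extension exits with an error.
```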
|
{
"source": "jeffwillette/few_shot_meta_learning",
"score": 2
}
|
#### File: few_shot_meta_learning/data/toy.py
```python
from typing import List, Tuple, Any
import os
import numpy as np # type: ignore
import random
import torch
from torch.utils.data import Dataset
from matplotlib.colors import to_rgba # type: ignore
from matplotlib import pyplot as plt # type: ignore
from matplotlib.lines import Line2D # type: ignore
from sklearn.datasets import make_moons, make_circles # type: ignore
T = torch.Tensor
def get_biased_sample_idx(x: Any, y: Any, k_shot: int) -> Tuple[Any, ...]:
classes = np.unique(y)
n_sections = 2  # per-class sample count (k_shot + test_shots) must be evenly divisible by n_sections
sx, sy, qx, qy = np.empty((0, 2)), np.empty((0,)), np.empty((0, 2)), np.empty((0,))
for c in classes:
class_idx = np.argwhere(y == c).squeeze(1)
class_x, class_y = x[class_idx], y[class_idx]
x_or_y = 0 if np.sign(np.random.rand() - 0.5) < 0 else 1 # choose x or y index randomly
section = np.random.permutation(n_sections) # which half of the data to get
x_idx = np.argsort(class_x[:, x_or_y])
def sec(n: int) -> int:
return int(n * (x_idx.shape[0] // n_sections))
# get the support and query sets for this class which are split by section (whichever biased section we chose)
spt_x = class_x[x_idx[sec(section[0]) : sec(section[0] + 1)]] # get the proper third
spt_y = class_y[x_idx[sec(section[0]) : sec(section[0] + 1)]] # get the proper third
qry_x = class_x[x_idx[sec(section[1]) : sec(section[1] + 1)]]
qry_y = class_y[x_idx[sec(section[1]) : sec(section[1] + 1)]]
# collect random k of the biased support sets into one and leave the rest for the query set
spt_perm = np.random.permutation(spt_x.shape[0])
sx = np.concatenate((sx, spt_x[spt_perm[:k_shot]]))
sy = np.concatenate((sy, spt_y[spt_perm[:k_shot]]))
qx = np.concatenate((qx, spt_x[spt_perm[k_shot:]], qry_x))
qy = np.concatenate((qy, spt_y[spt_perm[k_shot:]], qry_y))
return sx, sy, qx, qy
class ToyDataset(Dataset):
def __init__(self, seed: int = 0, k_shot: int = 10, total_tasks: int = 100, test_shots: int = 50):
self.seed = seed
self.k_shot = k_shot
self.total_tasks = total_tasks
self.test_shots = test_shots
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
def __len__(self) -> int:
return self.total_tasks
class MetaMoons(ToyDataset):
def __init__(
self,
seed: int = 0,
k_shot: int = 10,
total_tasks: int = 100,
test_shots: int = 50,
):
super().__init__(seed=seed, k_shot=k_shot, total_tasks=total_tasks, test_shots=test_shots)
self.n_way = 2
self.name = "moons"
self.path = os.path.join("toy-moons", "2-way", f"{k_shot}-shot", f"{test_shots}-testshot")
def __getitem__(self, i: int) -> Tuple[T, T, T, T]:
return self.gen_random_task()
def sample_uniform(self) -> T:
x = torch.linspace(-3, 3, 100)
return torch.stack(torch.meshgrid(x, x), dim=-1).view(-1, 2)
def gen_random_task(self) -> Tuple[T, T, T, T]:
noise = np.random.rand() * .25
x, y = make_moons(n_samples=self.n_way * (self.k_shot + self.test_shots), noise=noise, random_state=self.seed)
sx, sy, qx, qy = get_biased_sample_idx(x, y, self.k_shot)
sx, sy, qx, qy = torch.from_numpy(sx).float(), torch.from_numpy(sy).long(), torch.from_numpy(qx).float(), torch.from_numpy(qy).long()
return sx, sy, qx, qy
class MetaCircles(ToyDataset):
def __init__(
self,
seed: int = 0,
k_shot: int = 10,
total_tasks: int = 100,
test_shots: int = 50,
):
super().__init__(seed=seed, k_shot=k_shot, total_tasks=total_tasks, test_shots=test_shots)
self.n_way = 2
self.name = "circles"
self.path = os.path.join("toy-circles", "2-way", f"{k_shot}-shot", f"{test_shots}-testshot")
def __getitem__(self, i: int) -> Tuple[T, T, T, T]:
return self.gen_random_task()
def sample_uniform(self) -> T:
x = torch.linspace(-3, 3, 100)
return torch.stack(torch.meshgrid(x, x), dim=-1).view(-1, 2)
def gen_random_task(self) -> Tuple[T, T, T, T]:
noise = np.random.rand() * .25
scale = np.random.rand() * 0.8
x, y = make_circles(n_samples=self.k_shot + self.test_shots, noise=noise, factor=scale, random_state=self.seed)
sx, sy, qx, qy = get_biased_sample_idx(x, y, self.k_shot)
sx, sy, qx, qy = torch.from_numpy(sx).float(), torch.from_numpy(sy).long(), torch.from_numpy(qx).float(), torch.from_numpy(qy).long()
return sx, sy, qx, qy
class RandomGaussians(ToyDataset):
def __init__(
self,
seed: int = 0,
n_way: int = 5,
k_shot: int = 5,
total_tasks: int = 100,
test_shots: int = 15,
mu_rng: List[int] = [-5, 5],
var_rng: List[float] = [0.1, 1.0],
dim: int = 2
):
super().__init__(seed=seed, k_shot=k_shot, total_tasks=total_tasks, test_shots=test_shots)
self.name = "2d-gaussians"
self.mu_rng = mu_rng
self.n_way = n_way
self.var_rng = var_rng
self.var = var_rng
self.dim = dim
self.name = "gausian"
self.path = os.path.join("toy-gaussian", f"{n_way}-way", f"{k_shot}-shot", f"{test_shots}-testshot")
def sample_uniform(self) -> T:
x = torch.linspace(-3, 3, 100)
return torch.stack(torch.meshgrid(x, x), dim=-1).view(-1, self.dim)
def sample(self, N: torch.distributions.MultivariateNormal, variant: str = "uniform") -> Tuple[T, T]:
train, test = N.sample((self.k_shot,)).transpose(0, 1), N.sample((self.test_shots,)).transpose(0, 1)
return train, test
def gen_random_task(self) -> Tuple[T, T, T, T]:
# sample mus and sigmas uniformly according to their range
mus = torch.rand((self.n_way, self.dim)) * (self.mu_rng[1] - self.mu_rng[0]) + self.mu_rng[0]
# decompose PSD sigma as O^TDO with orthogonal O's to make random PSD covariance
# https://stats.stackexchange.com/questions/2746/how-to-efficiently-generate-random-positive-semidefinite-correlation-matrices
O = torch.rand((self.n_way, self.dim, self.dim)) * 2 - 1
O = torch.qr(O)[0]
D = torch.stack([torch.eye(self.dim) * torch.rand(self.dim) for i in range(self.n_way)])
# scale the eigenvalues so each covariance is elliptical with roughly a 5:1 axis ratio
tmp = (torch.rand((self.n_way, self.dim)) * (self.var_rng[1] - self.var_rng[0]) + self.var_rng[0]).unsqueeze(1)
tmp[:, :, 1] = tmp[:, :, 0] / 5
D = D * tmp
sigmas = O.transpose(1, 2).bmm(D.bmm(O))
N = torch.distributions.MultivariateNormal(mus, sigmas)
labels = torch.randperm(self.n_way)
train_x, test_x = self.sample(N)
mu, sigma = train_x.mean(dim=(0, 1)), train_x.std(dim=(0, 1))
train_x = (train_x - mu) / sigma
test_x = (test_x - mu) / sigma
train_y = labels.unsqueeze(-1).repeat(1, self.k_shot)
test_y = labels.unsqueeze(-1).repeat(1, self.test_shots)
train_x, train_y, test_x, test_y = train_x.reshape(-1, self.dim).numpy(), train_y.reshape(-1).numpy(), test_x.reshape(-1, self.dim).numpy(), test_y.reshape(-1).numpy()
x, y = np.concatenate((train_x, test_x)), np.concatenate((train_y, test_y))
assert x.shape[0] % 2 == 0, f"x needs to be evenly divisible by 2 (got shape {x.shape}) for the toy Gaussian, if not you have to fix 'get biased sample function'"
sx, sy, qx, qy = get_biased_sample_idx(x, y, self.k_shot)
return torch.from_numpy(sx).float(), torch.from_numpy(sy).long(), torch.from_numpy(qx).float(), torch.from_numpy(qy).long()
def __getitem__(self, i: int) -> Tuple[T, T, T, T]:
return self.gen_random_task()
colors = [
"tab:blue", "tab:orange", "tab:green", "tab:red", "tab:purple",
"tab:brown", "tab:pink", "tab:gray", "tab:olive", "tab:cyan",
"mediumseagreen", "teal", "navy", "darkgoldenrod", "darkslateblue",
]
def get_color(i: int) -> Tuple[float, ...]:
if i < len(colors):
return to_rgba(colors[i]) # type: ignore
return (np.random.rand(), np.random.rand(), np.random.rand(), 1.0)
BATCH_SIZE = 3
SEED = 1
if __name__ == "__main__":
ds: Any
do_plots = ["moons", "circles", "gaussian"]
if "moons" in do_plots:
ds = MetaMoons(seed=SEED)
fig, axes = plt.subplots(nrows=1, ncols=BATCH_SIZE, figsize=(BATCH_SIZE * 7, 6))
for i, ax in enumerate(axes):
xtr, ytr, xte, yte = ds[0]
# this sample will be from a different task, but we are only taking the uniform noise so it is ok
ax.scatter(xtr[:, 0], xtr[:, 1], c=[get_color(v.item()) for v in ytr], s=50, edgecolors=(0, 0, 0, 0.5), linewidths=2.0)
ax.scatter(xte[:, 0], xte[:, 1], c=[get_color(v.item()) for v in yte], marker='*', s=20)
ax.set_title(f"task: {i}")
if i == BATCH_SIZE - 1:
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='train', markerfacecolor='black', markersize=10),
Line2D([0], [0], marker='*', color='w', label='test', markerfacecolor='black', markersize=10),
]
ax.legend(handles=legend_elements)
path = os.path.join("data", "examples", "toy-moons")
os.makedirs(path, exist_ok=True)
fig.tight_layout()
fig.savefig(os.path.join(path, "metatrain-example.pdf"))
fig.savefig(os.path.join(path, "metatrain-example.png"))
if "circles" in do_plots:
ds = MetaCircles(seed=SEED)
fig, axes = plt.subplots(nrows=1, ncols=BATCH_SIZE, figsize=(BATCH_SIZE * 7, 6))
for i, ax in enumerate(axes):
xtr, ytr, xte, yte = ds[0]
# this sample will be from a different task, but we are only taking the uniform noise so it is ok
ax.scatter(xtr[:, 0], xtr[:, 1], c=[get_color(v.item()) for v in ytr], s=50, edgecolors=(0, 0, 0, 0.5), linewidths=2.0)
ax.scatter(xte[:, 0], xte[:, 1], c=[get_color(v.item()) for v in yte], marker='*', s=20)
ax.set_title(f"task: {i}")
if i == BATCH_SIZE - 1:
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='train', markerfacecolor='black', markersize=10),
Line2D([0], [0], marker='*', color='w', label='test', markerfacecolor='black', markersize=10),
]
ax.legend(handles=legend_elements)
path = os.path.join("data", "examples", "toy-circles")
os.makedirs(path, exist_ok=True)
fig.tight_layout()
fig.savefig(os.path.join(path, "metatrain-example.pdf"))
fig.savefig(os.path.join(path, "metatrain-example.png"))
if "gaussian" in do_plots:
# RANDOM GAUSSIANS
ds = RandomGaussians(seed=SEED, k_shot=5, test_shots=15)
fig, axes = plt.subplots(nrows=1, ncols=BATCH_SIZE, figsize=(BATCH_SIZE * 7, 6))
for i, ax in enumerate(axes):
xtr, ytr, xte, yte = ds[0]
ax.scatter(xtr[:, 0], xtr[:, 1], c=[get_color(v.item()) for v in ytr], s=50, edgecolors=(0, 0, 0, 0.5), linewidths=2.0)
ax.scatter(xte[:, 0], xte[:, 1], c=[get_color(v.item()) for v in yte], marker='*', s=20)
ax.set_title(f"task: {i}")
if i == BATCH_SIZE - 1:
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='train', markerfacecolor='black', markersize=10),
Line2D([0], [0], marker='*', color='w', label='test', markerfacecolor='black', markersize=10),
]
ax.legend(handles=legend_elements)
path = os.path.join("data", "examples", "toy-gaussian")
os.makedirs(path, exist_ok=True)
fig.tight_layout()
fig.savefig(os.path.join(path, "metatrain-example.pdf"))
fig.savefig(os.path.join(path, "metatrain-example.png"))
```
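Each dataset produces one biased support/query split per `__getitem__` call, so tasks batch directly with a plain `DataLoader`. A sketch of that, where the `data.toy` import path is an assumption based on the file location:
```python
from torch.utils.data import DataLoader

from data.toy import MetaMoons  # import path assumed from the file location

ds = MetaMoons(seed=0, k_shot=10, test_shots=50)
loader = DataLoader(ds, batch_size=4)  # a batch of 4 independent tasks

sx, sy, qx, qy = next(iter(loader))
# n_way * k_shot = 2 * 10 support points per task, each with 2 features.
assert sx.shape == (4, 20, 2) and sy.shape == (4, 20)
```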
#### File: jeffwillette/few_shot_meta_learning/_utils.py
```python
import csv
import itertools
import os
import random
import typing
import numpy as np
import torch
def list_dir(root: str, prefix: bool = False) -> typing.List[str]:
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root: str, suffix: str, prefix: bool = False) -> typing.List[str]:
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def train_val_split(X: typing.List[typing.List[np.ndarray]], k_shot: int, shuffle: bool = True) -> typing.Tuple[np.ndarray, typing.List[int], np.ndarray, typing.List[int]]:
"""Split data into train and validation
Args:
X: a list of sub-list of numpy array.
Each sub-list consists of data belonging to the same class
k_shot: number of training data per class
shuffle: shuffle data before splitting
Returns:
"""
# get information of image size
nc, iH, iW = X[0][0].shape
v_shot = len(X[0]) - k_shot
num_classes = len(X)
x_t = np.empty(shape=(num_classes, k_shot, nc, iH, iW))
x_v = np.empty(shape=(num_classes, v_shot, nc, iH, iW))
y_t = [0] * num_classes * k_shot
y_v = [0] * num_classes * v_shot
for cls_id in range(num_classes):
if shuffle:
random.shuffle(x=X[cls_id]) # in-place shuffle data within the same class
x_t[cls_id, :, :, :, :] = np.array(X[cls_id][:k_shot])
x_v[cls_id, :, :, :, :] = np.array(X[cls_id][k_shot:])
y_t[k_shot * cls_id: k_shot * (cls_id + 1)] = [cls_id] * k_shot
y_v[v_shot * cls_id: v_shot * (cls_id + 1)] = [cls_id] * v_shot
x_t = np.concatenate(x_t, axis=0)
x_v = np.concatenate(x_v, axis=0)
return x_t, y_t, x_v, y_v
def get_episodes(episode_file_path: typing.Optional[str] = None, num_episodes: int = 100) -> typing.List[str]:
"""Get episodes from a file
Args:
episode_file_path:
num_episodes: dummy variable in training to create an infinite
episode (str) generator. In testing, it defines how many
episodes to evaluate
Return: an episode (str) generator
"""
# get episode list if not None
if episode_file_path is not None:
episodes = []
with open(file=episode_file_path, mode='r') as f_csv:
csv_rd = csv.reader(f_csv, delimiter=',')
episodes = list(csv_rd)
else:
episodes = [None] * num_episodes
return episodes
def _weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
if m.weight is not None:
torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
torch.nn.init.zeros_(m.bias.data)
elif classname.find('BatchNorm') != -1:
if m.weight is not None:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0)
def euclidean_distance(matrixN: torch.Tensor, matrixM: torch.Tensor) -> torch.Tensor:
"""Calculate Euclidean distance from N points to M points
Args:
matrixN: an N x D matrix for N points
matrixM: a M x D matrix for M points
Returns: N x M matrix
"""
N = matrixN.size(0)
M = matrixM.size(0)
D = matrixN.size(1)
assert D == matrixM.size(1)
matrixN = matrixN.unsqueeze(1).expand(N, M, D)
matrixM = matrixM.unsqueeze(0).expand(N, M, D)
return torch.norm(input=matrixN - matrixM, p='fro', dim=2)
def get_cls_prototypes(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""Calculate the prototypes/centroids
Args:
x: input data
y: corresponding labels
Returns: a tensor of prototypes with shape (C, d),
where C is the number of classes, d is the embedding dimension
"""
_, d = x.shape
cls_idx = torch.unique(input=y, return_counts=False)
C = cls_idx.shape[0]
prototypes = torch.empty(size=(C, d), device=x.device)
for c in range(C):
prototypes[c, :] = torch.mean(input=x[y == cls_idx[c]], dim=0)
return prototypes
def kl_divergence_gaussians(p: typing.List[torch.Tensor], q: typing.List[torch.Tensor]) -> torch.Tensor:
"""Calculate KL divergence between 2 diagonal Gaussian
Args: each parameter is a list whose 1st half holds the means and whose 2nd half holds the log_std values
Returns: KL divergence
"""
assert len(p) == len(q)
n = len(p) // 2
kl_div = 0
for i in range(n):
p_mean = p[i]
p_log_std = p[n + i]
q_mean = q[i]
q_log_std = q[n + i]
s1_vec = torch.exp(input=2 * q_log_std)
mahalanobis = torch.sum(input=torch.square(input=p_mean - q_mean) / s1_vec)
tr_s1inv_s0 = torch.sum(input=torch.exp(input=2 * (p_log_std - q_log_std)))
log_det = 2 * torch.sum(input=q_log_std - p_log_std)
kl_div_temp = mahalanobis + tr_s1inv_s0 + log_det - torch.numel(p_mean)
kl_div_temp = kl_div_temp / 2
kl_div = kl_div + kl_div_temp
return kl_div
def intialize_parameters(state_dict: dict) -> typing.List[torch.Tensor]:
""""""
p = list(state_dict.values())
for m in p:
if m.ndim > 1:
torch.nn.init.kaiming_normal_(tensor=m, nonlinearity='relu')
else:
torch.nn.init.zeros_(tensor=m)
return p
class IdentityNet(torch.nn.Module):
"""Identity hyper-net class for MAML"""
def __init__(self, base_net: torch.nn.Module) -> None:
super(IdentityNet, self).__init__()
base_state_dict = base_net.state_dict()
params = intialize_parameters(state_dict=base_state_dict)
self.params = torch.nn.ParameterList([torch.nn.Parameter(p) \
for p in params])
self.identity = torch.nn.Identity()
def forward(self) -> typing.List[torch.Tensor]:
out = []
for param in self.params:
temp = self.identity(param)
out.append(temp)
return out
class NormalVariationalNet(torch.nn.Module):
"""A simple neural network that simulate the
reparameterization trick. Its parameters are
the mean and std-vector
"""
def __init__(self, base_net: torch.nn.Module) -> None:
"""
Args:
base_net: the base network
"""
super(NormalVariationalNet, self).__init__()
# dict of parameters of based network
base_state_dict = base_net.state_dict()
mean = intialize_parameters(state_dict=base_state_dict)
# initialize parameters
self.mean = torch.nn.ParameterList([torch.nn.Parameter(m) \
for m in mean])
self.log_std = torch.nn.ParameterList([torch.nn.Parameter(torch.rand_like(v) - 4) \
for v in base_state_dict.values()])
self.num_base_params = np.sum([torch.numel(p) for p in self.mean])
def forward(self) -> typing.List[torch.Tensor]:
"""Output the parameters of the base network in list format to pass into higher monkeypatch
"""
out = []
for m, log_s in zip(self.mean, self.log_std):
eps_normal = torch.randn_like(m, device=m.device)
temp = m + eps_normal * torch.exp(input=log_s)
out.append(temp)
return out
```
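`get_cls_prototypes` and `euclidean_distance` together form the core of a prototypical-network classification step: embed, average per class, then assign each query to its nearest prototype. A sketch, where the `_utils` import path is an assumption:
```python
import torch

from _utils import euclidean_distance, get_cls_prototypes  # import path assumed

torch.manual_seed(0)
support_x = torch.randn(30, 64)         # 30 embedded support points, d = 64
support_y = torch.randint(0, 5, (30,))  # labels drawn from 5 classes
query_x = torch.randn(10, 64)

prototypes = get_cls_prototypes(support_x, support_y)  # (C, 64), one row per class
dists = euclidean_distance(query_x, prototypes)        # (10, C)
pred = dists.argmin(dim=1)  # row index into prototypes, i.e. the sorted unique labels
print(pred.shape)           # torch.Size([10])
```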
|
{
"source": "jeff-wishnie/ion-python",
"score": 2
}
|
#### File: amazon/ion/reader_binary.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from datetime import timedelta
from decimal import Decimal, localcontext
from functools import partial
from io import BytesIO
from struct import unpack
from .core import ION_STREAM_INCOMPLETE_EVENT, ION_STREAM_END_EVENT, ION_VERSION_MARKER_EVENT,\
IonEventType, IonType, IonEvent, IonThunkEvent, Transition, \
TimestampPrecision, Timestamp, OffsetTZInfo
from .exceptions import IonException
from .util import coroutine, record, Enum
from .reader import reader_trampoline, BufferQueue, ReadEventType
from .symbols import SYMBOL_ZERO_TOKEN, SymbolToken
class _TypeID(Enum):
"""Type IDs in the binary encoding which is distinct from the :class:`IonType` enum."""
NULL = 0
BOOL = 1
POS_INT = 2
NEG_INT = 3
FLOAT = 4
DECIMAL = 5
TIMESTAMP = 6
SYMBOL = 7
STRING = 8
CLOB = 9
BLOB = 10
LIST = 11
SEXP = 12
STRUCT = 13
ANNOTATION = 14
# Mappings from type code to value type.
_TID_VALUE_TYPE_TABLE = (
IonType.NULL,
IonType.BOOL,
IonType.INT, # Positive integer
IonType.INT, # Negative integer
IonType.FLOAT,
IonType.DECIMAL,
IonType.TIMESTAMP,
IonType.SYMBOL,
IonType.STRING,
IonType.CLOB,
IonType.BLOB,
IonType.LIST,
IonType.SEXP,
IonType.STRUCT,
None, # Annotations do not have an Ion type.
)
# Streams are infinite.
_STREAM_REMAINING = Decimal('Inf')
_VAR_INT_VALUE_MASK = 0b01111111
_VAR_INT_VALUE_BITS = 7
_VAR_INT_SIGN_MASK = 0b01000000
_VAR_INT_SIGN_VALUE_MASK = 0b00111111
_VAR_INT_SIGNAL_MASK = 0b10000000
_SIGNED_INT_SIGN_MASK = 0b10000000
_SIGNED_INT_SIGN_VALUE_MASK = 0b01111111
_LENGTH_LN_MAX = 0xD
_LENGTH_FIELD_FOLLOWS = 0xE
_ALL_LENGTH_LNS = tuple(range(0, _LENGTH_FIELD_FOLLOWS + 1))
_NON_ZERO_LENGTH_LNS = tuple(range(1, _LENGTH_FIELD_FOLLOWS + 1))
_ANNOTATION_LENGTH_LNS = tuple(range(3, _LENGTH_FIELD_FOLLOWS + 1))
_IVM_START_OCTET = 0xE0
_IVM_TAIL = b'\x01\x00\xEA'
_IVM_TAIL_LEN = len(_IVM_TAIL)
# Type IDs for value types that are nullable.
_NULLABLE_TIDS = tuple(range(0, 14))
_NULL_LN = 0xF
_STATIC_SCALARS = (
# Boolean
(0x10, IonType.BOOL, False),
(0x11, IonType.BOOL, True),
# Zero-values
(0x20, IonType.INT, 0),
(0x40, IonType.FLOAT, 0.0),
(0x50, IonType.DECIMAL, Decimal()),
(0x70, IonType.SYMBOL, SYMBOL_ZERO_TOKEN),
# Empty string/clob/blob
(0x80, IonType.STRING, u''),
(0x90, IonType.CLOB, b''),
(0xA0, IonType.BLOB, b''),
)
# Mapping of valid LNs and the struct.unpack format strings
_FLOAT_LN_TABLE = {
0x4: '>f',
0x8: '>d'
}
_CONTAINER_TIDS = (_TypeID.LIST, _TypeID.SEXP, _TypeID.STRUCT)
def _gen_type_octet(hn, ln):
"""Generates a type octet from a high nibble and low nibble."""
return (hn << 4) | ln
def _parse_var_int_components(buf, signed):
"""Parses a ``VarInt`` or ``VarUInt`` field from a file-like object."""
value = 0
sign = 1
while True:
ch = buf.read(1)
if ch == b'':
raise IonException('Variable integer under-run')
octet = ord(ch)
if signed:
if octet & _VAR_INT_SIGN_MASK:
sign = -1
value = octet & _VAR_INT_SIGN_VALUE_MASK
signed = False
else:
value <<= _VAR_INT_VALUE_BITS
value |= octet & _VAR_INT_VALUE_MASK
if octet & _VAR_INT_SIGNAL_MASK:
break
return sign, value
def _parse_var_int(buf, signed):
sign, value = _parse_var_int_components(buf, signed)
return sign * value
def _parse_signed_int_components(buf):
"""Parses the remainder of a file-like object as a signed magnitude value.
Returns:
Returns a pair of the sign bit and the unsigned magnitude.
"""
sign_bit = 0
value = 0
first = True
while True:
ch = buf.read(1)
if ch == b'':
break
octet = ord(ch)
if first:
if octet & _SIGNED_INT_SIGN_MASK:
sign_bit = 1
value = octet & _SIGNED_INT_SIGN_VALUE_MASK
first = False
else:
value <<= 8
value |= octet
return sign_bit, value
def _parse_decimal(buf):
"""Parses the remainder of a file-like object as a decimal."""
from decimal import localcontext
exponent = _parse_var_int(buf, signed=True)
sign_bit, coefficient = _parse_signed_int_components(buf)
if coefficient == 0:
# Handle the zero cases--especially negative zero
value = Decimal((sign_bit, (0,), exponent))
else:
coefficient *= sign_bit and -1 or 1
with localcontext() as context:
# Adjusting precision for taking into account arbitrarily
# large/small numbers
context.prec = len(str(coefficient))
value = Decimal(coefficient).scaleb(exponent)
return value
def _parse_sid_iter(data):
"""Parses the given :class:`bytes` data as a list of :class:`SymbolToken`"""
limit = len(data)
buf = BytesIO(data)
while buf.tell() < limit:
sid = _parse_var_int(buf, signed=False)
yield SymbolToken(None, sid)
class _HandlerContext(record(
'position', 'limit', 'queue', 'field_name', 'annotations', 'depth', 'whence'
)):
"""A context for a handler co-routine.
Args:
position (int): The offset of the *start* of the data being parsed.
limit (Optional[int]): The logical offset that represents the *end* of the container.
queue (BufferQueue): The data source for the handler.
field_name (Optional[SymbolToken]): The token representing the field name for the handled
value.
annotations (Optional[Sequence[SymbolToken]]): The sequence of annotations tokens
for the value to be parsed.
depth (int): the depth of the parser.
whence (Coroutine): The reference to the co-routine that this handler should delegate
back to when the handler is logically done.
"""
@property
def remaining(self):
"""Determines how many bytes are remaining in the current context."""
if self.depth == 0:
return _STREAM_REMAINING
return self.limit - self.queue.position
def read_data_transition(self, length, whence=None,
skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):
"""Returns an immediate event_transition to read a specified number of bytes."""
if whence is None:
whence = self.whence
return Transition(
None, _read_data_handler(length, whence, self, skip, stream_event)
)
def event_transition(self, event_cls, event_type,
ion_type=None, value=None, annotations=None, depth=None, whence=None):
"""Returns an ion event event_transition that yields to another co-routine.
If ``annotations`` is not specified, then the ``annotations`` are the annotations of this
context.
If ``depth`` is not specified, then the ``depth`` is depth of this context.
If ``whence`` is not specified, then ``whence`` is the whence of this context.
"""
if annotations is None:
annotations = self.annotations
if annotations is None:
annotations = ()
if not (event_type is IonEventType.CONTAINER_START) and \
annotations and (self.limit - self.queue.position) != 0:
# This value is contained in an annotation wrapper, from which its limit was inherited. It must have
# reached, but not surpassed, that limit.
raise IonException('Incorrect annotation wrapper length.')
if depth is None:
depth = self.depth
if whence is None:
whence = self.whence
return Transition(
event_cls(event_type, ion_type, value, self.field_name, annotations, depth),
whence
)
def immediate_transition(self, delegate=None):
"""Returns an immediate transition to another co-routine.
If ``delegate`` is not specified, then ``whence`` is the delegate.
"""
if delegate is None:
delegate = self.whence
return Transition(None, delegate)
def derive_container_context(self, length, add_depth=1):
new_limit = self.queue.position + length
return _HandlerContext(
self.position,
new_limit,
self.queue,
self.field_name,
self.annotations,
self.depth + add_depth,
self.whence
)
def derive_child_context(self, position, field_name, annotations, whence):
return _HandlerContext(
position,
self.limit,
self.queue,
field_name,
annotations,
self.depth,
whence
)
#
# Handler Co-routine Factories
#
def _create_delegate_handler(delegate):
"""Creates a handler function that creates a co-routine that can yield once with the given
positional arguments to the delegate as a transition.
Args:
delegate (Coroutine): The co-routine to delegate to.
Returns:
A :class:`callable` handler that returns a co-routine that ignores the data it receives
and sends with the arguments given to the handler as a :class:`Transition`.
"""
@coroutine
def handler(*args):
yield
yield delegate.send(Transition(args, delegate))
return handler
@coroutine
def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):
"""Creates a co-routine for retrieving data up to a requested size.
Args:
length (int): The minimum length requested.
whence (Coroutine): The co-routine to return to after the data is satisfied.
ctx (_HandlerContext): The context for the read.
skip (Optional[bool]): Whether the requested number of bytes should be skipped.
stream_event (Optional[IonEvent]): The stream event to return if no bytes are read or
available.
"""
trans = None
queue = ctx.queue
if length > ctx.remaining:
raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))
# Make sure to check the queue first.
queue_len = len(queue)
if queue_len > 0:
# Any data available means we can only be incomplete.
stream_event = ION_STREAM_INCOMPLETE_EVENT
length -= queue_len
if skip:
# For skipping we need to consume any remnant in the buffer queue.
if length >= 0:
queue.skip(queue_len)
else:
queue.skip(queue_len + length)
while True:
data_event, self = (yield trans)
if data_event is not None and data_event.data is not None:
data = data_event.data
data_len = len(data)
if data_len > 0:
# We got something so we can only be incomplete.
stream_event = ION_STREAM_INCOMPLETE_EVENT
length -= data_len
if not skip:
queue.extend(data)
else:
pos_adjustment = data_len
if length < 0:
pos_adjustment += length
# More data than we need to skip, so make sure to accumulate that remnant.
queue.extend(data[length:])
queue.position += pos_adjustment
if length <= 0:
# We got all the data we need, go back immediately
yield Transition(None, whence)
trans = Transition(stream_event, self)
@coroutine
def _invalid_handler(type_octet, ctx):
"""Placeholder co-routine for invalid type codes."""
yield
raise IonException('Invalid type octet: 0x%02X' % type_octet)
@coroutine
def _var_uint_field_handler(handler, ctx):
"""Handler co-routine for variable unsigned integer fields that.
Invokes the given ``handler`` function with the read field and context,
then immediately yields to the resulting co-routine.
"""
_, self = yield
queue = ctx.queue
value = 0
while True:
if len(queue) == 0:
# We don't know when the field ends, so read at least one byte.
yield ctx.read_data_transition(1, self)
octet = queue.read_byte()
value <<= _VAR_INT_VALUE_BITS
value |= octet & _VAR_INT_VALUE_MASK
if octet & _VAR_INT_SIGNAL_MASK:
break
yield ctx.immediate_transition(handler(value, ctx))
@coroutine
def _ivm_handler(ctx):
_, self = yield
if ctx.depth != 0:
raise IonException('IVM encountered below top-level')
yield ctx.read_data_transition(_IVM_TAIL_LEN, self)
ivm_tail = ctx.queue.read(_IVM_TAIL_LEN)
if _IVM_TAIL != ivm_tail:
raise IonException('Invalid IVM tail: %r' % ivm_tail)
yield Transition(ION_VERSION_MARKER_EVENT, ctx.whence)
@coroutine
def _nop_pad_handler(ion_type, length, ctx):
yield
if ctx.field_name is not None and ctx.field_name != SYMBOL_ZERO_TOKEN:
raise IonException(
'Cannot have NOP pad with non-zero symbol field, field SID %d' % ctx.field_name)
if length > 0:
yield ctx.read_data_transition(length, ctx.whence, skip=True)
# Nothing to skip, so we just go back from whence we came...
yield ctx.immediate_transition()
@coroutine
def _static_scalar_handler(ion_type, value, ctx):
yield
yield ctx.event_transition(IonEvent, IonEventType.SCALAR, ion_type, value)
@coroutine
def _length_scalar_handler(scalar_factory, ion_type, length, ctx):
"""Handles scalars, ``scalar_factory`` is a function that returns a value or thunk."""
_, self = yield
if length == 0:
data = b''
else:
yield ctx.read_data_transition(length, self)
data = ctx.queue.read(length)
scalar = scalar_factory(data)
event_cls = IonEvent
if callable(scalar):
# TODO Wrap the exception to get context position.
event_cls = IonThunkEvent
yield ctx.event_transition(event_cls, IonEventType.SCALAR, ion_type, scalar)
@coroutine
def _start_type_handler(field_name, whence, ctx, expects_ivm=False, at_top=False, annotations=None):
_, self = yield
child_position = ctx.queue.position
# Read type byte.
if at_top:
incomplete_event = ION_STREAM_END_EVENT
else:
incomplete_event = ION_STREAM_INCOMPLETE_EVENT
yield ctx.read_data_transition(1, self, stream_event=incomplete_event)
type_octet = ctx.queue.read_byte()
if expects_ivm and type_octet != _IVM_START_OCTET:
raise IonException(
'Expected binary version marker, got: %02X' % type_octet)
handler = _HANDLER_DISPATCH_TABLE[type_octet]
child_ctx = ctx.derive_child_context(child_position, field_name, annotations, whence)
yield ctx.immediate_transition(handler(child_ctx))
@coroutine
def _annotation_handler(ion_type, length, ctx):
"""Handles annotations. ``ion_type`` is ignored."""
_, self = yield
self_handler = _create_delegate_handler(self)
if ctx.annotations is not None:
raise IonException('Annotation cannot be nested in annotations')
# We have to replace our context for annotations specifically to encapsulate the limit
ctx = ctx.derive_container_context(length, add_depth=0)
# Immediately read the length field and the annotations
(ann_length, _), _ = yield ctx.immediate_transition(
_var_uint_field_handler(self_handler, ctx)
)
if ann_length < 1:
raise IonException('Invalid annotation length subfield; annotation wrapper must have at least one annotation.')
# Read/parse the annotations.
yield ctx.read_data_transition(ann_length, self)
ann_data = ctx.queue.read(ann_length)
annotations = tuple(_parse_sid_iter(ann_data))
if ctx.limit - ctx.queue.position < 1:
# There is no space left for the 'value' subfield, which is required.
raise IonException('Incorrect annotation wrapper length.')
# Go parse the start of the value but go back to the real parent container.
yield ctx.immediate_transition(
_start_type_handler(ctx.field_name, ctx.whence, ctx, annotations=annotations)
)
@coroutine
def _ordered_struct_start_handler(handler, ctx):
"""Handles the special case of ordered structs, specified by the type ID 0xD1.
This coroutine's only purpose is to ensure that the struct in question declares at least one field name/value pair,
as required by the spec.
"""
_, self = yield
self_handler = _create_delegate_handler(self)
(length, _), _ = yield ctx.immediate_transition(
_var_uint_field_handler(self_handler, ctx)
)
if length < 2:
# A valid field name/value pair is at least two octets: one for the field name SID and one for the value.
raise IonException('Ordered structs (type ID 0xD1) must have at least one field name/value pair.')
yield ctx.immediate_transition(handler(length, ctx))
@coroutine
def _container_start_handler(ion_type, length, ctx):
"""Handles container delegation."""
_, self = yield
container_ctx = ctx.derive_container_context(length)
if ctx.annotations and ctx.limit != container_ctx.limit:
# 'ctx' is the annotation wrapper context. `container_ctx` represents the wrapper's 'value' subfield. Their
# limits must match.
raise IonException('Incorrect annotation wrapper length.')
delegate = _container_handler(ion_type, container_ctx)
# We start the container, and transition to the new container processor.
yield ctx.event_transition(
IonEvent, IonEventType.CONTAINER_START, ion_type, value=None, whence=delegate
)
@coroutine
def _container_handler(ion_type, ctx):
"""Handler for the body of a container (or the top-level stream).
Args:
ion_type (Optional[IonType]): The type of the container or ``None`` for the top-level.
ctx (_HandlerContext): The context for the container.
"""
transition = None
first = True
at_top = ctx.depth == 0
while True:
data_event, self = (yield transition)
if data_event is not None and data_event.type is ReadEventType.SKIP:
yield ctx.read_data_transition(ctx.remaining, self, skip=True)
if ctx.queue.position == ctx.limit:
# We are at the end of the container.
# Yield the close event and go to enclosing container.
yield Transition(
IonEvent(IonEventType.CONTAINER_END, ion_type, depth=ctx.depth-1),
ctx.whence
)
if ion_type is IonType.STRUCT:
# Read the field name.
self_handler = _create_delegate_handler(self)
(field_sid, _), _ = yield ctx.immediate_transition(
_var_uint_field_handler(self_handler, ctx)
)
field_name = SymbolToken(None, field_sid)
else:
field_name = None
expects_ivm = first and at_top
transition = ctx.immediate_transition(
_start_type_handler(field_name, self, ctx, expects_ivm, at_top=at_top)
)
first = False
#
# Scalar Factories
#
def _rslice(data, rem, size):
start = -rem
end = start + size
if end >= 0:
end = None
return data[slice(start, end)]
def _int_factory(sign, data):
def parse_int():
value = 0
length = len(data)
while length >= 8:
segment = _rslice(data, length, 8)
value <<= 64
value |= unpack('>Q', segment)[0]
length -= 8
if length >= 4:
segment = _rslice(data, length, 4)
value <<= 32
value |= unpack('>I', segment)[0]
length -= 4
if length >= 2:
segment = _rslice(data, length, 2)
value <<= 16
value |= unpack('>H', segment)[0]
length -= 2
if length == 1:
value <<= 8
value |= six.indexbytes(data, -length)
return sign * value
return parse_int
def _float_factory(data):
fmt = _FLOAT_LN_TABLE.get(len(data))
if fmt is None:
raise ValueError('Invalid data length for float: %d' % len(data))
return lambda: unpack(fmt, data)[0]
def _decimal_factory(data):
def parse_decimal():
return _parse_decimal(BytesIO(data))
return parse_decimal
def _timestamp_factory(data):
def parse_timestamp():
end = len(data)
buf = BytesIO(data)
precision = TimestampPrecision.YEAR
off_sign, off_value = _parse_var_int_components(buf, signed=True)
off_value *= off_sign
if off_sign == -1 and off_value == 0:
# -00:00 (unknown UTC offset) is a naive datetime.
tz = None
else:
tz = OffsetTZInfo(timedelta(minutes=off_value))
year = _parse_var_int(buf, signed=False)
if buf.tell() == end:
month = 1
else:
month = _parse_var_int(buf, signed=False)
precision = TimestampPrecision.MONTH
if buf.tell() == end:
day = 1
else:
day = _parse_var_int(buf, signed=False)
precision = TimestampPrecision.DAY
if buf.tell() == end:
hour = 0
minute = 0
else:
hour = _parse_var_int(buf, signed=False)
minute = _parse_var_int(buf, signed=False)
precision = TimestampPrecision.MINUTE
if buf.tell() == end:
second = 0
else:
second = _parse_var_int(buf, signed=False)
precision = TimestampPrecision.SECOND
if buf.tell() == end:
fraction = None
else:
fraction = _parse_decimal(buf)
fraction_exponent = fraction.as_tuple().exponent
if fraction == 0 and fraction_exponent > -1:
# According to the spec, fractions with coefficients of zero and exponents >= zero are ignored.
fraction = None
return Timestamp.adjust_from_utc_fields(
year, month, day,
hour, minute, second, None,
tz,
precision=precision, fractional_precision=None, fractional_seconds=fraction
)
return parse_timestamp
def _symbol_factory(data):
parse_sid = _int_factory(1, data)
def parse_symbol():
sid = parse_sid()
return SymbolToken(None, sid)
return parse_symbol
def _string_factory(data):
return lambda: data.decode('utf-8')
def _lob_factory(data):
# Lobs are a trivial return of the byte data.
return data
#
# Binding Functions
#
# Handler table for type octet to handler co-routine.
_HANDLER_DISPATCH_TABLE = [None] * 256
def _bind_invalid_handlers():
"""Seeds the co-routine table with all invalid handlers."""
for type_octet in range(256):
_HANDLER_DISPATCH_TABLE[type_octet] = partial(_invalid_handler, type_octet)
def _bind_null_handlers():
for tid in _NULLABLE_TIDS:
type_octet = _gen_type_octet(tid, _NULL_LN)
ion_type = _TID_VALUE_TYPE_TABLE[tid]
_HANDLER_DISPATCH_TABLE[type_octet] = partial(_static_scalar_handler, ion_type, None)
def _bind_static_scalar_handlers():
for type_octet, ion_type, value in _STATIC_SCALARS:
_HANDLER_DISPATCH_TABLE[type_octet] = partial(_static_scalar_handler, ion_type, value)
def _bind_length_handlers(tids, user_handler, lns):
"""Binds a set of handlers with the given factory.
Args:
tids (Sequence[int]): The Type IDs to bind to.
user_handler (Callable): A function that takes as its parameters
:class:`IonType`, ``length``, and the ``ctx`` context
returning a co-routine.
lns (Sequence[int]): The low-nibble lengths to bind to.
"""
for tid in tids:
for ln in lns:
type_octet = _gen_type_octet(tid, ln)
ion_type = _TID_VALUE_TYPE_TABLE[tid]
if ln == 1 and ion_type is IonType.STRUCT:
handler = partial(_ordered_struct_start_handler, partial(user_handler, ion_type))
elif ln < _LENGTH_FIELD_FOLLOWS:
# Directly partially bind length.
handler = partial(user_handler, ion_type, ln)
else:
# Delegate to length field parsing first.
handler = partial(_var_uint_field_handler, partial(user_handler, ion_type))
_HANDLER_DISPATCH_TABLE[type_octet] = handler
def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS):
"""Binds a set of scalar handlers for an inclusive range of low-nibble values.
Args:
tids (Sequence[int]): The Type IDs to bind to.
scalar_factory (Callable): The factory for the scalar parsing function.
This function can itself return a function representing a thunk to defer the
scalar parsing or a direct value.
lns (Sequence[int]): The low-nibble lengths to bind to.
"""
handler = partial(_length_scalar_handler, scalar_factory)
return _bind_length_handlers(tids, handler, lns)
# First seed all type byte handlers with invalid.
_bind_invalid_handlers()
# Populate the actual handlers.
_HANDLER_DISPATCH_TABLE[_IVM_START_OCTET] = _ivm_handler
_bind_null_handlers()
_bind_static_scalar_handlers()
_bind_length_scalar_handlers([_TypeID.POS_INT], partial(_int_factory, 1))
_bind_length_scalar_handlers([_TypeID.NEG_INT], partial(_int_factory, -1))
_bind_length_scalar_handlers([_TypeID.FLOAT], _float_factory, lns=_FLOAT_LN_TABLE.keys())
_bind_length_scalar_handlers([_TypeID.DECIMAL], _decimal_factory)
_bind_length_scalar_handlers([_TypeID.TIMESTAMP], _timestamp_factory)
_bind_length_scalar_handlers([_TypeID.STRING], _string_factory)
_bind_length_scalar_handlers([_TypeID.SYMBOL], _symbol_factory)
_bind_length_scalar_handlers([_TypeID.CLOB, _TypeID.BLOB], _lob_factory)
_bind_length_handlers(_CONTAINER_TIDS, _container_start_handler, _ALL_LENGTH_LNS)
_bind_length_handlers([_TypeID.ANNOTATION], _annotation_handler, _ANNOTATION_LENGTH_LNS)
_bind_length_handlers([_TypeID.NULL], _nop_pad_handler, _ALL_LENGTH_LNS)
# Make immutable.
_HANDLER_DISPATCH_TABLE = tuple(_HANDLER_DISPATCH_TABLE)
def raw_reader(queue=None):
"""Returns a raw binary reader co-routine.
Args:
queue (Optional[BufferQueue]): The buffer of read data for parsing; if ``None``, a
new one will be created.
Yields:
IonEvent: parse events, will have an event type of ``INCOMPLETE`` if data is needed
in the middle of a value or ``STREAM_END`` if there is no data **and** the parser
is not in the middle of parsing a value.
Receives :class:`DataEvent`, with :class:`ReadEventType` of ``NEXT`` or ``SKIP``
to iterate over values, or ``DATA`` if the last event was an ``INCOMPLETE``
or ``STREAM_END`` event type.
``SKIP`` is only allowed within a container. A reader is *in* a container
when the ``CONTAINER_START`` event type is encountered and *not in* a container
when the ``CONTAINER_END`` event type for that container is encountered.
"""
if queue is None:
queue = BufferQueue()
ctx = _HandlerContext(
position=0,
limit=None,
queue=queue,
field_name=None,
annotations=None,
depth=0,
whence=None
)
return reader_trampoline(_container_handler(None, ctx))
binary_reader = raw_reader
```
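`_parse_decimal` rebuilds a `Decimal` from a VarInt exponent and a signed-magnitude coefficient, widening the local context precision to the coefficient's digit count so `scaleb` does not round. A standard-library sketch of that reconstruction, with the byte-level parsing omitted:
```python
from decimal import Decimal, localcontext

def rebuild_decimal(sign_bit: int, coefficient: int, exponent: int) -> Decimal:
    if coefficient == 0:
        # Preserves negative zero, e.g. Decimal('-0E+2').
        return Decimal((sign_bit, (0,), exponent))
    coefficient *= -1 if sign_bit else 1
    with localcontext() as ctx:
        # Widen precision so long coefficients survive scaleb without rounding.
        ctx.prec = len(str(abs(coefficient)))
        return Decimal(coefficient).scaleb(exponent)

big = rebuild_decimal(0, 12345678901234567890123456789, -1)  # 29-digit coefficient
assert big == Decimal('1234567890123456789012345678.9')
assert str(rebuild_decimal(1, 0, 2)) == '-0E+2'
```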
#### File: ion-python/tests/test_decimal.py
```python
from decimal import Decimal
from amazon.ion.simpleion import dumps, loads
# regression test for https://github.com/amzn/ion-python/issues/132
def test_decimal_precision():
from decimal import localcontext
with localcontext() as ctx:
# ensure test executes with the default precision
# (see https://docs.python.org/3.7/library/decimal.html#decimal.DefaultContext):
ctx.prec = 28
# decimal with 29 digits
decimal = Decimal('1234567890123456789012345678.9')
assert decimal == loads(dumps(decimal))
assert decimal == loads(dumps(decimal, binary=False))
# negative decimal with 29 digits
decimal = Decimal('-1234567890123456789012345678.9')
assert decimal == loads(dumps(decimal))
assert decimal == loads(dumps(decimal, binary=False))
```
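A complementary round-trip sketch using the same `simpleion` API as the test above; it assumes these scalar types compare equal after a round trip in the same way the `Decimal` values do:
```python
from decimal import Decimal
from amazon.ion.simpleion import dumps, loads

# dumps() defaults to the binary encoding; binary=False selects text Ion.
values = [42, 2.5, Decimal('3.14'), u'héllo', b'\x00\x01']
for value in values:
    assert loads(dumps(value)) == value
    assert loads(dumps(value, binary=False)) == value
```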
|
{
"source": "jeffwright13/faker",
"score": 2
}
|
#### File: address/de_CH/__init__.py
```python
from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ('{{canton_name}}',)
building_number_formats = ('%', '%#', '%#', '%#', '%##')
street_suffixes = ['strasse']
street_name_formats = ('{{last_name}}{{street_suffix}}', )
street_address_formats = ('{{street_name}} {{building_number}}', )
address_formats = ('{{street_address}}\n{{postcode}} {{city}}', )
postcode_formats = ('1###', '2###', '3###', '4###', '5###', '6###', '7###',
'8###', '9###')
cantons = (('AG', 'Aargau'), ('AI', 'Appenzell Innerrhoden'),
('AR', 'Appenzell Ausserrhoden'), ('BE', 'Bern'),
('BL', 'Basel-Landschaft'), ('BS', 'Basel-Stadt'), ('FR', 'Freiburg'),
('GE', 'Genf'), ('GL', 'Glarus'), ('GR', 'Graubünden'), ('JU', 'Jura'),
('LU', 'Luzern'), ('NE', 'Neuenburg'), ('NW', 'Nidwalden'), ('OW', 'Obwalden'),
('SG', 'St. Gallen'), ('SH', 'Schaffhausen'), ('SO', 'Solothurn'),
('SZ', 'Schwyz'), ('TG', 'Thurgau'), ('TI', 'Tessin'), ('UR', 'Uri'),
('VD', 'Waadt'), ('VS', 'Wallis'), ('ZG', 'Zug'), ('ZH', 'Zürich'))
def canton(self):
"""
Randomly returns a swiss canton ('Abbreviated', 'Name').
:example ('ZH', 'Zürich')
"""
return self.random_element(self.cantons)
def administrative_unit(self):
"""
Randomly returns a Swiss canton name.
:example 'Zürich'
"""
return self.canton()[1]
canton_name = administrative_unit
def canton_code(self):
"""
Randomly returns a Swiss canton code.
:example 'ZH'
"""
return self.canton()[0]
```
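Purely as an illustration (not part of the repository), these canton helpers surface through a localized `Faker` instance roughly like this:
```python
# Hypothetical usage sketch for the de_CH address provider above.
from faker import Faker

fake = Faker("de_CH")
print(fake.canton())        # e.g. ('ZH', 'Zürich')
print(fake.canton_name())   # e.g. 'Zürich'
print(fake.canton_code())   # e.g. 'ZH'
print(fake.address())       # street, building number, postcode and canton name
```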
#### File: company/ro_RO/__init__.py
```python
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
'{{last_name}} {{company_suffix}}',
'{{last_name}} {{last_name}} {{company_suffix}}',
'{{last_name}}',
)
company_suffixes = (
'SRL', 'SA', 'SCA', 'SNC', 'SCS', 'AFJ', 'ASF', 'CON', 'CRL', 'INC',
'LOC', 'OC1', 'OC2', 'OC3', 'PFA', 'RA', 'SCS', 'SPI', 'URL',
)
def company_suffix(self):
return self.random_element(self.company_suffixes)
```
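Again only illustrative: the localized company formats above are exercised the same way, assuming the `ro_RO` locale is requested.
```python
# Hypothetical usage sketch for the ro_RO company provider above.
from faker import Faker

fake = Faker("ro_RO")
print(fake.company())         # e.g. 'Popescu SRL' (output varies per seed)
print(fake.company_suffix())  # one of 'SRL', 'SA', 'SCA', ...
```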
#### File: faker/sphinx/documentor.py
```python
import importlib
import inspect
import os
from pathlib import Path
from faker.config import AVAILABLE_LOCALES
from faker.config import PROVIDERS as STANDARD_PROVIDER_NAMES
from faker.providers import BaseProvider
if os.environ.get("READTHEDOCS", False):
version = os.environ["READTHEDOCS_VERSION"]
HOME = Path("/home/docs/checkouts/readthedocs.org/user_builds/faker/checkouts") / version
DOCS_ROOT = HOME / "docs"
else:
DOCS_ROOT = Path(__file__).resolve().parents[2] / 'docs'
SECTION_ADORNMENTS = '#*=-~'
PROVIDER_AUTODOC_TEMPLATE = """
.. autoclass:: {provider_class}
:members: {provider_methods}
:undoc-members:
:show-inheritance:
"""
BASE_PROVIDER_METHOD_NAMES = [
name for name, method in inspect.getmembers(BaseProvider, inspect.isfunction)
if not name.startswith('_')
]
def _get_provider_methods(provider_class):
try:
provider_module_name, obj_name = provider_class.rsplit('.', 1)
provider_module = importlib.import_module(provider_module_name)
provider = getattr(provider_module, obj_name, None)
except (ModuleNotFoundError, AttributeError):
return ''
else:
return ', '.join([
name for name, method in inspect.getmembers(provider, inspect.isfunction)
if not name.startswith('_') and name not in BASE_PROVIDER_METHOD_NAMES
])
def _get_localized_provider_info(locale):
info = []
for provider_name in STANDARD_PROVIDER_NAMES:
try:
locale_module_path = f'{provider_name}.{locale}'
locale_module = importlib.import_module(locale_module_path)
provider = getattr(locale_module, 'Provider')
except (ModuleNotFoundError, AttributeError):
continue
else:
provider_class = f'{provider.__module__}.Provider'
info.append((provider_class, provider_name))
return info
def _write(fh, s):
return fh.write(s.encode('utf-8'))
def _hide_edit_on_github(fh):
_write(fh, ':github_url: hide\n\n')
def _write_title(fh, title, level=1):
if not isinstance(level, int) or level < 1 or level > 5:
raise ValueError('`level` must be an integer from 1 to 5')
if level <= 2:
_write(fh, SECTION_ADORNMENTS[level - 1] * len(title))
_write(fh, '\n')
_write(fh, f'{title}\n')
_write(fh, SECTION_ADORNMENTS[level - 1] * len(title))
_write(fh, '\n\n')
def _write_includes(fh):
_write(fh, '.. include:: ../includes/substitutions.rst')
_write(fh, '\n\n')
def _write_standard_provider_index():
with (DOCS_ROOT / 'providers.rst').open('wb') as fh:
_hide_edit_on_github(fh)
_write_title(fh, 'Standard Providers')
_write(fh, '.. toctree::\n')
_write(fh, ' :maxdepth: 2\n\n')
_write(fh, ' providers/baseprovider\n')
for provider_name in STANDARD_PROVIDER_NAMES:
_write(fh, f' providers/{provider_name}\n')
def _write_base_provider_docs():
(DOCS_ROOT / 'providers').mkdir(parents=True, exist_ok=True)
with (DOCS_ROOT / 'providers' / 'baseprovider.rst').open('wb') as fh:
_hide_edit_on_github(fh)
_write_title(fh, '``faker.providers``')
_write_includes(fh)
_write(fh, PROVIDER_AUTODOC_TEMPLATE.format(
provider_class='faker.providers.BaseProvider',
provider_methods=','.join(BASE_PROVIDER_METHOD_NAMES),
))
def _write_standard_provider_docs():
(DOCS_ROOT / 'providers').mkdir(parents=True, exist_ok=True)
for provider_name in STANDARD_PROVIDER_NAMES:
with (DOCS_ROOT / 'providers' / f'{provider_name}.rst').open('wb') as fh:
provider_class = f'{provider_name}.Provider'
provider_methods = _get_provider_methods(provider_class)
_hide_edit_on_github(fh)
_write_title(fh, f'``{provider_name}``')
_write_includes(fh)
_write(fh, PROVIDER_AUTODOC_TEMPLATE.format(
provider_class=provider_class,
provider_methods=provider_methods,
))
def _write_localized_provider_index():
    with (DOCS_ROOT / 'locales.rst').open('wb') as fh:
_hide_edit_on_github(fh)
_write_title(fh, 'Localized Providers')
_write(fh, '.. toctree::\n')
_write(fh, ' :maxdepth: 2\n\n')
for locale in AVAILABLE_LOCALES:
_write(fh, f' locales/{locale}\n')
def _write_localized_provider_docs():
(DOCS_ROOT / 'locales').mkdir(parents=True, exist_ok=True)
for locale in AVAILABLE_LOCALES:
info = _get_localized_provider_info(locale)
with (DOCS_ROOT / 'locales' / '{}.rst'.format(locale)).open('wb') as fh:
_hide_edit_on_github(fh)
_write_title(fh, f'Locale {locale}')
_write_includes(fh)
for provider_class, standard_provider_name in info:
provider_methods = _get_provider_methods(provider_class)
_write_title(fh, f'``{standard_provider_name}``', level=2)
_write(fh, PROVIDER_AUTODOC_TEMPLATE.format(
provider_class=provider_class,
provider_methods=provider_methods,
))
def write_provider_docs():
DOCS_ROOT.mkdir(parents=True, exist_ok=True)
_write_standard_provider_index()
_write_base_provider_docs()
_write_standard_provider_docs()
_write_localized_provider_index()
_write_localized_provider_docs()
```
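The module's single public entry point is `write_provider_docs()`; a minimal sketch of invoking it directly (assuming `faker` and its Sphinx helpers are importable and `DOCS_ROOT` is writable) might be:
```python
# Hypothetical one-off invocation to regenerate the provider/locale .rst files.
from faker.sphinx.documentor import write_provider_docs

if __name__ == "__main__":
    write_provider_docs()
```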
|
{
"source": "jeffwright13/faker_music",
"score": 3
}
|
#### File: faker_music/faker_music/music.py
```python
from random import choice
from faker.providers import BaseProvider
from .genres import genre_list
from .instruments import instrument_list
class MusicProvider(BaseProvider):
"""
A Provider for music-related data.
Typical use:
>>> from faker import Faker
>>> from faker_music import MusicProvider
>>> fake = Faker()
>>> fake.add_provider(MusicProvider)
>>> fake.music_genre()
>>> fake.music_subgenre()
>>> etc...
"""
def music_genre_object(self):
"""
Returns a randomly-chosen genre dictionary. This is a
dictionary whose first key is 'genre', a string;
and whose second key is 'subgenres', a list of strings.
Example:
{
"genre": "Metal",
"subgenres": [
"Black Metal",
"Goth Metal",
"Thrash Metal"
]
}
"""
return choice(genre_list)
def music_genre(self):
"""
Returns a string representing a musical genre.
"""
my_choice = choice(genre_list)
return my_choice["genre"]
def music_subgenre(self):
"""
Returns a string representing a musical subgenre.
"""
while True:
my_choice = choice(genre_list)
if len(my_choice["subgenres"]) == 0:
continue
return choice(my_choice["subgenres"])
def music_instrument_object(self):
"""
Returns a randomly-chosen instrument dictionary. This is a
dictionary whose first key is 'category', a string;
and whose second key, "instruments", refers to a list of
instruments in that category.
Example:
{
"category": "electronics",
"instruments": [
"Drum machine",
"Electric piano",
"Synthesizer"
]
}
"""
return choice(instrument_list)
def music_instrument(self):
"""
Returns a musical instrument in string format.
"""
my_choice = choice(instrument_list)
return choice(my_choice["instruments"])
def music_instrument_category(self):
"""
Returns an instrument category in string format.
"""
my_choice = choice(instrument_list)
return my_choice["category"]
```
#### File: faker_music/tests/test_music.py
```python
def test_music_genre_object(fake):
test_genre_obj = fake.music_genre_object()
assert isinstance(test_genre_obj, dict)
assert "genre" in test_genre_obj.keys()
assert "subgenres" in test_genre_obj.keys()
def test_music_genre(fake):
test_genre = fake.music_genre()
assert isinstance(test_genre, str)
assert len(test_genre) > 0
def test_music_subgenre(fake):
test_subgenre = fake.music_subgenre()
assert isinstance(test_subgenre, str)
assert len(test_subgenre) > 0
def test_music_instrument_object(fake):
test_instrument_obj = fake.music_instrument_object()
assert isinstance(test_instrument_obj, dict)
assert "category" in test_instrument_obj.keys()
assert "instruments" in test_instrument_obj.keys()
def test_music_instrument(fake):
test_instrument = fake.music_instrument()
assert isinstance(test_instrument, str)
assert len(test_instrument) > 0
def test_music_instrument_category(fake):
test_instrument_category = fake.music_instrument_category()
assert isinstance(test_instrument_category, str)
assert len(test_instrument_category) > 0
```
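These tests rely on a `fake` fixture that is not shown in this excerpt; a minimal `conftest.py` sketch (an assumption about the project layout, not the actual fixture) could provide it like so:
```python
# Hypothetical conftest.py supplying the `fake` fixture used by the tests above.
import pytest
from faker import Faker
from faker_music import MusicProvider

@pytest.fixture
def fake():
    faker_instance = Faker()
    faker_instance.add_provider(MusicProvider)
    return faker_instance
```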
|
{
"source": "jeffwright13/pytest-fold",
"score": 2
}
|
#### File: pytest-fold/pytest_fold/plugin.py
```python
import re
import pickle
import tempfile
import pytest
from _pytest.config import Config
from _pytest._io.terminalwriter import TerminalWriter
from _pytest.reports import TestReport
from pytest_fold.tui_pytermtk import main as tuitk
from pytest_fold.tui_textual1 import main as tuitxt1
from pytest_fold.tui_textual2 import main as tuitxt2
from pytest_fold.utils import (
test_session_starts_matcher,
errors_section_matcher,
failures_section_matcher,
warnings_summary_matcher,
passes_section_matcher,
short_test_summary_matcher,
lastline_matcher,
MARKERS,
REPORTFILE,
MARKEDTERMINALOUTPUTFILE,
UNMARKEDTERMINALOUTPUTFILE,
)
# Don't collect tests from any of these files
collect_ignore = [
"setup.py",
"plugin.py",
]
# A list of TestReport objects generated by Pytest during test run.
# Each TestReport represents a single test's operation during one of
# Pytest's three phases: setup | call | teardown
reports = []
def pytest_addoption(parser):
"""Define the plugin's option flags as presented by Pytest"""
group = parser.getgroup("fold")
group.addoption(
"--fold",
action="store_true",
help="fold failed test output sections",
)
group.addoption(
"--fold-tui",
"--ft",
action="store",
default="pytermtk",
help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')",
choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"],
)
def pytest_report_teststatus(report: TestReport, config: Config):
"""Construct list(s) of individial TestReport instances"""
reports.append(report)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
"""
Write console output to a file for use by TUI
This code works by looking at every line sent by Pytest to the terminal,
and based on its category, marking or not marking it
"""
config.option.verbose = (
1 # force verbose mode for easier parsing of final test results
)
config.option.reportchars = (
"A" # force "display all" mode so all results can be shown
)
if config.option.fold:
tr = config.pluginmanager.getplugin("terminalreporter")
if tr is not None:
# identify and mark the very first line of terminal output
try:
config._pyfoldfirsttime
except AttributeError:
config._pyfoldfirsttime = True
config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+")
config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+")
oldwrite = tr._tw.write
# identify and mark each results section
def tee_write(s, **kwargs):
if re.search(test_session_starts_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_test_session_starts"] + "\n").encode(
"utf-8"
)
)
if re.search(errors_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8")
)
if re.search(failures_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8")
)
if re.search(warnings_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8")
)
if re.search(passes_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8")
)
if re.search(short_test_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_short_test_summary"] + "\n").encode(
"utf-8"
)
)
if re.search(lastline_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8")
)
# Write this line's text along with its markup info to console
oldwrite(s, **kwargs)
# Mark up this line's text by passing it to an instance of TerminalWriter's
# 'markup' method. Do not pass "flush" to the method or it will throw an error.
s1 = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s1 = TerminalWriter().markup(s, **kwargs)
# Encode the marked up line so it can be written to the config object.
                # The Pytest config object can be used by plugins for conveying stateful
# info across an entire test run session.
if isinstance(s1, str):
marked_up = s1.encode("utf-8")
config._pyfold_marked_outputfile.write(marked_up)
# Write this line's original (unmarked) text to unmarked file
s_orig = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s_orig = TerminalWriter().markup(s, **kwargs)
if isinstance(s_orig, str):
unmarked_up = s_orig.encode("utf-8")
config._pyfold_unmarked_outputfile.write(unmarked_up)
# Write to both terminal/console and tempfiles:
# _pyfold_marked_outputfile, _pyfold_unmarked_outputfile
tr._tw.write = tee_write
def pytest_unconfigure(config: Config):
"""
Write terminal and test results info to files for use by TUI
"""
# Write terminal output to file
if hasattr(config, "_pyfold_marked_outputfile"):
# get terminal contents, then write file
config._pyfold_marked_outputfile.seek(0)
markedsessionlog = config._pyfold_marked_outputfile.read()
config._pyfold_marked_outputfile.close()
if hasattr(config, "_pyfold_unmarked_outputfile"):
# get terminal contents, then write file
config._pyfold_unmarked_outputfile.seek(0)
unmarkedsessionlog = config._pyfold_unmarked_outputfile.read()
config._pyfold_unmarked_outputfile.close()
# Undo our patching in the terminal reporter
config.pluginmanager.getplugin("terminalreporter")
# Write marked-up results to file
with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file:
marked_file.write(markedsessionlog)
# Write un-marked-up results to file
with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file:
unmarked_file.write(unmarkedsessionlog)
# Write the reports list to file
with open(REPORTFILE, "wb") as report_file:
pickle.dump(reports, report_file)
# Launch the TUI
if config.getoption("--fold") == True:
pyfold_tui(config)
def pyfold_tui(config: Config) -> None:
"""
Final code invocation after Pytest run has completed.
This method calls the Pyfold TUI to display final results.
"""
# disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has
# no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25
if not config.getoption("--fold"):
return
capmanager = config.pluginmanager.getplugin("capturemanager")
try:
capmanager.suspend_global_capture(in_=True)
finally:
if config.getoption("--ft") in ["k", "pytermtk"]:
tuitk()
elif config.getoption("--ft") in ["t1", "textual1"]:
tuitxt1()
elif config.getoption("--ft") in ["t2", "textual2"]:
tuitxt2()
elif config.getoption("--ft") not in ["n", "none"]:
print(f"Incorrect choice for fold-tui: {config.getoption('--ft')}")
capmanager.resume_global_capture()
```
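As a rough illustration of how the `--fold` and `--fold-tui` options defined above are consumed, the plugin can be driven programmatically through pytest's own entry point (assuming pytest-fold is installed); this is equivalent to running `pytest --fold --ft k tests/` from the shell:
```python
# Hypothetical driver: run a test directory with folding enabled and the
# pyTermTk TUI selected; the 'tests/' path is a placeholder.
import pytest

if __name__ == "__main__":
    exit_code = pytest.main(["--fold", "--fold-tui", "pytermtk", "tests/"])
    print(f"pytest exited with code {exit_code}")
```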
#### File: pytest_fold/stash/tuit.py
```python
from pathlib import Path
from rich.console import RenderableType
from rich.text import Text
from textual import events
from textual.app import App
from textual.views import DockView
from textual.widgets import Header, TreeControl, ScrollView, TreeClick
from pytest_fold.utils import OUTFILE, sectionize, Results
TREE_WIDTH = 30
SECTIONS = {
"FIRSTLINE": "bold blue underline",
"FAILURES": "bold red underline",
"ERRORS": "bold magenta underline",
"WARNINGS_SUMMARY": "bold yellow underline",
"TERMINAL_SUMMARY": "bold green underline",
"LASTLINE": "bold blue underline",
}
class ResultsData:
"""
Class to read in results from a 'pytest --fold' session (which inserts markers
around each failed test), and sectionize the results into individual sections for
display on the TUI. Relies on utils.py.
"""
def __init__(self, path: Path = OUTFILE) -> None:
self.results_file = path
self.sections = []
self.parsed_sections = []
def _sectionize_results(self) -> None:
with open(self.results_file, "r") as results_file:
results_lines = results_file.readlines()
self.sections = sectionize(results_lines)
def get_results(self) -> list:
self._sectionize_results()
return self.sections
def get_results_dict(self) -> dict:
self.results = self.get_results()
d = {}
for section in self.results:
if section["test_title"]:
d[section["test_title"]] = section["content"]
else:
d[section["name"]] = section["content"]
return d
class FoldApp(App):
"""
Textual class inherited from App
Provides docking and data population for test session headers and results
"""
async def on_load(self, event: events.Load) -> None:
        # Load results from OUTFILE; bind actions to header/footer widgets
self.results = ResultsData().get_results_dict()
self.summary_text = (
Text.from_ansi(self.results["LASTLINE"]).markup.replace("=", "").strip()
)
await self.bind("b", "view.toggle('sidebar')", "Toggle sidebar")
await self.bind("q", "quit", "Quit")
async def on_mount(self) -> None:
# Create and dock header and footer widgets
self.title = self.summary_text
header1 = Header(tall=False, style="white on black underline")
        header2 = Header(tall=False, style="white on black", clock=False)
await self.view.dock(header1, edge="top", size=1)
await self.view.dock(header2, edge="bottom", size=1)
# Stylize the results-tree section headers
tree = TreeControl("SESSION RESULTS:", {})
for results_key in self.results.keys():
await tree.add(tree.root.id, Text(results_key), {"results": self.results})
for k, v in SECTIONS.items():
if tree.nodes[tree.id].label.plain == k:
tree.nodes[tree.id].label.stylize(v)
continue
else:
tree.nodes[tree.id].label.stylize("italic")
await tree.root.expand()
# Create and dock the results header tree, and individual results
self.body = ScrollView()
self.sections = DockView()
await self.view.dock(
ScrollView(tree), edge="left", size=TREE_WIDTH, name="sidebar"
)
await self.view.dock(self.sections)
await self.sections.dock(self.body, edge="top")
async def handle_tree_click(self, message: TreeClick[dict]) -> None:
# Display results in body when section header is clicked
label = message.node.label
self.text = message.node.data.get("results")[label._text[0]]
text: RenderableType
text = Text.from_ansi(self.text)
await self.body.update(text)
def main():
app = FoldApp()
app.run()
if __name__ == "__main__":
main()
```
#### File: jeffwright13/pytest-fold/setup.py
```python
import os
import codecs
from setuptools import setup, find_packages
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding="utf-8").read()
setup(
name="pytest-fold",
version="0.8.4",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
url="https://github.com/jeffwright13/pytest-fold",
description="Capture Pytest output and when test run is complete, drop user into interactive text user interface",
long_description=read("README.md"),
long_description_content_type="text/markdown",
# packages=["pytest_fold"],
packages=find_packages(),
py_modules=["pytest_fold"],
python_requires=">=3.8",
install_requires=[
"Faker>=13.0.0",
"pytest>=6.2.5",
"pyTermTk>=0.9.0a43",
"single-source>=0.2.0",
"strip-ansi>=0.1.1",
"textual>=0.1.17",
],
classifiers=[
"Framework :: Pytest",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Testing",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
],
keywords="pytest testing fold output logs fail pytermtk asciimatics textual single-source",
entry_points={
"pytest11": ["pytest_fold = pytest_fold.plugin"],
"console_scripts": [
"tuitxt = pytest_fold.tui_textual1:main",
"tuitxt2 = pytest_fold.tui_textual2:main",
"tuitk = pytest_fold.tui_pytermtk:main",
],
},
)
```
#### File: pytest-fold/tests/test_pytest_fold_2.py
```python
import pytest
import logging
import sys
LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
logger = logging.getLogger()
logger.setLevel(logging.NOTSET)
logger.propagate = True
stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout_handler)
logging.getLogger("faker").setLevel(logging.ERROR)
@pytest.fixture
def error_fixture():
assert 0
def test_i_ok():
print("ok")
def test_ii_fail():
assert 0
def test_iii_error(error_fixture):
pass
def test_iv_skip():
pytest.skip("skipping this test")
def test_v_xfail():
pytest.xfail("xfailing this test")
def test_vi_fail_compare_dicts_for_pytest_icdiff():
listofStrings = ["Hello", "hi", "there", "at", "this"]
listofInts = [7, 10, 45, 23, 77]
assert len(listofStrings) == len(listofInts)
assert listofStrings == listofInts
@pytest.mark.xfail(reason="always xfail")
def test_vi_xpass():
pass
```
|
{
"source": "jeffxhansen/RCR",
"score": 3
}
|
#### File: jeffxhansen/RCR/testing.py
```python
import numpy as np
reds = np.array([[100, 110, 250]])
RED = np.array([100, 110, 250])
reds = np.array([RED])
pixel = np.array([70,90,250])
print(reds)
print(pixel)
reds = np.append(reds, [pixel], axis=0)
print(reds)
reds = np.average(reds, axis=0)
print(reds)
'''
a = np.array([1,1,1])
b = np.array([3,3,3])
c = np.empty((3))
print(c)
c = np.append([c],[a],axis=0)
c = np.append(c,[b],axis=0)
c = np.average(c,axis=0)
print(c)
'''
'''
from cube import Cube
cube = Cube()
algorithm = "U L R' x U2 y R' U2 R U2 F R x' U'"
print(algorithm)
print(cube.reverseAlgorithm(algorithm))
name = "Jeff"
name += " Hansen"
print(name)
def updateTranslation(rotation_command: str, prime: bool):
global translation
global patterns
oldTranslation = translation.copy()
increment = 1
if prime:
increment = -1
pattern = patterns[rotation_command]
index = pattern.index(translation[pattern[0]])
for i in range(len(pattern)):
curr = pattern[index]
trans = oldTranslation[pattern[(index+increment) %4]]
translation[curr] = trans
index = (index + 1) % 4
print(translation)
translation = {"L": "L", "R": "R", "B": "B",
"U": "U", "D": "D", "F": "F",
"x": "x", "y": "y", "z": "z"}
patterns = {"y": ["F", "R", "B", "L"],
"x": ["F", "U", "B", "D"],
"z": ["L", "U", "R", "D"]}
movements = "F R F R"
movements = movements.split(" ")
for movement in movements:
print(movement, end=" : ")
movement = translation[movement]
print(movement)
if movement == "F" or movement == "B":
updateTranslation("y", False)
import cv2 as cv
import numpy as np
print(cv.__version__)
img = cv.imread('Capture.png',1)
print(type(img[0, 0]))
height, width, colors = img.shape
print(height)
print(width)
print(colors)
square = height // 5
startPoint = (square*2+2, square*2+2)
endPoint = (square*3-2, square*3-2)
color = (255,50,50)
thickness = 2
gradientChange = 255 // 9
gradient = 0
centerSpace = square // 2
img2 = cv.imread("Capture.png", 1)
for i in range(1,4):
for j in range(1,4):
startx = (square*j) + centerSpace
endx = (square*j + 2) + centerSpace
starty = (square*i) + centerSpace
endy = (square*i + 2) + centerSpace
startPoint = (startx, starty)
endPoint = (endx, endy)
color = (gradient, gradient, gradient)
print(startPoint)
img2 = cv.rectangle(img2, startPoint, endPoint, color, thickness)
gradient += gradientChange
cv.imwrite("Edit.png", img2)
current = img[0, 0]
print(current)
print(current[0])
print(current[1])
print(current[2])
white = np.array([255, 255, 255])
print(type(white))
print(img[0][0])
#print(white == img[0][0])
truth = (white == current)
print("Truth: " + str(truth))
print("NPEqualCommand: " + str(np.array_equal(white, img[0][0])))
def inside(val, array):
for p in array:
if np.array_equal(p, val):
return True
return False
a = np.array([[1,1,1],[2,2,2],[3,3,3]])
b = np.array([2,2,2])
c = np.array([4,4,4])
print(inside(b,a))
print(inside(c, a))
current = np.copy(img[0][0])
pixels = np.zeros([1,3], dtype=int)
counter = 0
for row in img:
for pixel in row:
blackOrWhite = np.all(pixel == pixel[0])
if not blackOrWhite:
if not np.array_equal(pixel, current):
current = np.copy(pixel)
if (not inside(pixel, pixels)):
#print(pixel)
pixels = np.append(pixels, [pixel], axis=0)
counter += 1
if counter < 1:
break
for p in pixels:
print(p)
# Testing defaultClose and defaultOpen - accept cube
time.sleep(1)
robot.defaultClose()
time.sleep(1)
robot.defaultOpen()
# Testing acceptCube method
robot.acceptCube()
time.sleep(1)
robot.defaultOpen()
# robot.defaultOpen()
dictionary = {"a":1, "b":2, "c":3, "d":4}
print(2 in dictionary.values())
list = [1,2,3,4,5,6]
def updateList():
global list
copy = list.copy()
pivot = 1
for i in range(len(list)):
old = i
new = pivot
list[new] = copy[old]
pivot = (pivot + 1) % len(list)
for num in list:
print(num)
updateList()
lst = [1,2,3,4]
lst2 = [1,2,3,4]
print(lst == lst2)
print(0xfffff)
print(bin(0xfffff))
start = 0x00000
one = 0x00001
print(bin(start))
print(bin(one))
print(bin(start | one))
print(bin((start | one) << 1))
values = [0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,1,1,1]
start = 0x00000
for i in range(len(values)):
val = values[i]
if val == 1:
start = start | 0x00001
if i < len(values)-1:
start = start << 1
print(bin(start))
hexVersion = hex(int("00100010000001011111", 2))
print(hexVersion)
stringVersion = str(hexVersion)
print(stringVersion)
print("0x2205f" == stringVersion)
print(len("00100010000001011111"))
def listToString(lst: list):
returnString = ""
for item in lst:
returnString += str(item)
return returnString
print(listToString(values))
word1 = "1111"
word2 = "2223"
translation = {}
result = True
for i in range(len(word1)):
ch1 = word1[i]
ch2 = word2[i]
if ch1 not in translation:
if ch2 not in translation.values():
translation[ch1] = ch2
else:
result = False
break
if ch2 != translation[ch1]:
result = False
break
print(result)
file = open("test.txt", "w+")
file.write("a\nb\nc\n")
file.close()
file = open("test.txt", "r")
a = file.readline().strip()
b = file.readline().strip()
print(a)
print(b)
file.close()
file = open("test.txt", "r")
c = file.readline().strip()
print(c)
people = {}
people["Jeff"] = 22
people["Emilee"] = 23
people["A"] = 1
people["B"] = 2
people["C"] = 3
people2 = people.copy()
people2["Jeff"] = 666
print(people)
print(type(people))
print(people.keys())
print(type(people.keys()))
pKeys = people.keys()
def strangeFunction(n):
print(n)
if n > 2:
strangeFunction(n-1)
strangeFunction(n-2)
strangeFunction(4)
stack = []
def makeWord(word):
pass
def modular_exp(x, y, N):
if y == 0:
return 1
z = modular_exp(x, y//2, N)
print("x:{} y:{} yo:{}".format(x, y, y % 2 == 1), end=" ")
if y % 2 == 0:
print("z:{} rv: z^2 = {}^2 = {} mod {} = {}".format(z, z, z**2, N, z**2 % N))
return z**2 % N
else:
print("z:{} rv: x*z^2 = {}*{}^2 = {} mod {} = {}".format(z, x, z, x * z**2, N, x * z**2 % N))
return x * z**2 % N
modular_exp(2,21,18)
a = "hello"
b = a
a += "!"
print(a)
print(b)
stack.append("J")
stack.append("e")
stack.append("f")
stack.pop()
stack.append("f")
print("".join(stack))
print(len(".................................................................................................................................................................................................."))
names = ["Jeff", "TrevDawg", "Jacob", "Brian"]
for name, i in enumerate(names):
print(str(name) + " " + str(i))
'''
```
|
{
"source": "jeffxtang/MetalCNNWeights",
"score": 2
}
|
#### File: jeffxtang/MetalCNNWeights/convert_doggy.py
```python
import argparse
import errno
import math
import os
import struct
import sys
import tarfile
import urllib
import numpy
import tensorflow
#
# Constants
#
URL_INCEPTION3 = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
#
# Arguments
#
arg_parser = argparse.ArgumentParser(description='Convert Inception v3 batch-normalized weights into weights and biases for MPSCNNConvolution.')
arg_parser.add_argument('--inception3-url', default=URL_INCEPTION3, help='URL to Inception v3 model [%(default)s]')
arg_parser.add_argument('--input-dir', default='./input_doggy', help='Directory to download model [%(default)s]')
arg_parser.add_argument('--output-dir', default='./output_doggy', help='Directory to generate weights and biases [%(default)s]')
arg_parser.add_argument('--dat-dir', default='./dat', help='Directory of MetalImageRecognition .dat files [%(default)s]')
#
# ===== UTILITY
# ----- OS
#
def dir_create(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
if errno.EEXIST == e.errno:
return # success
raise
#
# ----- INPUT
#
def url_download_extract(download_dir, url):
# check
filename = url.split('/')[-1]
filepath = os.path.join(download_dir, filename)
if os.path.exists(filepath):
return # no-op
# download
def _reporthook(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename,
100.0 * float(count*block_size) / float(total_size),
))
sys.stdout.flush()
urllib.urlretrieve(
url,
filepath,
_reporthook,
)
print ''
fileinfo = os.stat(filepath)
print 'Downloaded %s %s bytes.' % (
filename,
fileinfo.st_size,
)
# extract
tarfile.open(filepath, 'r:gz').extractall(download_dir)
#
# ----- OUTPUT
#
def graph_create(graphpath):
with tensorflow.python.platform.gfile.FastGFile(graphpath, 'r') as graphfile:
graphdef = tensorflow.GraphDef()
graphdef.ParseFromString(graphfile.read())
tensorflow.import_graph_def(graphdef, name='')
def dat_readformat(data_len):
return dat_writeformat(data_len/struct.calcsize('<f'))
def dat_writeformat(data_count):
return '<' + str(data_count) + 'f'
def conv_write(output_dir, dat_dir, sess, name):
# read
beta = sess.graph.get_tensor_by_name(name + '/batchnorm/beta:0').eval()
gamma = sess.graph.get_tensor_by_name(name + '/batchnorm/gamma:0').eval()
mean = sess.graph.get_tensor_by_name(name + '/batchnorm/moving_mean:0').eval()
var = sess.graph.get_tensor_by_name(name + '/batchnorm/moving_variance:0').eval()
weights = sess.graph.get_tensor_by_name(name + '/conv2d_params:0').eval()
# calculate
weight_modifiers = gamma / numpy.sqrt(var+0.001) # BN transform scale
weights = weights * weight_modifiers
biases = beta - (weight_modifiers * mean)
# write
name_output = name.replace('/', '_')
# - weights
weights_output = numpy.zeros(reduce(lambda x,y: x*y, weights.shape), numpy.float32)
output_i = 0
for l in xrange(weights.shape[3]):
for i in xrange(weights.shape[0]):
for j in xrange(weights.shape[1]):
for k in xrange(weights.shape[2]):
weights_output[output_i] = weights[i][j][k][l]
output_i += 1
weights_filename = 'weights_%s.dat' % (name_output,)
weights_filepath = os.path.join(output_dir, weights_filename)
with open(weights_filepath, 'wb') as f:
f.write(struct.pack(dat_writeformat(len(weights_output)), *weights_output))
# - biases
biases_filename = 'bias_%s.dat' % (name_output,)
biases_filepath = os.path.join(output_dir, biases_filename)
with open(biases_filepath, 'wb') as f:
f.write(struct.pack(dat_writeformat(len(biases)), *biases))
# check
weights_dat_filepath = os.path.join(dat_dir, weights_filename)
biases_dat_filepath = os.path.join(dat_dir, biases_filename)
if not os.path.exists(weights_dat_filepath) or \
not os.path.exists(biases_dat_filepath):
print '%-40s' % (name_output,)
return
weights_maxdelta = '?'
with open(weights_dat_filepath, 'rb') as f:
weights_dat = numpy.fromstring(f.read(), dtype='<f4')
weights_maxdelta = max(map(abs, weights_output - weights_dat))
biases_maxdelta = '?'
with open(biases_dat_filepath) as f:
biases_dat = numpy.fromstring(f.read(), dtype='<f4')
biases_maxdelta = max(map(abs, biases - biases_dat))
print '%-40s [max delta: w=%-8f b=%-8f]' % (name_output, weights_maxdelta, biases_maxdelta,)
def softmax_write(output_dir, dat_dir, sess):
name = 'final_training_ops'
# read
weights = sess.graph.get_tensor_by_name('final_training_ops/weights/final_weights:0').eval()
biases = sess.graph.get_tensor_by_name('final_training_ops/biases/final_biases:0' ).eval()
# write
# - weights
# weights.shape is 2048x120
weights_output = numpy.zeros(reduce(lambda x,y: x*y, weights.shape), numpy.float32)
output_i = 0
for l in xrange(weights.shape[1]):
for k in xrange(weights.shape[0]):
weights_output[output_i] = weights[k][l]
output_i += 1
weights_filename = 'weights_%s.dat' % (name,)
weights_filepath = os.path.join(output_dir, weights_filename)
with open(weights_filepath, 'wb') as f:
f.write(struct.pack(dat_writeformat(len(weights_output)), *weights_output))
# - biases
biases_filename = 'bias_%s.dat' % (name,)
biases_filepath = os.path.join(output_dir, biases_filename)
with open(biases_filepath, 'wb') as f:
f.write(struct.pack(dat_writeformat(len(biases)), *biases))
# check
weights_dat_filepath = os.path.join(dat_dir, weights_filename)
biases_dat_filepath = os.path.join(dat_dir, biases_filename)
if not os.path.exists(weights_dat_filepath) or \
not os.path.exists(biases_dat_filepath):
print '%-40s' % (name,)
return
weights_maxdelta = '?'
with open(weights_dat_filepath, 'rb') as f:
weights_dat = numpy.fromstring(f.read(), dtype='<f4')
weights_maxdelta = max(map(abs, weights_output - weights_dat))
biases_maxdelta = '?'
with open(biases_dat_filepath) as f:
biases_dat = numpy.fromstring(f.read(), dtype='<f4')
biases_maxdelta = max(map(abs, biases - biases_dat))
print '%-40s [max delta: w=%-8f b=%-8f]' % (name, weights_maxdelta, biases_maxdelta,)
#
# ===== MAIN
#
def main():
# ===== ARGUMENTS
args = arg_parser.parse_args()
inception3_url = args.inception3_url
input_dir = args.input_dir
output_dir = args.output_dir
dat_dir = args.dat_dir
# ===== INPUT
#dir_create(input_dir)
#url_download_extract(input_dir, inception3_url)
# ===== OUTPUT
dir_create(output_dir)
# ----- LOAD
graph_create(os.path.join(input_dir, 'dog_retrained.pb'))
with tensorflow.Session() as sess:
# filters
conv_write(output_dir, dat_dir, sess, 'conv')
conv_write(output_dir, dat_dir, sess, 'conv_1')
conv_write(output_dir, dat_dir, sess, 'conv_2')
# pool
conv_write(output_dir, dat_dir, sess, 'conv_3')
conv_write(output_dir, dat_dir, sess, 'conv_4')
# pool_1
# inceptions with 1x1, 3x3, 5x5 convolutions
conv_write(output_dir, dat_dir, sess, 'mixed/conv')
conv_write(output_dir, dat_dir, sess, 'mixed/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed/tower_1/conv_2')
# mixed/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed/tower_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_1/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_1/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_1/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_1/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_1/tower_1/conv_2')
# mixed_1/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_1/tower_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_2/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_2/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_2/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_2/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_2/tower_1/conv_2')
# mixed_2/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_2/tower_2/conv')
# inceptions with 1x1, 3x3(in sequence) convolutions
conv_write(output_dir, dat_dir, sess, 'mixed_3/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_3/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_3/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_3/tower/conv_2')
# mixed_3/pool
# inceptions with 1x1, 7x1, 1x7 convolutions
conv_write(output_dir, dat_dir, sess, 'mixed_4/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower_1/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower_1/conv_3')
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower_1/conv_4')
# mixed_4/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_4/tower_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_5/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower_1/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower_1/conv_3')
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower_1/conv_4')
# mixed_5/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_5/tower_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_6/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower_1/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower_1/conv_3')
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower_1/conv_4')
# mixed_6/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_6/tower_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_7/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower_1/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower_1/conv_3')
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower_1/conv_4')
# mixed_7/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_7/tower_2/conv')
# inceptions with 1x1, 3x3, 1x7, 7x1 filters
conv_write(output_dir, dat_dir, sess, 'mixed_8/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_8/tower/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_8/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_8/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_8/tower_1/conv_2')
conv_write(output_dir, dat_dir, sess, 'mixed_8/tower_1/conv_3')
# mixed_8/pool
conv_write(output_dir, dat_dir, sess, 'mixed_9/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower/mixed/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower/mixed/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower_1/mixed/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower_1/mixed/conv_1')
# mixed_9/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_9/tower_2/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_10/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower/mixed/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower/mixed/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower_1/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower_1/conv_1')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower_1/mixed/conv')
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower_1/mixed/conv_1')
# mixed_10/tower_2/pool
conv_write(output_dir, dat_dir, sess, 'mixed_10/tower_2/conv')
# pool_3
softmax_write(output_dir, dat_dir, sess)
if '__main__' == __name__:
main()
```
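The heart of `conv_write` is folding the batch-norm parameters into the convolution so MPSCNNConvolution only needs weights and biases: the weights are scaled by `gamma / sqrt(var + eps)` and the bias becomes `beta - scale * mean`. A small self-contained NumPy check of that identity (Python 3, hypothetical channel count) is shown below:
```python
# Sketch: folding BN into per-channel scale/bias matches applying BN to the
# raw convolution outputs. Shapes and values here are arbitrary placeholders.
import numpy as np

rng = np.random.default_rng(0)
channels, eps = 4, 0.001
y = rng.normal(size=(10, channels))                 # raw conv outputs (no bias)
gamma, beta = rng.normal(size=channels), rng.normal(size=channels)
mean, var = y.mean(axis=0), y.var(axis=0)           # BN statistics

bn_out = gamma * (y - mean) / np.sqrt(var + eps) + beta   # textbook BN transform

scale = gamma / np.sqrt(var + eps)                  # 'weight_modifiers' above
folded = y * scale + (beta - scale * mean)          # folded weights + bias

assert np.allclose(bn_out, folded)
```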
|
{
"source": "jeffxtang/serve",
"score": 2
}
|
#### File: serve/ts_scripts/torchserve_grpc_client.py
```python
import grpc
import inference_pb2
import inference_pb2_grpc
import management_pb2
import management_pb2_grpc
import sys
def get_inference_stub():
channel = grpc.insecure_channel('localhost:9090')
stub = inference_pb2_grpc.InferenceAPIsServiceStub(channel)
return stub
def get_management_stub():
channel = grpc.insecure_channel('localhost:9091')
stub = management_pb2_grpc.ManagementAPIsServiceStub(channel)
return stub
def infer(stub, model_name, model_input):
with open(model_input, 'rb') as f:
data = f.read()
input_data = {'data': data}
response = stub.Predictions(
inference_pb2.PredictionsRequest(model_name=model_name, input=input_data))
    try:
        prediction = response.prediction.decode('utf-8')
        print(prediction)
    except grpc.RpcError as e:
        exit(1)
def register(stub, model_name):
params = {
'url': "https://torchserve.s3.amazonaws.com/mar_files/{}.mar".format(model_name),
'initial_workers': 1,
'synchronous': True,
'model_name': model_name
}
try:
response = stub.RegisterModel(management_pb2.RegisterModelRequest(**params))
print(f"Model {model_name} registered successfully")
except grpc.RpcError as e:
print(f"Failed to register model {model_name}.")
print(str(e.details()))
exit(1)
def unregister(stub, model_name):
try:
response = stub.UnregisterModel(management_pb2.UnregisterModelRequest(model_name=model_name))
print(f"Model {model_name} unregistered successfully")
except grpc.RpcError as e:
print(f"Failed to unregister model {model_name}.")
print(str(e.details()))
exit(1)
if __name__ == '__main__':
# args:
# 1-> api name [infer, register, unregister]
# 2-> model name
# 3-> model input for prediction
args = sys.argv[1:]
if args[0] == "infer":
infer(get_inference_stub(), args[1], args[2])
else:
api = globals()[args[0]]
api(get_management_stub(), args[1])
```
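A hedged usage sketch of the helpers above, bypassing the `sys.argv` dispatch; the model name and image path are placeholders, and it assumes TorchServe is running with its gRPC ports open and that the generated `*_pb2` modules are importable:
```python
# Hypothetical direct use of the gRPC helpers defined above.
from torchserve_grpc_client import (get_inference_stub, get_management_stub,
                                    infer, register, unregister)

MODEL = "densenet161"                           # placeholder model name
IMAGE = "examples/image_classifier/kitten.jpg"  # placeholder input file

register(get_management_stub(), MODEL)
infer(get_inference_stub(), MODEL, IMAGE)
unregister(get_management_stub(), MODEL)
```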
|
{
"source": "jeffyan2000/cocclient",
"score": 2
}
|
#### File: jeffyan2000/cocclient/config.py
```python
import sys, threading, os, random, time, json
from tkinter import *
from tkinter import font as tkFont
from PIL import Image, ImageTk
from threading import Thread
screen_width, screen_height = 600, 500
player_deme = (77, 77)
screen_offset = (int(screen_width/2 - player_deme[0]/2), int(screen_height/2 - player_deme[1]/2))
window = Tk()
window.resizable(False, False)
window.title("test")
arial36 = tkFont.Font(family='Arial', size=36)
arial14 = tkFont.Font(family='Arial', size=9)
arial15 = tkFont.Font(family='Arial', size=15)
import socket
import socketio
#udp receiving port
UDP_PORT_RECEIVE = random.randint(7000, 8000)
#udp sending port
UDP_PORT_SEND = 6002
#tcp sending port
TCP_PORT_SEND = 6001
#destination IP vps197548.vps.ovh.ca
HOST_IP = "localhost"
#udp sender socket
# udp receiver socket
sock_receive = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_receive.bind(("0.0.0.0", UDP_PORT_RECEIVE))
speech_width = 20
sio = socketio.Client()
image_lib = {}
def load(path, n):
img = Image.open(os.path.join(path, n + ".png"))
image_lib[os.path.join(path, n + ".png")] = img
return ImageTk.PhotoImage(img)
def loadTools(name):
return load(os.path.join("lib", "items", "tools"), name)
def loadFrames(path, name, size, frames):
animation_lib[name+"_frames"] = []
for y in range(frames[1]):
for x in range(frames[0]):
cropped = image_lib[os.path.join(path, name + ".png")].crop((x * size[0], y * size[1], (x+1) * size[0], (y+1) * size[1]))
animation_lib[name+"_frames"].append(ImageTk.PhotoImage(cropped))
def loadBlockFrames(path, name, nick_name, size, frames):
block_lib[nick_name+"_frames"] = []
for y in range(frames[1]):
for x in range(frames[0]):
cropped = image_lib[os.path.join(path, name + ".png")].crop((x * size[0], y * size[1], (x+1) * size[0], (y+1) * size[1]))
block_lib[nick_name+"_frames"].append(ImageTk.PhotoImage(cropped))
#texture for characters
character_lib = {}
#texuure for animations
animation_lib = {}
#texture for room background
background_lib = {}
#texture for items
item_lib = {}
#texture for gui
gui_lib = {}
#texture for blocks
block_lib = {}
tools = os.listdir(os.path.join("lib", "items", "tools"))
skins = os.listdir(os.path.join("lib", "characters"))
blocks = os.listdir(os.path.join("lib", "blocks"))
texture_names = []
gui_names = ["backpack_bg", "slot"]
background_names = ["default"]
for character_texture in skins:
texture_names.append(character_texture[:-4])
for tool_texture in tools:
item_lib["tool"+tool_texture[:-4]] = loadTools(tool_texture[:-4])
for block_texture in blocks:
block_texture = block_texture[:-4]
temp = block_texture.split("@")
temp2 = temp[0].split("-")
load(os.path.join("lib", "blocks"), block_texture)
loadBlockFrames(os.path.join("lib", "blocks"), block_texture, temp[1], (int(temp2[0]), int(temp2[1])), (int(temp2[2]), int(temp2[3])))
for name in texture_names:
character_lib[name] = load(os.path.join("lib", "characters"), name)
for name in gui_names:
gui_lib[name] = load(os.path.join("lib", "gui"), name)
for name in background_names:
background_lib[name] = load(os.path.join("lib", "room", "floor"), name)
for character_texture in texture_names:
loadFrames(os.path.join("lib", "characters"), character_texture, player_deme, (8, 4))
class GCanvas(Canvas):
def __init__(self):
Canvas.__init__(self, window, width=screen_width, height=screen_height, background="#222222")
screen = GCanvas()
titles = {"chat":Label(window, text="Press [T] to Chat, [Return] to finish typing"),
"name":Label(window, text="CocTool Alpha")}
chat = Text(width=int(screen_width/8), height=int(screen_height/50), wrap=WORD, background="#FFFFFF")
chat.config(state='disabled')
screen.pack(fill=BOTH, expand=1)
titles["chat"].pack()
chat.pack(fill=BOTH, expand=1)
screen.focus_set()
with open('player_info.json') as f:
my_info = json.load(f)
IDS = {"id":None, "name":my_info["info"][0]["name"], "skin":my_info["info"][0]["skin"]}
WORD_LIMIT = 100
mouse_pos = [0, 0]
def motion(event):
mouse_pos[0], mouse_pos[1] = event.x, event.y
window.bind('<Motion>', motion)
item_id_list = {}
```
#### File: jeffyan2000/cocclient/player.py
```python
from config import *
from items import *
class Player:
def __init__(self, room, id, name, skin):
self.room = room
self.pos = [0, 0]
self.previous_state = 0
self.state = 0
self.name = name
self.texture = skin + "_frames"
self.size = (77, 77)
self.frames = (8, 4)
self.image = screen.create_image(0, 0, image=animation_lib[self.texture][0], anchor=NW)
self.id = id
self.name_box = screen.create_text(player_deme[0]/2, player_deme[1] + 10, text=self.name)
self.current_text = ""
self.bubble = None
self.speech = None
self.speech_start_time = 0
self.speech_total_time = 0
def start_speech(self, text):
self.stop_speech()
size = arial14.measure(text)
if size > 300:
size = 300
self.bubble = screen.create_rectangle(int(player_deme[0] / 2) - size/2 - 5 + self.pos[0],
-55 + self.pos[1], int(player_deme[0] / 2) + self.pos[0] + size/2 + 5,
5 + self.pos[1],
fill="#FFFFFF")
self.speech = screen.create_text(int(player_deme[0] / 2) + self.pos[0],
-25 + self.pos[1], font=arial14, justify=CENTER,
text=text, width=300)
screen.itemconfig(self.speech, text=text)
self.current_text = text
self.speech_start_time = time.time()
self.speech_total_time = arial14.measure(text)/50
def stop_speech(self):
screen.delete(self.speech)
screen.delete(self.bubble)
self.current_text = ""
def update(self):
if self.current_text:
if time.time() - self.speech_start_time > self.speech_total_time:
self.stop_speech()
def set_pos(self, pos):
dx, dy = int(pos[0]) - self.pos[0], int(pos[1]) - self.pos[1]
self.move((dx, dy))
if self.state != self.previous_state:
self.previous_state = self.state
screen.itemconfig(self.image, image=animation_lib[self.texture][self.state])
def move(self, pos):
if self.speech and self.bubble:
screen.move(self.speech, pos[0], pos[1])
screen.move(self.bubble, pos[0], pos[1])
screen.move(self.image, pos[0], pos[1])
screen.move(self.name_box, pos[0], pos[1])
self.pos[0] += int(pos[0])
self.pos[1] += int(pos[1])
def delete(self):
screen.delete(self.image)
screen.delete(self.name_box)
if self.speech:
screen.delete(self.speech)
if self.bubble:
screen.delete(self.bubble)
del self.room.players[self.id]
```
#### File: jeffyan2000/cocclient/room.py
```python
from player import *
class TriggerBlock:
def __init__(self, x, y, texture):
self.image = None
self.pos = [x, y]
self.texture = texture
class Room:
def __init__(self):
self.players = {}
self.chatting = False
self.my_x, self.my_y = 0, 0
self.my_ox, self.my_oy = 0, 0
self.dropped_items = {}
self.background = screen.create_image(screen_offset[0], screen_offset[1], anchor="nw", image=background_lib["default"])
def update_items(self):
flag = False
for itemKey in self.dropped_items:
if self.dropped_items[itemKey].getDistance((self.my_ox+screen_offset[0],
self.my_oy+screen_offset[1])) < 25 and not flag:
flag = True
if not self.dropped_items[itemKey].item_name_image:
self.dropped_items[itemKey].showName()
else:
if self.dropped_items[itemKey].item_name_image:
self.dropped_items[itemKey].hideName()
def drop_item(self, item, pos):
temp = [0, 0]
temp[0] = self.my_x + pos[0] + screen_offset[0] + player_deme[0]/2
temp[1] = self.my_y + pos[1] + screen_offset[1] + player_deme[1]/2
self.dropped_items[item.id] = item
self.dropped_items[item.id].create_image(temp)
self.dropped_items[item.id].dropped = True
def set_pos(self, pos):
dx, dy = pos[0] - self.my_x, pos[1] - self.my_y
if dx or dy:
screen.move(self.background, dx, dy)
for itemKey in self.dropped_items:
self.dropped_items[itemKey].move(dx, dy)
self.my_x = pos[0]
self.my_y = pos[1]
def is_chatting(self):
return self.chatting
def set_chatting(self, isChatting):
self.chatting = isChatting
def add_player(self, id, pname, skin):
self.players[id] = Player(self, id, pname, skin)
def pop_player(self, id):
self.players[id].delete()
def draw(self):
for player in self.players:
self.players[player].draw()
def update_players(self):
for player in self.players:
self.players[player].update()
def update(self, data):
tempmyxy = [0, 0]
if data[:3] == "000":
data = data[3:].split('@')
for i in range(len(data)):
if data[i]:
temp = data[i].split('*')
if temp[0] == "!":
tempmyxy[0] = -int(temp[1])
tempmyxy[1] = -int(temp[2])
elif temp[0] in self.players:
if temp[0] == IDS["id"]:
self.my_ox = int(temp[1])
self.my_oy = int(temp[2])
self.players[temp[0]].set_pos((int(temp[1])+screen_offset[0], int(temp[2])+screen_offset[1]))
self.players[temp[0]].state = int(temp[3])
self.set_pos((tempmyxy[0] + self.my_ox, tempmyxy[1] + self.my_oy))
screen.update()
elif data[:3] == "001":
self.players[data[3:5]].start_speech(data[5:])
elif data[:3] == "100":
temp = data[3:].split("@")
self.drop_item(item_id_list[temp[0]], (int(temp[1]), int(temp[2])))
def enable_chat(self):
self.set_chatting(True)
chat.config(state='normal')
chat.focus_set()
def disable_chat(self):
self.set_chatting(False)
chat.delete('1.0', END)
chat.config(state='disabled')
screen.focus_set()
```
|
{
"source": "jeffyan2000/jeffsarcade",
"score": 3
}
|
#### File: jeffsarcade/ms-dos/MS-DOS1.0.py
```python
import sys
import pygame
import random
current_dir = ['C']
files = ['diskname',['C'],['D']]
start = pygame.image.load("ms-dos.png")
game = pygame.image.load("game.jpg")
a = ""
print("welcome to M$-DO$ 1.0.00:00")
def search_dir_sub():
for i in range(len(junk-1)):
if junk[i+1][1] == current_dir[count]:
count += 1
junk = junk[i+1][1]
while True:
try:
a = raw_input(current_dir[0] + ":\>")
except (EOFError):
print("EOOOOOF")
if a == "dir":
for File in files:
print(File+".COM")
elif a == "ver":
print("M$-DO$ 1.0.00:00 Copyright Micro$oft enterprise 70BC all rights reserved")
elif a == "help":
print ("Welcome to the Micro$oft M$-DO$ Help File. Unfourtunately, there is no help available at the moment. Try again later")
elif a == "tree":
print ("This is the directory listing application for M$-DO$. Sorry, it appears that a virus has cleaned up your hard drive. Try again later.")
elif a == "shut down":
print("it will turn off in a second")
pygame.time.wait(1000)
sys.exit()
elif a == "you are stupid":
print ("I may be $tupid, but $o are you!")
elif a[:2] == "md":
files.append(a[3:])
elif a == "win":
print ("$tarting Micro$oft Window$ ver$ion 1.0. Please wait a few years while it boot$$$$$$$$$$$$$$$$$$$$.......")
pygame.time.wait(5000)
screen = pygame.display.set_mode((640,480))
screen.blit(start,(0,0))
pygame.display.flip()
pygame.time.wait(5000)
pygame.quit()
print("this program is performing an ILLEGAL OPERATION and will be promptly TERMINATED.")
print("Micro$oft Window$")
print("please contact 911 for more information")
elif a == "format":
print("Formatting the content$ of your hard drive. Plea$e wait a moment while all your hard work i$ utterly annihlated.")
pygame.time.wait(3500)
print("Micro$oft M$-DO$ has now de$troyed all your file$. >:D")
elif a == "hello":
print("sup bro")
elif a == "how are you":
print("I'm fine")
else:
ine = random.randint(0,2)
if ine == 0:
print("Bad command or file name")
else:
print("SyntaxError: invalid syntax")
for letters in a:
if letters == "s" or letters == "S":
print("loving reminder : you ju$t entered an illegal charactor, please use $ in$tead. It i$ important to know that we at Micro$oft care about your money.")
if random.randint(0,10) <= 1:
print("Your $y$tem ha$ performed an ILLEGAL OPERATION and ha$ CRA$HED. Please contact the Micro$oft Technician$ at 9-1-1 to $end $ome emeregeny vehicle$ to re$olve thi$ i$$ue")
pygame.time.wait(5000)
sys.exit()
if random.randint(0,20) <= 1:
print("do you want to play a game?")
pygame.time.wait(5000)
screen = pygame.display.set_mode((1272,858))
screen.blit(game,(0,0))
pygame.display.flip()
pygame.time.wait(5000)
pygame.quit()
```
|
{
"source": "jeffyan2000/platformer",
"score": 3
}
|
#### File: platformer/client/client_socket.py
```python
from client_pre_socket import *
from client_packet import *
from threading import Thread
import time
class ClientSocket(Thread):
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.dead = False
self.start()
def run(self):
udp_listen_socket.bind(("localhost", udp_listen_port))
print("udp listening at " + str(udp_listen_port))
while not self.dead:
try:
data, addr = udp_listen_socket.recvfrom(1024)
except:
print("udp stopped")
packets = disassemble_packet(data.decode())
if packets[0] == "map_player_position":
test_world.update_player_position_by_packets(packets[1])
elif packets[0] == "map_new_player":
test_world.add_new_players_by_packets(packets[1])
elif packets[0] == "info_my_id":
env_vars["my_id"] = packets[1][0].get("my_id")
elif packets[0] == "map_load":
test_world.add_map_items(packets[1])
elif packets[0] == "map_remove_player":
test_world.remove_player(packets[1])
elif packets[0] == "player_backpack_info":
test_backpack.update_items(packets[1])
elif packets[0] == "map_remove_item":
test_world.remove_item(packets[1][0].get("id"))
elif packets[0] == "pack_add_item":
test_backpack.add_item(packets[1][0].get("name"), packets[1][0].get("count"))
def stop(self):
self.dead = True
udp_listen_socket.close()
class TcpSocket(Thread):
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.dead = False
self.server_tcp_port = 13412
self.start()
def run(self):
print("tcp socket started")
tcp_listen = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_listen.connect((host, self.server_tcp_port))
hello_packet = "udp_port=" + str(udp_listen_port) + ";Player"
tcp_listen.send(hello_packet.encode())
print("sent udp port")
while not self.dead:
for i in range(len(tcp_queue)):
tcp_listen.send(tcp_queue.pop().encode())
time.sleep(0.03)
print("tcp stopped")
def stop(self):
self.dead = True
```
#### File: platformer/server/server_map_generator.py
```python
import random
setting_1 = {
"map_items": (
("tree1", 4),
("rock1", 1),
("tall_grass1", 8),
("flower1", 2),
("melon1", 2),
("melon2", 1)
),
"map_range": (-800, 800)
}
settings = [setting_1]
def generate_by_setting(setting_id):
setting = settings[setting_id]
result = {"map_range": setting["map_range"], "map_items": []}
item_id = 0
for item in setting["map_items"]:
for count in range(item[1]):
result["map_items"].append((str(item_id), item[0],
random.randint(setting["map_range"][0], setting["map_range"][1]), 275))
item_id += 1
return result
```
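A quick illustration (not part of the repository) of what `generate_by_setting` returns; positions are random, but the counts follow directly from `setting_1`:
```python
# Hypothetical inspection of the generated layout for setting 0.
from server_map_generator import generate_by_setting

layout = generate_by_setting(0)
print(layout["map_range"])        # (-800, 800)
print(len(layout["map_items"]))   # 18 items: 4 + 1 + 8 + 2 + 2 + 1
print(layout["map_items"][0])     # e.g. ('0', 'tree1', -312, 275)
```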
|
{
"source": "jeffyan2000/snakequestgame",
"score": 3
}
|
#### File: jeffyan2000/snakequestgame/hud.py
```python
from config import *
class InfoBar:
"""
    HUD (head-up display): shows the current HP, experience points, and experience level
"""
def __init__(self, snake):
self.snake = snake
self.hp_lag = 300
self.level_text = None
self.update()
    # Update the current HUD values based on the snake's state
def update(self):
self.level_text = chat_font.render("Lv." + str(self.snake.level), False, (255, 255, 255))
if self.hp_lag > self.snake.hp*3:
self.hp_lag -= 2
else:
self.hp_lag = self.snake.hp*3
    # Draw the current values
def draw(self):
screen.blit(self.level_text, (30, 15))
screen.blit(texture_lib["exp_bar"], (6, 50), pygame.Rect(0, 10, self.snake.current_exp, 10))
screen.blit(texture_lib["exp_bar"], (6, 50), pygame.Rect(0, 0, 100, 10))
screen.fill(-1, pygame.Rect(150, 370, self.hp_lag, 20))
screen.blit(texture_lib["hp_bar"], (150, 370),
pygame.Rect(300 - self.snake.hp * 3, 20, self.snake.hp * 3, 20))
screen.blit(texture_lib["hp_bar"], (150, 370), pygame.Rect(0, 0, 300, 20))
```
#### File: jeffyan2000/snakequestgame/map.py
```python
from fights import *
class Level:
"""
    Base class for all levels.
"""
def __init__(self):
pass
    # Check whether the level has been cleared
def is_passed(self):
return False
class Map(Level):
"""
    Game map. It stores all attacks, experience orbs, animations and objects (cannons, beam cannons, etc.).
    per_exp is the experience per orb, food_count is the number of orbs on the field, pass_level is the level required to clear the stage.
"""
def __init__(self, game):
Level.__init__(self)
self.game = game
self.objects = []
self.exp_balls = []
self.deme = [300, 300]
self.snake = None
self.attacks = []
self.food_count = 5
self.per_exp = 10
self.pass_level = 1
self.boarder_cycle = ReCycle(6, 1)
self.animations = []
    # Check whether the current snake is dead
def is_dead(self):
return self.snake.hp < 0
    # Draw the animated border around the arena
def draw_boarder(self, offset):
c = self.boarder_cycle.get()
pygame.draw.line(screen, (255, 255, 255),
(-self.deme[0] + offset[0] + c, -self.deme[1] + offset[1]),
(-self.deme[0] + offset[0] + c, self.deme[1] + offset[1]), 2)
pygame.draw.line(screen, (255, 255, 255),
(-self.deme[0] + offset[0] - c, -self.deme[1] + offset[1]),
(-self.deme[0] + offset[0] - c, self.deme[1] + offset[1]), 2)
pygame.draw.line(screen, (255, 255, 255),
(self.deme[0] + offset[0] + c, -self.deme[1] + offset[1]),
(self.deme[0] + offset[0] + c, self.deme[1] + offset[1]), 2)
pygame.draw.line(screen, (255, 255, 255),
(self.deme[0] + offset[0] - c, -self.deme[1] + offset[1]),
(self.deme[0] + offset[0] - c, self.deme[1] + offset[1]), 2)
pygame.draw.line(screen, (255, 255, 255),
(self.deme[0] + offset[0], self.deme[1] + offset[1] + c),
(-self.deme[0] + offset[0], self.deme[1] + offset[1] + c), 2)
pygame.draw.line(screen, (255, 255, 255),
(self.deme[0] + offset[0], self.deme[1] + offset[1] - c),
(-self.deme[0] + offset[0], self.deme[1] + offset[1] - c), 2)
pygame.draw.line(screen, (255, 255, 255),
(self.deme[0] + offset[0], -self.deme[1] + offset[1] + c),
(-self.deme[0] + offset[0], -self.deme[1] + offset[1] + c), 2)
pygame.draw.line(screen, (255, 255, 255),
(self.deme[0] + offset[0], -self.deme[1] + offset[1] - c),
(-self.deme[0] + offset[0], -self.deme[1] + offset[1] - c), 2)
pygame.draw.rect(screen, (255, 255, 255),
pygame.Rect(offset[0] - self.deme[0] - 15, offset[1] - self.deme[1] - 15, 30,
30))
pygame.draw.rect(screen, (255, 255, 255),
pygame.Rect(offset[0] + self.deme[0] - 15, offset[1] - self.deme[1] - 15, 30,
30))
pygame.draw.rect(screen, (255, 255, 255),
pygame.Rect(offset[0] - self.deme[0] - 15, offset[1] + self.deme[1] - 15, 30,
30))
pygame.draw.rect(screen, (255, 255, 255),
pygame.Rect(offset[0] + self.deme[0] - 15, offset[1] + self.deme[1] - 15, 30,
30))
    # Add a beam; see the Beam class in fights for the parameters
def add_beam(self, position, length, direction, width=30):
self.attacks.append(Beam(self, position, length, direction, width))
    # Add a bullet; see the Bullet class in fights for the parameters
def add_bullet(self, position, speed, radius, damage):
self.attacks.append(Bullet(self, damage, radius, position[0], position[1], speed[0], speed[1]))
    # Set the current snake
def set_snake(self, snake):
self.snake = snake
    # Draw the background fill
def draw_background(self, offset):
pygame.draw.rect(screen, (70, 70, 70),
pygame.Rect(offset[0] - self.deme[0], offset[1] - self.deme[1], self.deme[0] * 2,
self.deme[1] * 2))
    # Draw every object on the map
def draw(self, offset):
for i in range(len(self.exp_balls)-1, -1, -1):
self.exp_balls[i].draw(offset)
for i in range(len(self.attacks) - 1, -1, -1):
self.attacks[i].draw(offset)
for i in range(len(self.objects) - 1, -1, -1):
self.objects[i].draw(offset)
for i in range(len(self.animations)-1, -1, -1):
self.animations[i].draw(offset)
if self.snake.outofbound:
screen.blit(texture_lib["danger"], (0, 0))
self.draw_boarder(offset)
pass_render = chat_font.render("Target:"+str(self.pass_level), False, (255, 255, 255))
screen.blit(pass_render, (450, 30))
    # Update everything on the map: delete objects flagged as dead, advance attack ticks, and handle collisions between experience orbs and the snake
def update(self):
for i in range(len(self.exp_balls)-1, -1, -1):
if get_distance(self.snake.position, self.exp_balls[i].pos) < 20:
self.snake.current_exp += self.exp_balls[i].points
ses[2].play_once()
del self.exp_balls[i]
for i in range(len(self.animations) - 1, -1, -1):
if self.animations[i].dead:
del self.animations[i]
else:
self.animations[i].update()
for i in range(len(self.attacks)-1, -1, -1):
self.attacks[i].update()
if self.attacks[i].attack:
if self.attacks[i].collide_with(self.snake):
self.snake.hp -= self.attacks[i].damage
if self.attacks[i].bounded:
if self.attacks[i].pos[0] > self.deme[0] or \
self.attacks[i].pos[0] < -self.deme[0] or \
self.attacks[i].pos[1] > self.deme[1] or \
self.attacks[i].pos[1] < -self.deme[1]:
self.attacks[i].dead = True
if self.attacks[i].dead:
del self.attacks[i]
for i in range(len(self.objects)-1, -1, -1):
self.objects[i].update()
if self.objects[i].dead:
del self.objects[i]
if len(self.exp_balls) < self.food_count:
self.exp_balls.append(ExpBall((random.randint(-self.deme[0], self.deme[0]),
random.randint(-self.deme[1], self.deme[1])), self.per_exp))
if (self.snake.position[0] < -self.deme[0] or
self.snake.position[0] > self.deme[0] or
self.snake.position[1] < -self.deme[1] or
self.snake.position[1] > self.deme[1]):
self.snake.outofbound = True
else:
self.snake.outofbound = False
class Level1(Map):
"""
    Level 1: the arena is 200 x 200.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 200
self.deme[1] = 200
self.pass_level = 3
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # Same as the parent class
def update(self):
Map.update(self)
class Level2(Map):
"""
    Level 2: two cannons.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 250
self.deme[1] = 250
self.pass_level = 5
self.per_exp = 15
self.objects.append(OneCanon((-310, 100)))
self.objects.append(OneCanon((-310, -100)))
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # The cannons fire their bullets
def update(self):
Map.update(self)
if self.objects[0].cast:
self.add_bullet((self.objects[0].pos[0]+70, self.objects[0].pos[1]+40), (5, 0), 15, 10)
ses[5].play_once()
if self.objects[1].cast:
self.add_bullet((self.objects[1].pos[0]+70, self.objects[1].pos[1]+40), (5, 0), 15, 10)
ses[5].play_once()
class Level3(Map):
"""
    Level 3: a cross-shaped beam cannon.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 300
self.deme[1] = 300
self.per_exp = 30
self.pass_level = 10
self.objects.append(CrossCanon((-40, -40)))
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # Fire the cross beams
def update(self):
Map.update(self)
if self.objects[0].cast:
self.add_beam((-300, 0), 600, "h")
self.add_beam((0, -300), 600, "v")
class Level4(Map):
"""
    Level 4: a moving beam.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 350
self.deme[1] = 350
self.per_exp = 30
self.beam_cycle = Cycle(80, 0)
self.pass_level = 10
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # There are eight beam movement patterns; pick one at random
def update(self):
Map.update(self)
if not self.attacks:
if self.beam_cycle.get() == 0:
generated = random.randint(0, 7)
if generated == 0:
self.objects.append(MovingBeamCannon(self, (-350, -350), "h", 1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 1:
self.objects.append(MovingBeamCannon(self, (-350, 0), "h", 1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 2:
self.objects.append(MovingBeamCannon(self, (-350, 350), "h", -1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 3:
self.objects.append(MovingBeamCannon(self, (-350, 0), "h", -1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 4:
self.objects.append(MovingBeamCannon(self, (-350, -350), "v", 1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 5:
self.objects.append(MovingBeamCannon(self, (0, -350), "v", 1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 6:
self.objects.append(MovingBeamCannon(self, (350, -350), "v", -1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
elif generated == 7:
self.objects.append(MovingBeamCannon(self, (0, -350), "v", -1, 300, 700))
self.attacks.append(self.objects[-1].get_beam())
class Level5(Map):
"""
    Level 5: a rotating beam.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 400
self.deme[1] = 400
self.per_exp = 30
self.count_down = 80
self.pass_level = 10
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # Wait a short delay at the start before spawning the beam
def update(self):
Map.update(self)
self.count_down -= 1
if self.count_down == 0:
self.attacks.append(CenterSlice(self, 1, 0.005, 400))
class Level6(Map):
"""
    Level 6: a giant beam.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 300
self.deme[1] = 300
self.per_exp = 25
self.attacked = True
self.current_attack_pos = (-150, -300)
self.pass_level = 10
self.attack_countdown = 90
self.attack_tick = self.attack_countdown
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # Spawn the giant beam once the wind-up animation ends; otherwise use the attack tick to start a new wind-up
def update(self):
Map.update(self)
if not self.attacked and not self.animations:
self.attack_tick = self.attack_countdown
self.attacked = True
self.attacks.append(Beam(self, self.current_attack_pos, 600, "v", last_t=10, pre_t=0, width=300, damage=40))
ses[0].play_once()
self.game.shake("v", 60)
elif not self.animations and not self.attacks:
self.attack_tick -= 1
if self.attack_tick < 0:
self.attacked = False
if random.randint(0, 1) == 0:
self.current_attack_pos = (-150, -300)
else:
self.current_attack_pos = (150, -300)
self.animations.append(Charge(self.current_attack_pos, 150, 600, 20))
class Level7(Map):
"""
    Level 7: four beams, randomly horizontal or vertical.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 400
self.deme[1] = 400
self.per_exp = 40
self.pass_level = 15
self.current_wave = ((300, -400), (100, -400), (-100, -400), (-300, -400), "v")
self.attacking = True
self.attack_tick = 0
self.attack_speed = 20
self.attack_cycle = Cycle(120, 0)
    # Same as the parent class
def is_passed(self):
if self.snake.level >= self.pass_level:
return True
return False
    # Spawn four horizontal or vertical beams
def update(self):
Map.update(self)
if self.attacking:
self.attack_tick += 1
if self.attack_tick == self.attack_speed:
self.attacks.append(Beam(self, self.current_wave[0], 800, self.current_wave[4],
pre_t=80, width=50, damage=20))
elif self.attack_tick == self.attack_speed*2:
self.attacks.append(Beam(self, self.current_wave[1], 800, self.current_wave[4],
pre_t=80, width=50, damage=20))
elif self.attack_tick == self.attack_speed*3:
self.attacks.append(Beam(self, self.current_wave[2], 800, self.current_wave[4],
pre_t=80, width=50, damage=20))
elif self.attack_tick == self.attack_speed*4:
self.attacks.append(Beam(self, self.current_wave[3], 800, self.current_wave[4],
pre_t=80, width=50, damage=20))
elif self.attack_tick > self.attack_speed * 4:
self.attacking = False
self.attack_tick = 0
elif self.attack_cycle.get() == 0:
self.attacking = True
mode = random.randint(1, 4)
if mode == 1:
self.current_wave = ((300, -400), (100, -400), (-100, -400), (-300, -400), "v")
elif mode == 2:
self.current_wave = ((-300, -400), (-100, -400), (100, -400), (300, -400), "v")
elif mode == 3:
self.current_wave = ((-400, -300), (-400, -100), (-400, 100), (-400, 300), "h")
elif mode == 4:
self.current_wave = ((-400, 300), (-400, 100), (-400, -100), (-400, -300), "h")
class Level8(Map):
"""
    Level 8: every orb grants a level, so it can (basically) be played forever.
"""
def __init__(self, game):
Map.__init__(self, game)
self.deme[0] = 400
self.deme[1] = 400
self.per_exp = 101
self.food_count = 20
self.pass_level = 99999
def is_passed(self):
return False
def update(self):
Map.update(self)
```
|
{
"source": "jeffyboh/flask-rest",
"score": 3
}
|
#### File: flask-rest/app/models.py
```python
from app import db
class Book(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
author = db.Column(db.String(100))
genre = db.Column(db.String(40))
summary = db.Column(db.String(1000))
def __init__(self, title, author, genre, summary):
self.title = title
self.author = author
self.genre = genre
self.summary = summary
```
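A minimal sketch of using the model, assuming the `app` package exposes both the Flask application and the SQLAlchemy instance as `app` and `db` (only `db` is visible in the import above, so the rest is an assumption):
```python
# Hypothetical usage: create the table and insert one row.
from app import app, db
from app.models import Book

with app.app_context():
    db.create_all()
    book = Book(title="Dune", author="Frank Herbert", genre="Sci-Fi",
                summary="A noble family takes control of the desert planet Arrakis.")
    db.session.add(book)
    db.session.commit()
    print(Book.query.count())
```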
|
{
"source": "jeffyjkang/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 3
}
|
#### File: module1-introduction-to-sql/assignment/sqlite_assignment.py
```python
import sqlite3 as sql
import queries as qrs
import pandas as pd
# assignment 1
def connect_db(db='../rpg_db.sqlite3'):
return sql.connect(db)
def exec(conn, query):
curs = conn.cursor()
curs.execute(query)
res = curs.fetchall()
return res
# assignment 2
df = pd.DataFrame(pd.read_csv('../buddymove_holidayiq.csv'))
print(df.shape)
print(df.isnull().count())
conn = sql.connect('../buddymove_holidayiq.sqlite3')
# df.to_sql('review', conn)
# how many rows
row_count = 'SELECT COUNT(*) FROM review'
# how many users who reviewed at least 100 'Nature' and at least 100 in 'Shopping'
nature_and_shopping = 'SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100'
print(exec(conn, row_count))
print(exec(conn, nature_and_shopping))
```
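An example of reusing the two helpers above. Importing the module also runs the assignment's top-level CSV code, so in practice you might copy `connect_db` and `exec` out instead; the query below only touches `sqlite_master`, so no table names have to be assumed:
```python
import sqlite_assignment as sa

conn = sa.connect_db()  # defaults to '../rpg_db.sqlite3'
tables = sa.exec(conn, "SELECT name FROM sqlite_master WHERE type = 'table'")
print(tables)
conn.close()
```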
|
{
"source": "jeffykle/kf-public",
"score": 2
}
|
#### File: kf-public/shop/wagtail_hooks.py
```python
from django.utils.html import escape
from wagtail.contrib.modeladmin.helpers import ButtonHelper
from wagtail.contrib.modeladmin.mixins import ThumbnailMixin
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from wagtail.core import hooks
from wagtail.core.rich_text import LinkHandler
from .models import Order
class NewWindowExternalLinkHandler(LinkHandler):
# This specifies to do this override for external links only.
# Other identifiers are available for other types of links.
identifier = 'external'
@classmethod
def expand_db_attributes(cls, attrs):
href = attrs["href"]
# Let's add the target attr, and also rel="noopener" + noreferrer fallback.
# See https://github.com/whatwg/html/issues/4078.
return '<a href="%s" target="_blank" rel="noopener noreferrer">' % escape(href)
class OrderButtonHelper(ButtonHelper):
# Define classes for our button, here we can set an icon for example
edit_button_classnames = ['button-small', 'icon', 'icon-edit']
exclude = ['edit', 'delete']
def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
classnames_exclude=None):
if exclude is None:
exclude = []
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
ph = self.permission_helper
usr = self.request.user
pk = getattr(obj, self.opts.pk.attname)
btns = []
if('inspect' not in exclude and ph.user_can_inspect_obj(usr, obj)):
btns.append(
self.inspect_button(pk, classnames_add, classnames_exclude)
)
if('edit' not in exclude and ph.user_can_edit_obj(usr, obj)):
btns.append(
self.edit_button(pk, classnames_add, classnames_exclude)
)
if('delete' not in exclude and ph.user_can_delete_obj(usr, obj)):
btns.append(
self.delete_button(pk, classnames_add, classnames_exclude)
)
return btns
@hooks.register('register_rich_text_features')
def register_external_link(features):
features.register_link_type(NewWindowExternalLinkHandler)
class OrderAdmin(ThumbnailMixin, ModelAdmin):
model = Order
menu_label = 'Orders' # ditch this to use verbose_name_plural from model
menu_icon = 'list-ul' # change as required
menu_order = 10 # will put in 3rd place (000 being 1st, 100 2nd)
add_to_settings_menu = False # or True to add your model to the Settings sub-menu
exclude_from_explorer = False # or True to exclude pages of this type from Wagtail's explorer view
list_display = ('user_username', 'user_email', 'date', 'status',)
list_per_page = 50
# search_fields = ('title', 'description')
thumb_image_field_name = 'main_image'
thumb_image_width = 100
# ordering = ('date')
inspect_view_enabled = True
button_helper_class = OrderButtonHelper
modeladmin_register(OrderAdmin)
```
|
{
"source": "jeffykle/linkedlistpractice",
"score": 3
}
|
#### File: jeffykle/linkedlistpractice/app.py
```python
from flask import Flask, jsonify, render_template, request, url_for, session
from LinkedList import *
import os, uuid
app = Flask(__name__)
app.secret_key = os.environ.get('sessionKey')
savedLists = dict()
@app.route('/clear')
def clear():
session.clear()
return "Session cleared."
@app.route('/')
def index():
if 'id' not in session:
session['id'] = str(uuid.uuid1())
savedLists[session['id']] = LinkedList()
else:
if session['id'] not in savedLists:
savedLists[session['id']] = LinkedList()
array = {
'array': savedLists[session['id']].list()
}
return render_template("index.html", myList = json.loads(savedLists[session['id']].json()), array = array)
@app.route('/insert-node')
def insertNode():
savedLists[session['id']].insertNode(savedLists[session['id']].getTail().value + 1 if savedLists[session['id']].head else 0)
return savedLists[session['id']].json()
@app.route('/pop-node')
def popNode():
savedLists[session['id']].pop()
return savedLists[session['id']].json()
@app.route('/get-head')
def getHead():
savedLists[session['id']].current = savedLists[session['id']].head
return savedLists[session['id']].json()
@app.route('/get-next')
def getNext():
savedLists[session['id']].selectNext()
return savedLists[session['id']].json()
@app.route('/get-list')
def getList():
return savedLists[session['id']].json()
@app.route('/delete-list')
def deleteList():
savedLists[session['id']].deleteList()
return savedLists[session['id']].json()
@app.route('/modify-list')
def modifyList():
var = request.args.get('var').split('.') # [previous, next]
oldvalue = getattr(savedLists[session['id']], var[0]) if len(var) == 1 else getattr(getattr(savedLists[session['id']], var[0]), var[1])
expr = request.args.get('expr').split('.') # [current, next]
if expr[0].isnumeric(): #clicking a node will pass in a numeric value to set as current
selected = savedLists[session['id']].head
while selected.next and selected.value != int(expr[0]):
selected = selected.next
right = selected if selected.value == int(expr[0]) else None
else:
right = getattr(savedLists[session['id']], expr[0]) if len(expr) == 1 else getattr(getattr(savedLists[session['id']], expr[0]), expr[1]) # previous OR previous.next
if(len(var)==1):
setattr(savedLists[session['id']], var[0], right)
else:
setattr(getattr(savedLists[session['id']],var[0]),"next",right)
result = json.loads(savedLists[session['id']].json())
result['diff'] = {".".join(var): oldvalue.json() if oldvalue else oldvalue}
result = json.dumps(result)
return result
if __name__ == "__main__":
port = int(os.environ.get('PORT',33507))
app.run(host='0.0.0.0', port=port)
```
#### File: jeffykle/linkedlistpractice/LinkedList.py
```python
import json
class Node:
def __init__(self, value):
self.value = value
self.next = None
def json(self):
next = self.next.value if self.next else None
return dict(value = self.value, next = next)
def __eq__(self, other):
a = self.value if self else self
b = other.value if other else other
return a == b
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
self.current = None
self.previous = None
self.next = None
def getTail(self):
history = []
current = self.head
while (current and current.next):
history.append(current)
for h in history:
if(current.next == h):
return current
current = current.next
return current
def insertNode(self, value):
if(self.head):
self.getTail().next = Node(value)
else:
self.head = Node(value)
return self.selectNode(self.getTail())
def pop(self):
current = self.head
previous = None
while (current and current.next):
previous = current
current = current.next
if(previous):
if(self.current == current):
self.current = previous
current = previous
current.next = None
else:
self.head = self.current = current = None
return current
def selectNode(self, node):
self.current = node
return self.current
def deleteList(self):
self.head = self.current = self.previous = self.next = None
return self
def reverse(self):
prev = None
current = self.head
while(current):
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
return self
def __str__(self):
string = ""
current = self.head
tail = self.getTail()
while(current != tail):
string += str(current.value) + " -> "
previous = current
current = current.next
string += str(current.value)+" -> null" if current else "null"
return string
def list(self):
history = []
result = []
current = self.head
while(current and current.next):
result.append(current.value)
history.append(current)
for h in history:
if(current.next == h):
result.append(current.next.value)
return result
current = current.next
result.append(current.value if current else current)
return result
def selectHead(self):
return self.selectNode(self.head)
def selectNext(self):
next = self.selectNode(self.current.next) if self.current and self.current.next else None
return next
def dict(self):
result = dict()
result['head'] = self.head
result['current'] = self.current
result['previous'] = self.previous
result['next'] = self.next
result['tail'] = self.getTail()
return result
def json(self):
result = json.dumps(self.dict(), cls=ComplexEncoder, sort_keys=True, indent=4)
return result
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj,'json'):
return obj.json()
else:
return json.JSONEncoder.default(self, obj)
if __name__ == '__main__':
ll = LinkedList()
print(ll)
nums = [0,1,2,3,4,5]
for n in nums:
ll.insertNode(n)
print(ll)
t = ll.getTail()
print(ll)
print(ll.getTail().value)
```
|
{
"source": "Jeffyrao/translate",
"score": 3
}
|
#### File: research/unsupervised_morphology/ibm_model1.py
```python
from collections import defaultdict
class IBMModel1(object):
def __init__(self):
"""
translation_prob is the translation probability in the IBM model 1.
the full pseudo-code is available at https://fburl.com/yvp31kuw
"""
self.translation_prob = defaultdict()
self.null_str = "<null>"
def initialize_translation_probs(self, src_path, dst_path):
"""
Direction of translation is conditioned on the source text: t(dst|src).
"""
with open(src_path) as src_file, open(dst_path) as dst_file:
for src_line, dst_line in zip(src_file, dst_file):
src_words = set(src_line.strip().split() + [self.null_str])
dst_words = set(dst_line.strip().split())
for src_word in src_words:
if src_word not in self.translation_prob:
self.translation_prob[src_word] = defaultdict(float)
for dst_word in dst_words:
self.translation_prob[src_word][dst_word] = 1.0
for src_word in self.translation_prob.keys():
denom = len(self.translation_prob[src_word])
for dst_word in self.translation_prob[src_word].keys():
self.translation_prob[src_word][dst_word] = 1.0 / denom
```
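A sketch of the initialization step above; `train.src` and `train.dst` are hypothetical parallel files with one sentence per line, and the EM iterations from the linked pseudo-code are not part of the class as shown:
```python
from ibm_model1 import IBMModel1

model = IBMModel1()
model.initialize_translation_probs("train.src", "train.dst")

# After initialization, every target word that co-occurred with a source word
# gets the same uniform probability 1/|candidates|.
for src_word, dst_probs in list(model.translation_prob.items())[:3]:
    print(src_word, dict(dst_probs))
```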
|
{
"source": "jeffyus/GroupMyStocks",
"score": 3
}
|
#### File: jeffyus/GroupMyStocks/groupstock.py
```python
import pandas_datareader.data as web
import pandas as pd
import datetime
from datetime import date,timedelta
import matplotlib.pyplot as plt
from yahoo_fin.stock_info import get_data
import yahoo_fin.stock_info as si
import yfinance as yf
from forex_python.converter import CurrencyRates
import math
import webbrowser
from webbrowser import os
class stock:
def __init__(self, name):
quote_table = si.get_quote_table(name, dict_result=False)
open=quote_table.at[12,'value']
price=quote_table.at[15,'value']
prePrice=quote_table.at[14,'value']
pe=quote_table.at[13,'value']
avgVol=quote_table.at[3,'value']
vol=quote_table.at[6,'value']
ticker=yf.Ticker(name)
info=ticker.info
mktCap=info['marketCap']
mktCapNum=mktCap
mktCap=mktCap/1000000000
mktCap="{:.1f}".format(mktCap)
self.mktCap=mktCap+'B'
symbol=info['symbol']
self.ave50=info['fiftyDayAverage']
self.name=info['shortName']
country=get_dict_item(info,'country')
employeeRaw=get_dict_item(info,'fullTimeEmployees')
if employeeRaw is not None:
employee=format (employeeRaw, ',d')
else:
employee='-'
instHoldPctRaw=get_dict_item(info,'heldPercentInstitutions')
if instHoldPctRaw is not None:
instHoldPct="{:.1%}".format(instHoldPctRaw)
else:
instHoldPct='-'
fin=si.get_financials(name)
# fin_bs_q=fin["quarterly_balance_sheet"]
fin_bs_q=fin["yearly_balance_sheet"]
fin_bs_q_dates=fin_bs_q.columns.values
date=fin_bs_q_dates[0]
dateStr=str(date)
self.finDate=dateStr[0:10]
fin_year_y=fin["yearly_balance_sheet"]
fin_year_dates_y=fin_year_y.columns.values
date_y=fin_year_dates_y[0]
dateStr_y=str(date_y)
self.finDate_y=dateStr_y[0:10]
sharesOutstandingRaw=get_dict_item(info,'sharesOutstanding')
sharesOutstanding=number2M_pure(sharesOutstandingRaw)
## General
# Total Asset
totalAssetsRaw,totalAssets = get_dataframe_item(fin_bs_q,'totalAssets',country,date,0)
# Total Liabilities
totalLiabRaw,totalLiab = get_dataframe_item(fin_bs_q,'totalLiab',country,date,0)
totalLiab_pct = addPct(totalLiabRaw, totalLiab, totalAssetsRaw)
# Total Equity
totalEquityRaw = totalAssetsRaw - totalLiabRaw
totalEquity=number2M(totalEquityRaw,country,date)
totalEquityRaw_pct = addPct(totalEquityRaw, totalEquity, totalAssetsRaw)
## ASSET
# Total Current Assets
totalCurrentAssetsRaw,totalCurrentAssets = get_dataframe_item(fin_bs_q,'totalCurrentAssets',country,date,0)
if totalCurrentAssetsRaw is not None:
pct="{:.1%}".format(totalCurrentAssetsRaw/totalAssetsRaw)
totalCurrentAssets = totalCurrentAssets + ' (' + pct +')'
# Cash
cashRaw,cash = get_dataframe_item(fin_bs_q,'cash',country,date,0)
cash_pct = addPct(cashRaw, cash, totalCurrentAssetsRaw)
# Short Term Investment
shortTermInvestmentsRaw,shortTermInvestments = get_dataframe_item(fin_bs_q,'shortTermInvestments',country,date,0)
shortTermInvestments_pct = addPct(shortTermInvestmentsRaw, shortTermInvestments, totalCurrentAssetsRaw)
# Receivables
netReceivablesRaw,netReceivables=get_dataframe_item(fin_bs_q,'netReceivables',country,date,0)
netReceivables_pct = addPct(netReceivablesRaw, netReceivables, totalCurrentAssetsRaw)
# Inventory
inventoryRaw,inventory=get_dataframe_item(fin_bs_q,'inventory',country,date,0)
inventory_pct=addPct(inventoryRaw, inventory, totalCurrentAssetsRaw)
# Other Current Asset
otherCurrentAssetsRaw, otherCurrentAssets = get_dataframe_item(fin_bs_q,'otherCurrentAssets',country,date,0)
otherCurrentAssets_pct = addPct(otherCurrentAssetsRaw, otherCurrentAssets, totalCurrentAssetsRaw)
# Total Long Term Asset
totalLongTermAssetRaw = totalAssetsRaw - totalCurrentAssetsRaw
totalLongTermAsset = number2M(totalLongTermAssetRaw,country,date)
totalLongTermAsset_pct = addPct(totalLongTermAssetRaw, totalLongTermAsset, totalAssetsRaw)
# Property, Plant, and Equipment
propertyPlantEquipmentRaw, propertyPlantEquipment = get_dataframe_item(fin_bs_q,'propertyPlantEquipment',country,date,0)
propertyPlantEquipment_pct = addPct(propertyPlantEquipmentRaw, propertyPlantEquipment, totalLongTermAssetRaw)
# Long-term Investment
longTermInvestmentsRaw,longTermInvestments = get_dataframe_item(fin_bs_q,'longTermInvestments',country,date,0)
longTermInvestments_pct = addPct(longTermInvestmentsRaw, longTermInvestments, totalLongTermAssetRaw)
# Net Intangible Asset
netIntangibleAssetsRaw,netIntangibleAssets=get_dataframe_item(fin_bs_q,'intangibleAssets',country,date,0)
netIntangibleAssets_pct = addPct(netIntangibleAssetsRaw, netIntangibleAssets, totalLongTermAssetRaw)
# Goodwill
goodWillRaw,goodWill=get_dataframe_item(fin_bs_q,'goodWill',country,date,0)
goodWill_pct = addPct(goodWillRaw, goodWill, totalLongTermAssetRaw)
# Intangible Asset
intangibleAssetsRaw = netIntangibleAssetsRaw + goodWillRaw
intangibleAssets=number2M(intangibleAssetsRaw,country,date)
intangibleAssets_pct = addPct(intangibleAssetsRaw, intangibleAssets, totalLongTermAssetRaw)
# Other Long-term Asset
otherAssetsRaw, otherAssets = get_dataframe_item(fin_bs_q,'otherAssets',country,date,0)
otherAssets_pct = addPct(otherAssetsRaw, otherAssets, totalLongTermAssetRaw)
# Tangible
tangibleAssetsRaw=totalAssetsRaw-intangibleAssetsRaw
tangibleAssets=number2M(tangibleAssetsRaw,country,date)
tangibleAssets_pct = addPct(tangibleAssetsRaw, tangibleAssets, totalAssetsRaw)
## LIABILITY
# Total Current Liabilities
totalCurrentLiabilitiesRaw, totalCurrentLiabilities = get_dataframe_item(fin_bs_q,'totalCurrentLiabilities',country,date,0)
totalCurrentLiabilities_pct = addPct(totalCurrentLiabilitiesRaw, totalCurrentLiabilities, totalLiabRaw)
# Account Payable
accountsPayableRaw, accountsPayable = get_dataframe_item(fin_bs_q,'accountsPayable',country,date,0)
accountsPayable_pct = addPct(accountsPayableRaw, accountsPayable, totalCurrentLiabilitiesRaw)
# Other Current Liabilities
otherCurrentLiabRaw, otherCurrentLiab = get_dataframe_item(fin_bs_q,'otherCurrentLiab',country,date,0)
otherCurrentLiab_pct = addPct(otherCurrentLiabRaw, otherCurrentLiab, totalCurrentLiabilitiesRaw)
# Total Long-term Liablities
totalLongTermLiabRaw = totalLiabRaw - totalCurrentLiabilitiesRaw
totalLongTermLiab=number2M(totalLongTermLiabRaw,country,date)
totalLongTermLiab_pct = addPct(totalLongTermLiabRaw, totalLongTermLiab, totalLiabRaw)
# Long-term Debt
longTermDebtRaw, longTermDebt = get_dataframe_item(fin_bs_q,'longTermDebt',country,date,0)
longTermDebt_pct = addPct(longTermDebtRaw, longTermDebt, totalLongTermLiabRaw)
shortLongTermDebtRaw, shortLongTermDebt = get_dataframe_item(fin_bs_q,'shortLongTermDebt',country,date,0)
## EQUITY
# Minority Interest
minorityInterestRaw,minorityInterest = get_dataframe_item(fin_bs_q,'minorityInterest',country,date,0)
minorityInterest_pct = addPct(minorityInterestRaw, minorityInterest, totalEquityRaw)
# Total Shareholder's Equity
totalShareholderEquityRaw = totalEquityRaw - minorityInterestRaw
totalShareholderEquity=number2M(totalShareholderEquityRaw,country,date)
totalShareholderEquity_pct = addPct(totalShareholderEquityRaw, totalShareholderEquity, totalEquityRaw)
# Common Stock
commonStockRaw,commonStock=get_dataframe_item(fin_bs_q,'commonStock',country,date,0)
# Retained Earnings
retainedEarningsRaw,retainedEarnings=get_dataframe_item(fin_bs_q,'retainedEarnings',country,date,0)
# Gains Losses Not Affecting Retained Earnings (Treasury Stock)
treasuryStockRaw,treasuryStock=get_dataframe_item(fin_bs_q,'treasuryStock',country,date,0)
# Common Stock Equity
commonStockEquityRaw, commonStockEquity = get_dataframe_item(fin_bs_q,'totalStockholderEquity',country,date,0)
commonStockEquity_pct = addPct(commonStockEquityRaw, commonStockEquity, totalShareholderEquityRaw)
# Preferred Stock Equity
preferredStockEquityRaw = totalShareholderEquityRaw - commonStockEquityRaw
preferredStockEquity=number2M(preferredStockEquityRaw,country,date)
preferredStockEquity_pct = addPct(preferredStockEquityRaw, preferredStockEquity, totalShareholderEquityRaw)
# Book Value
bookValueRaw = tangibleAssetsRaw - totalLiabRaw
bookValue = number2M(bookValueRaw,country,date)
# Common Book Value
commonBookValueRaw = commonStockEquityRaw - intangibleAssetsRaw
commonBookValue = number2M(commonBookValueRaw,country,date)
capitalSurplusRaw,capitalSurplus=get_dataframe_item(fin_bs_q,'capitalSurplus',country,date,0)
floatSharesRaw=info["floatShares"]
floatShares=number2M(floatSharesRaw,country,date)
floatSharesPct="{:.1%}".format(floatSharesRaw/sharesOutstandingRaw)
# FUNDAMENTALS
workingCapitalRaw=totalCurrentAssetsRaw - totalCurrentLiabilitiesRaw
        if (workingCapitalRaw is not None) and (not math.isnan(workingCapitalRaw)):
            workingCapital = number2M(workingCapitalRaw, country, date)
        else:
            # Fallback so the fundamentals table below never hits an undefined name
            workingCapital = '-'
# Basic Ratios
currentRatioRaw=totalCurrentAssetsRaw/totalCurrentLiabilitiesRaw
currentRatio="{:.2f}".format(currentRatioRaw)
quickRatioRaw=(totalCurrentAssetsRaw-inventoryRaw)/totalCurrentLiabilitiesRaw
quickRatio="{:.2f}".format(quickRatioRaw)
deRaw=totalLiabRaw/totalShareholderEquityRaw
de="{:.2f}".format(deRaw)
# BVPS
bvpsRaw=commonStockEquityRaw/sharesOutstandingRaw
bvps="{:.2f}".format(bvpsRaw)
tanBvpsRaw=(commonStockEquityRaw - intangibleAssetsRaw)/sharesOutstandingRaw
tanBvps="{:.2f}".format(tanBvpsRaw)
## Income
in_quart=fin["quarterly_income_statement"]
netIncomeRaw,netIncome=get_dataframe_item(in_quart,'netIncome',country,date,0)
# roeRaw=4*netIncomeRaw/((totalStockholderEquityRaw+totalStockholderEquityRawPre1)/2)
# roe="{:.1%}".format(roeRaw)
totalRevenueRaw,totalRevenue=get_dataframe_item(in_quart,'totalRevenue',country,date,0)
# dfsize=in_quart.shape
# colNum=dfsize[1]
# if colNum>1:
# sum = totalRevenueRaw
# for i in range(1,colNum):
# tempRaw,temp=get_dataframe_item(in_quart,'totalRevenue',country,date,i)
# sum = sum + tempRaw
# totalRevenueRawTTM = sum / colNum
# totalRevenueTTM=number2M(totalRevenueRawTTM,country,date)
grossProfitRaw,grossProfit=get_dataframe_item(in_quart,'grossProfit',country,date,0)
rd_q0Raw,rd_q0=get_dataframe_item(in_quart,'researchDevelopment',country,date,0)
in_year=fin["yearly_income_statement"]
rd_y=in_year.loc['researchDevelopment']
rd_y0=rd_y.iloc[0]
if rd_y0 is not None:
rd_y0=convert_currency(rd_y0,country,date)
rd_y0=int(rd_y0/1000000)
rd_y0=format (rd_y0, ',d')
rd_y0=str(rd_y0)+'M'
BalanceSheetBasic={
'Symbol':symbol,
'MktCapNum':[mktCapNum], # Used for data reorder
'Tot Asset': totalAssets,
'Tot Liab': totalLiab_pct,
'Tot Equity': totalEquityRaw_pct
}
df_BalanceSheetBasic=pd.DataFrame(BalanceSheetBasic,index=[0])
d={
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Price':[("{:.2f}".format(price))],
'EMPL No.':employee,
'Qtly Date':self.finDate,
'Annu Date':self.finDate_y,
'Shares Outsdg': sharesOutstanding
}
df_Old=pd.DataFrame(d,index=[0])
incomeDetail = {
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Net Income': netIncome
}
df_incomeDetail=pd.DataFrame(incomeDetail,index=[0])
assetDetail = {
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Tot Asset': totalAssets,
'Total Current / Tot': totalCurrentAssets,
'Cash / Cr': cash_pct,
'ShrtT Invest / Cr': shortTermInvestments_pct,
'Receivables / Cr': netReceivables_pct,
'Inventory / Cr':inventory_pct,
'Other Curr Asset / Cr':otherCurrentAssets_pct,
'Total Long-term / Tot': totalLongTermAsset_pct,
'Property,ect / Lng': propertyPlantEquipment_pct,
'LongT Invest / Lng': longTermInvestments_pct,
'Intangible / Lng': intangibleAssets_pct,
'Net Intangible / Lng': netIntangibleAssets_pct,
'Goodwill / Lng':goodWill_pct,
'Other LongT Asset / Lng': otherAssets_pct,
'Tangible / Tot':tangibleAssets_pct,
}
df_assetDetail=pd.DataFrame(assetDetail,index=[0])
liabilityDetail = {
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Tot Liab': totalLiab,
'Total Current / Tot': totalCurrentLiabilities_pct,
'Acc Payable / Cr': accountsPayable_pct,
'Other Curr / Cr': otherCurrentLiab_pct,
'Total Long / Tot': totalLongTermLiab_pct,
'Long Debt / Lng': longTermDebt_pct,
'shortLongTermDebt': shortLongTermDebt
}
df_liabilityDetail=pd.DataFrame(liabilityDetail,index=[0])
equityDetail = {
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Tot Eqty': totalEquity,
'Mnrty Int / Tot': minorityInterest_pct,
'Tot Sh Eqty / Tot': totalShareholderEquity_pct,
'Commn Eqty / ShH': commonStockEquity_pct,
'Prffd Eqty / ShH': preferredStockEquity_pct,
'Book Val': bookValue,
'Comn Book Val': commonBookValue,
'Cap Surplus': capitalSurplus
}
df_equityDetail=pd.DataFrame(equityDetail,index=[0])
fundamentals = {
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Wrk Cap':workingCapital,
}
df_fundamentals=pd.DataFrame(fundamentals,index=[0])
baiscRatios = {
'Symbol':symbol,
'MktCapNum':mktCapNum, # Used for data reorder
'Current Rt': currentRatio,
'Quick Rt': quickRatio,
'Debt-Equity': de,
'BVPS': bvps,
'TanBVPS': tanBvps
}
df_baiscRatios=pd.DataFrame(baiscRatios,index=[0])
self.output={
'General Information': df_Old,
'Balance Sheet Basic': df_BalanceSheetBasic,
'Income': df_incomeDetail,
'Assets Details': df_assetDetail,
'Liability Details': df_liabilityDetail,
'Equity Details': df_equityDetail,
'Fundamentals': df_fundamentals,
'Basic Ratios': df_baiscRatios
}
def print_stocks_list(StockLists,htmlName):
htmlBody=''
f = open(htmlName, "w")
for sList in StockLists:
f.write('<H1>'+sList+'</H1>')
htmlBody=htmlBody+'<H1>'+sList+'</H1>'
thisList=StockLists[sList]
stockNum=len(thisList)
stockObjects=[]
for company in thisList:
stockObjects.append(stock(company))
# Get first stock information
outputData=stockObjects[0].output
for subTable in outputData:
f.write('<H3>'+subTable+'</H3>')
htmlBody=htmlBody+'<H3>'+subTable+'</H3>'
df=outputData[subTable]
if stockNum>1:
compaylist=[]
compaylist.append(df)
for i_stock in range(1,stockNum):
otherOutputData=stockObjects[i_stock].output
dfNew=otherOutputData[subTable]
compaylist.append(dfNew)
df=pd.concat(compaylist)
df=df.sort_values(by='MktCapNum',ascending=False)
df=df.drop(columns='MktCapNum')
sym=df.iloc[:, lambda df:0]
symVal=sym.values
df.index=symVal
df=df.drop(columns='Symbol')
s=df.to_html().replace("[bug]","<br />")
htmlBody=htmlBody+'\n'+s
f.write(s)
f.close()
return htmlBody
def convert_currency(number,country,date64):
date=datetime.datetime.utcfromtimestamp(date64.tolist()/1e9)
if country=='United States':
return number
else:
c = CurrencyRates()
if country=='China':
return c.convert('CNY', 'USD', number, date)
elif country=='Japan':
return c.convert('JPY', 'USD', number, date)
elif country=='India':
return c.convert('INR', 'USD', number, date)
elif country=='Canada':
return c.convert('CAD', 'USD', number, date)
elif country=='Germany':
return c.convert('EUR', 'USD', number, date)
else:
return 0
def number2M(number,country,date):
number=convert_currency(number,country,date)
number=int(number/1000000)
number=format (number, ',d')
number=str(number)+'M'
return number
def number2M_pure(number):
    number = number / 1000000
    if number > 100:
        # Only the integer branch can use thousands separators; applying
        # format(x, ',d') to the formatted strings below would raise a ValueError.
        number = format(round(number), ',d')
    elif number > 10:
        number = "{:.1f}".format(number)
    else:
        number = "{:.2f}".format(number)
    return str(number) + 'M'
def get_dataframe_item(theDataFrame,item,country,dateValue,num):
if item in theDataFrame.index:
itemValueRaw=theDataFrame.loc[item]
itemValueRaw=itemValueRaw[num]
if (itemValueRaw is not None):
if (not math.isnan(itemValueRaw)):
itemValue=number2M(itemValueRaw,country,dateValue)
else:
itemValueRaw=0
itemValue='-'
else:
itemValueRaw=0
itemValue='-'
else:
itemValueRaw=0
itemValue='-'
return itemValueRaw,itemValue
def get_dict_item(theDict,item):
if item in theDict:
employeeRaw=theDict[item]
else:
employeeRaw=None
return employeeRaw
def get_stat_value(st, attStr):
    # Default to (0, '-') so a missing attribute does not raise UnboundLocalError
    itemValueRaw, itemValue = 0, '-'
    for i in range(len(st)):
        if st.at[i, 'Attribute'] == attStr:
            itemValue = st.at[i, 'Value']
            itemValueRaw = float(itemValue)
            break
    return itemValueRaw, itemValue
def addPct(dataRaw,data,motherRaw):
if dataRaw is not None:
if (dataRaw!=0) & (motherRaw!=0):
pct="{:.1%}".format(dataRaw/motherRaw)
newData = data + ' (' + pct +')'
else:
newData = data
else:
newData = data
return newData
```
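A sketch of driving the report builder above. The watchlist symbols are hypothetical, and every ticker triggers live yahoo_fin / yfinance / forex lookups, so it only works with network access and valid symbols:
```python
import os
import webbrowser
from groupstock import print_stocks_list

watchlists = {
    "Mega-cap tech": ["AAPL", "MSFT"],
    "Semiconductors": ["NVDA", "AMD"],
}
# Writes report.html and also returns the generated HTML body.
html_body = print_stocks_list(watchlists, "report.html")
webbrowser.open("file://" + os.path.realpath("report.html"))
```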
|
{
"source": "Jeffz615/auto_bili_recorder",
"score": 2
}
|
#### File: Jeffz615/auto_bili_recorder/run.py
```python
from Live import BiliBiliLive
import os
import requests
import time
import config
import utils
import re
import multiprocessing
import urllib3
from bypy import ByPy
urllib3.disable_warnings()
class BiliBiliLiveRecorder(BiliBiliLive):
def __init__(self, room_id, queue, onlyAudio=False, qn=10000, check_interval=1 * 60):
super().__init__(room_id)
self.inform = utils.inform
self.print = utils.print_log
self.check_interval = check_interval
self.onlyAudio = onlyAudio
self.qn = qn
self.queue = queue
def check(self, interval):
while True:
try:
room_info = self.get_room_info()
if room_info['status']:
self.inform(room_id=self.room_id,
desp=room_info['roomname'])
self.print(self.room_id, room_info['roomname'])
break
except Exception as e:
self.print(self.room_id, 'Error:' + str(e))
time.sleep(interval)
return self.get_live_urls(onlyAudio=self.onlyAudio, qn=self.qn)
def record(self, record_urls, output_filename):
self.print(self.room_id, '√ 正在录制...' + self.room_id)
headers = dict()
headers['Accept-Encoding'] = 'identity'
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'
for record_url in record_urls:
try:
flag = False
headers['Referer'] = re.findall(
r'(http://.*\/).*\.flv',
record_url)[0]
self.print(self.room_id, record_url)
resp = requests.get(record_url, stream=True, headers=headers)
with open(output_filename, "wb") as f:
for chunk in resp.iter_content(chunk_size=1024):
f.write(chunk) if chunk else None
flag = True
if flag and os.path.getsize(output_filename) > 256:
self.queue.put(output_filename)
else:
os.remove(output_filename)
break
except Exception as e:
self.print(self.room_id, 'Error while recording:' + str(e))
continue
def run(self):
while True:
try:
self.print(self.room_id, '等待开播')
urls = self.check(interval=self.check_interval)
filename = utils.generate_filename(self.room_id)
self.record(urls, filename)
self.print(self.room_id, '录制完成' + filename)
except Exception as e:
self.print(self.room_id,
'Error while checking or recording:' + str(e))
class autoUpload():
def __init__(self, queue, delAfterUpload, forceDelAfterUpload):
self.queue = queue
self.delAfterUpload = delAfterUpload
self.forceDelAfterUpload = forceDelAfterUpload
def uploadApi(self, uploadFilepath):
uploadFilename = os.path.basename(
uploadFilepath)
        # splitext strips the extension safely; rstrip('.flv') removes characters, not a suffix
        uploadFilenameSplit = os.path.splitext(
            os.path.basename(uploadFilepath))[0].split('_')
roomid = uploadFilenameSplit[-1]
recordDate = uploadFilenameSplit[0]
bp = ByPy(deletesource=self.delAfterUpload, debug=True)
if bp.upload(uploadFilepath, f'{roomid}/{recordDate}/{uploadFilename}') != 0:
if self.forceDelAfterUpload and os.path.isfile(uploadFilepath):
os.remove(uploadFilepath)
raise Exception('upload fail.')
def run(self):
while True:
try:
utils.print_log('uploader', '等待录制完成')
while self.queue.empty():
time.sleep(60)
uploadFilepath = self.queue.get(True)
fsize = os.path.getsize(uploadFilepath)
utils.print_log(
'uploader', f'获取录制文件 {uploadFilepath} ({fsize})')
if fsize > 256:
utils.print_log('uploader', '文件上传中')
self.uploadApi(uploadFilepath)
utils.print_log('uploader', '文件上传完成')
else:
os.remove(uploadFilepath)
except Exception as e:
utils.print_log('uploader', 'Error while upload:' + str(e))
if __name__ == '__main__':
input_id = config.rooms
onlyAudio = config.onlyAudio
qn = config.qn
delAfterUpload = config.delAfterUpload
forceDelAfterUpload = config.forceDelAfterUpload
mp = multiprocessing.Process
q = multiprocessing.Queue()
tasks = [
mp(target=BiliBiliLiveRecorder(str(room_id), queue=q, onlyAudio=onlyAudio, qn=qn).run) for room_id in input_id
]
tasks.append(mp(target=autoUpload(
queue=q, delAfterUpload=delAfterUpload, forceDelAfterUpload=forceDelAfterUpload).run))
for i in tasks:
i.start()
for i in tasks:
i.join()
```
|
{
"source": "Jeffz615/blivedm",
"score": 2
}
|
#### File: blivedm/blivedm/client.py
```python
import asyncio
import collections
import enum
import json
import logging
import ssl as ssl_
import struct
from typing import *
import aiohttp
import brotli
from . import handlers
__all__ = (
'BLiveClient',
)
logger = logging.getLogger('blivedm')
ROOM_INIT_URL = 'https://api.live.bilibili.com/xlive/web-room/v1/index/getInfoByRoom'
DANMAKU_SERVER_CONF_URL = 'https://api.live.bilibili.com/xlive/web-room/v1/index/getDanmuInfo'
DEFAULT_DANMAKU_SERVER_LIST = [
{'host': 'broadcastlv.chat.bilibili.com', 'port': 2243, 'wss_port': 443, 'ws_port': 2244}
]
HEADER_STRUCT = struct.Struct('>I2H2I')
HeaderTuple = collections.namedtuple('HeaderTuple', ('pack_len', 'raw_header_size', 'ver', 'operation', 'seq_id'))
# WS_BODY_PROTOCOL_VERSION
class ProtoVer(enum.IntEnum):
NORMAL = 0
HEARTBEAT = 1
DEFLATE = 2
BROTLI = 3
# go-common\app\service\main\broadcast\model\operation.go
class Operation(enum.IntEnum):
HANDSHAKE = 0
HANDSHAKE_REPLY = 1
HEARTBEAT = 2
HEARTBEAT_REPLY = 3
SEND_MSG = 4
SEND_MSG_REPLY = 5
DISCONNECT_REPLY = 6
AUTH = 7
AUTH_REPLY = 8
RAW = 9
PROTO_READY = 10
PROTO_FINISH = 11
CHANGE_ROOM = 12
CHANGE_ROOM_REPLY = 13
REGISTER = 14
REGISTER_REPLY = 15
UNREGISTER = 16
UNREGISTER_REPLY = 17
    # Business opcodes defined by Bilibili
# MinBusinessOp = 1000
# MaxBusinessOp = 10000
# WS_AUTH
class AuthReplyCode(enum.IntEnum):
OK = 0
TOKEN_ERROR = -101
class InitError(Exception):
    """Raised when initialization fails."""
class AuthError(Exception):
    """Raised when authentication fails."""
class BLiveClient:
"""
    Bilibili live danmaku (chat) client; connects to a single room.
    :param room_id: the room ID from the URL, a short ID is accepted
    :param uid: Bilibili user ID, 0 means not logged in
    :param session: cookies and connection pool
    :param heartbeat_interval: interval between heartbeat packets, in seconds
    :param ssl: True uses the default SSLContext for verification, False disables verification; an SSLContext can also be passed in
    :param loop: event loop for the coroutines
"""
def __init__(
self,
room_id,
uid=0,
session: Optional[aiohttp.ClientSession] = None,
heartbeat_interval=30,
ssl: Union[bool, ssl_.SSLContext] = True,
loop: Optional[asyncio.BaseEventLoop] = None,
):
        # Temporary room ID used for init_room; a short ID is accepted
self._tmp_room_id = room_id
self._uid = uid
if loop is not None:
self._loop = loop
elif session is not None:
self._loop = session.loop # noqa
else:
self._loop = asyncio.get_event_loop()
if session is None:
self._session = aiohttp.ClientSession(loop=self._loop, timeout=aiohttp.ClientTimeout(total=10))
self._own_session = True
else:
self._session = session
self._own_session = False
if self._session.loop is not self._loop: # noqa
raise RuntimeError('BLiveClient and session must use the same event loop')
self._heartbeat_interval = heartbeat_interval
self._ssl = ssl if ssl else ssl_._create_unverified_context() # noqa
        # Message handlers; they can be added and removed at runtime
        self._handlers: List[handlers.HandlerInterface] = []
        # Fields initialized after calling init_room
        # Real room ID
        self._room_id = None
        # Short room ID, 0 if there is none
        self._room_short_id = None
        # Streamer's user ID
        self._room_owner_uid = None
        # Danmaku server list
        # [{host: "tx-bj4-live-comet-04.chat.bilibili.com", port: 2243, wss_port: 443, ws_port: 2244}, ...]
        self._host_server_list: Optional[List[dict]] = None
        # Token used to connect to the danmaku server
        self._host_server_token = None
        # Fields initialized at runtime
        # The websocket connection
        self._websocket: Optional[aiohttp.ClientWebSocketResponse] = None
        # Future of the network coroutine
        self._network_future: Optional[asyncio.Future] = None
        # Handle of the heartbeat timer
self._heartbeat_timer_handle: Optional[asyncio.TimerHandle] = None
@property
def is_running(self) -> bool:
"""
        Whether this client is running. Note that after stop() is called it still counts as running until it has fully stopped.
"""
return self._network_future is not None
@property
def room_id(self) -> Optional[int]:
"""
        Room ID, initialized after init_room() is called
"""
return self._room_id
@property
def room_short_id(self) -> Optional[int]:
"""
        Short room ID (0 if there is none), initialized after init_room() is called
"""
return self._room_short_id
@property
def room_owner_uid(self) -> Optional[int]:
"""
        Streamer's user ID, initialized after init_room() is called
"""
return self._room_owner_uid
def add_handler(self, handler: 'handlers.HandlerInterface'):
"""
        Add a message handler.
        Note that multiple handlers run concurrently; do not rely on the order in which they are invoked.
        Handlers run in the same coroutine that receives messages, so a slow handler blocks message reception; in that case push the message onto a queue and process it in another coroutine.
        :param handler: the message handler
"""
if handler not in self._handlers:
self._handlers.append(handler)
def remove_handler(self, handler: 'handlers.HandlerInterface'):
"""
        Remove a message handler.
        :param handler: the message handler
"""
try:
self._handlers.remove(handler)
except ValueError:
pass
def start(self):
"""
        Start this client.
"""
if self.is_running:
logger.warning('room=%s client is running, cannot start() again', self.room_id)
return
self._network_future = asyncio.ensure_future(self._network_coroutine_wrapper(), loop=self._loop)
def stop(self):
"""
        Stop this client.
"""
if not self.is_running:
logger.warning('room=%s client is stopped, cannot stop() again', self.room_id)
return
self._network_future.cancel()
async def stop_and_close(self):
"""
        Convenience helper: stop this client and release its resources; the client is unusable afterwards.
"""
if self.is_running:
self.stop()
await self.join()
await self.close()
async def join(self):
"""
        Wait for this client to stop.
"""
if not self.is_running:
logger.warning('room=%s client is stopped, cannot join()', self.room_id)
return
await asyncio.shield(self._network_future)
async def close(self):
"""
        Release this client's resources; the client is unusable afterwards.
"""
if self.is_running:
logger.warning('room=%s is calling close(), but client is running', self.room_id)
        # Close the session only if it was created by this client
if self._own_session:
await self._session.close()
async def init_room(self):
"""
        Initialize the fields required to connect to the room.
        :return: True means no fallback was needed; if the client is still usable after falling back, override this method to return True
"""
res = True
if not await self._init_room_id_and_owner():
res = False
            # Fall back to defaults on failure
self._room_id = self._room_short_id = self._tmp_room_id
self._room_owner_uid = 0
if not await self._init_host_server():
res = False
            # Fall back to defaults on failure
self._host_server_list = DEFAULT_DANMAKU_SERVER_LIST
self._host_server_token = None
return res
async def _init_room_id_and_owner(self):
try:
async with self._session.get(ROOM_INIT_URL, params={'room_id': self._tmp_room_id},
ssl=self._ssl) as res:
if res.status != 200:
logger.warning('room=%d _init_room_id_and_owner() failed, status=%d, reason=%s', self._tmp_room_id,
res.status, res.reason)
return False
data = await res.json()
if data['code'] != 0:
logger.warning('room=%d _init_room_id_and_owner() failed, message=%s', self._tmp_room_id,
data['message'])
return False
if not self._parse_room_init(data['data']):
return False
except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
logger.exception('room=%d _init_room_id_and_owner() failed:', self._tmp_room_id)
return False
return True
def _parse_room_init(self, data):
room_info = data['room_info']
self._room_id = room_info['room_id']
self._room_short_id = room_info['short_id']
self._room_owner_uid = room_info['uid']
return True
async def _init_host_server(self):
try:
async with self._session.get(DANMAKU_SERVER_CONF_URL, params={'id': self._room_id, 'type': 0},
ssl=self._ssl) as res:
if res.status != 200:
logger.warning('room=%d _init_host_server() failed, status=%d, reason=%s', self._room_id,
res.status, res.reason)
return False
data = await res.json()
if data['code'] != 0:
logger.warning('room=%d _init_host_server() failed, message=%s', self._room_id, data['message'])
return False
if not self._parse_danmaku_server_conf(data['data']):
return False
except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
logger.exception('room=%d _init_host_server() failed:', self._room_id)
return False
return True
def _parse_danmaku_server_conf(self, data):
self._host_server_list = data['host_list']
self._host_server_token = data['token']
if not self._host_server_list:
logger.warning('room=%d _parse_danmaku_server_conf() failed: host_server_list is empty', self._room_id)
return False
return True
@staticmethod
def _make_packet(data: dict, operation: int) -> bytes:
"""
        Build a packet to send to the server.
        :param data: JSON body of the packet
        :param operation: opcode, see Operation
        :return: the bytes of the whole packet
"""
body = json.dumps(data).encode('utf-8')
header = HEADER_STRUCT.pack(*HeaderTuple(
pack_len=HEADER_STRUCT.size + len(body),
raw_header_size=HEADER_STRUCT.size,
ver=1,
operation=operation,
seq_id=1
))
return header + body
async def _network_coroutine_wrapper(self):
"""
        Handles exceptions from the network coroutine; the actual logic lives in _network_coroutine.
"""
try:
await self._network_coroutine()
except asyncio.CancelledError:
            # Normal shutdown
pass
except Exception as e: # noqa
logger.exception('room=%s _network_coroutine() finished with exception:', self.room_id)
finally:
logger.debug('room=%s _network_coroutine() finished', self.room_id)
self._network_future = None
async def _network_coroutine(self):
"""
        Network coroutine: connects to the server, receives messages and unpacks them.
"""
        # Initialize the room info if it has not been done yet
if self._host_server_token is None:
if not await self.init_room():
raise InitError('init_room() failed')
retry_count = 0
while True:
try:
                # Connect
host_server = self._host_server_list[retry_count % len(self._host_server_list)]
async with self._session.ws_connect(
f"wss://{host_server['host']}:{host_server['wss_port']}/sub",
receive_timeout=self._heartbeat_interval + 5,
ssl=self._ssl
) as websocket:
self._websocket = websocket
await self._on_ws_connect()
                    # Handle incoming messages
message: aiohttp.WSMessage
async for message in websocket:
await self._on_ws_message(message)
                        # At least one message was handled successfully, so reset the retry counter
retry_count = 0
except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
                # Connection dropped; reconnect
pass
except AuthError:
                # Authentication failed; fetch a new token and reconnect
logger.exception('room=%d auth failed, trying init_room() again', self.room_id)
if not await self.init_room():
raise InitError('init_room() failed')
except ssl_.SSLError:
logger.error('room=%d a SSLError happened, cannot reconnect', self.room_id)
raise
finally:
self._websocket = None
await self._on_ws_close()
            # Prepare to reconnect
retry_count += 1
logger.warning('room=%d is reconnecting, retry_count=%d', self.room_id, retry_count)
            await asyncio.sleep(1)  # the explicit loop argument was removed in Python 3.10
async def _on_ws_connect(self):
"""
        Called when the websocket connection is established.
"""
await self._send_auth()
self._heartbeat_timer_handle = self._loop.call_later(self._heartbeat_interval, self._on_send_heartbeat)
async def _on_ws_close(self):
"""
        Called when the websocket connection is closed.
"""
if self._heartbeat_timer_handle is not None:
self._heartbeat_timer_handle.cancel()
self._heartbeat_timer_handle = None
async def _send_auth(self):
"""
        Send the authentication packet.
"""
auth_params = {
'uid': self._uid,
'roomid': self._room_id,
'protover': 3,
'platform': 'web',
'type': 2
}
if self._host_server_token is not None:
auth_params['key'] = self._host_server_token
await self._websocket.send_bytes(self._make_packet(auth_params, Operation.AUTH))
def _on_send_heartbeat(self):
"""
        Timer callback that schedules and sends heartbeat packets.
"""
if self._websocket is None or self._websocket.closed:
self._heartbeat_timer_handle = None
return
self._heartbeat_timer_handle = self._loop.call_later(self._heartbeat_interval, self._on_send_heartbeat)
asyncio.ensure_future(self._send_heartbeat(), loop=self._loop)
async def _send_heartbeat(self):
"""
        Send a heartbeat packet.
"""
if self._websocket is None or self._websocket.closed:
return
try:
await self._websocket.send_bytes(self._make_packet({}, Operation.HEARTBEAT))
except (ConnectionResetError, aiohttp.ClientConnectionError) as e:
logger.warning('room=%d _send_heartbeat() failed: %r', self.room_id, e)
except Exception: # noqa
logger.exception('room=%d _send_heartbeat() failed:', self.room_id)
async def _on_ws_message(self, message: aiohttp.WSMessage):
"""
        Called when a websocket message is received.
        :param message: the websocket message
"""
if message.type != aiohttp.WSMsgType.BINARY:
logger.warning('room=%d unknown websocket message type=%s, data=%s', self.room_id,
message.type, message.data)
return
try:
await self._parse_ws_message(message.data)
except (asyncio.CancelledError, AuthError):
            # Normal shutdown or auth failure; let the caller handle it
raise
except Exception: # noqa
logger.exception('room=%d _parse_ws_message() error:', self.room_id)
async def _parse_ws_message(self, data: bytes):
"""
        Parse a websocket message.
        :param data: the raw websocket message data
"""
offset = 0
try:
header = HeaderTuple(*HEADER_STRUCT.unpack_from(data, offset))
except struct.error:
logger.exception('room=%d parsing header failed, offset=%d, data=%s', self.room_id, offset, data)
return
if header.operation in (Operation.SEND_MSG_REPLY, Operation.AUTH_REPLY):
            # Business messages; several packets may arrive together and have to be split
while True:
body = data[offset + header.raw_header_size: offset + header.pack_len]
await self._parse_business_message(header, body)
offset += header.pack_len
if offset >= len(data):
break
try:
header = HeaderTuple(*HEADER_STRUCT.unpack_from(data, offset))
except struct.error:
logger.exception('room=%d parsing header failed, offset=%d, data=%s', self.room_id, offset, data)
break
elif header.operation == Operation.HEARTBEAT_REPLY:
            # Server heartbeat reply: the first 4 bytes are the popularity value, followed by the body of the heartbeat the client sent
            # pack_len does not include the client heartbeat body; unclear whether this is a server bug
body = data[offset + header.raw_header_size: offset + header.raw_header_size + 4]
popularity = int.from_bytes(body, 'big')
            # Build a synthetic message and handle it like a business message
body = {
'cmd': '_HEARTBEAT',
'data': {
'popularity': popularity
}
}
await self._handle_command(body)
else:
            # Unknown message
body = data[offset + header.raw_header_size: offset + header.pack_len]
logger.warning('room=%d unknown message operation=%d, header=%s, body=%s', self.room_id,
header.operation, header, body)
async def _parse_business_message(self, header: HeaderTuple, body: bytes):
"""
        Parse a business message.
"""
if header.operation == Operation.SEND_MSG_REPLY:
            # Business message
if header.ver == ProtoVer.BROTLI:
                # Compressed payload: decompress first; run in another thread so the network coroutine is not blocked
body = await self._loop.run_in_executor(None, brotli.decompress, body)
await self._parse_ws_message(body)
elif header.ver == ProtoVer.NORMAL:
                # Uncompressed payload: deserialize directly; because of the GIL this cannot be parallelized without blocking
if len(body) != 0:
try:
body = json.loads(body.decode('utf-8'))
await self._handle_command(body)
except asyncio.CancelledError:
raise
except Exception:
logger.error('room=%d, body=%s', self.room_id, body)
raise
else:
                # Unknown format
logger.warning('room=%d unknown protocol version=%d, header=%s, body=%s', self.room_id,
header.ver, header, body)
elif header.operation == Operation.AUTH_REPLY:
            # Authentication reply
body = json.loads(body.decode('utf-8'))
if body['code'] != AuthReplyCode.OK:
raise AuthError(f"auth reply error, code={body['code']}, body={body}")
await self._websocket.send_bytes(self._make_packet({}, Operation.HEARTBEAT))
else:
            # Unknown message
logger.warning('room=%d unknown message operation=%d, header=%s, body=%s', self.room_id,
header.operation, header, body)
async def _handle_command(self, command: dict):
"""
        Parse and dispatch a business message.
        :param command: the business message
"""
        # External code may not handle cancellation correctly, so shield the gather
import sys
if sys.version_info.major == 3 and sys.version_info.minor >= 10:
results = await asyncio.shield(
asyncio.gather(
*(handler.handle(self, command) for handler in self._handlers),
return_exceptions=True
)
)
else:
results = await asyncio.shield(
asyncio.gather(
*(handler.handle(self, command) for handler in self._handlers),
loop=self._loop,
return_exceptions=True
),
loop=self._loop
)
for res in results:
if isinstance(res, Exception):
logger.exception('room=%d _handle_command() failed, command=%s', self.room_id, command, exc_info=res)
```
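A minimal usage sketch for the client above, assuming `handlers.HandlerInterface` only requires an async `handle(client, command)` coroutine (that is how `_handle_command` awaits it); the real interface lives in `blivedm.handlers`, which is not shown here:
```python
import asyncio
from blivedm.client import BLiveClient
from blivedm import handlers

class PrintCmdHandler(handlers.HandlerInterface):
    async def handle(self, client, command):
        # command is the decoded business message, e.g. {'cmd': 'DANMU_MSG', ...}
        print(client.room_id, command.get('cmd'))

async def main():
    client = BLiveClient(room_id=1)      # any room ID from a live URL
    client.add_handler(PrintCmdHandler())
    client.start()
    try:
        await asyncio.sleep(60)          # listen for a minute
    finally:
        await client.stop_and_close()

asyncio.run(main())
```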
|
{
"source": "jeffzh3ng/fuxi-scanner",
"score": 2
}
|
#### File: api/config/settings.py
```python
from flask import session
from flask_restful import Resource, reqparse
from fuxi.core.auth.token import auth
from fuxi.core.data.response import Response
from fuxi.common.utils.time_format import timestamp_to_str
from fuxi.core.databases.orm.auth.user_orm import DBFuxiAdmin
from fuxi.core.databases.orm.configuration.config import DBFuxiConfiguration
from fuxi.common.utils.logger import logger
parser = reqparse.RequestParser()
parser.add_argument('username', type=str)
parser.add_argument('password', type=str)
parser.add_argument('nick', type=str)
parser.add_argument('email', type=str)
parser.add_argument('key', type=str)
parser.add_argument('value', type=str)
class ConfigManageV1(Resource):
@auth
def get(self):
"""
GET /api/v1/settings
:return:
"""
try:
# pass
return Response.success()
except Exception as e:
msg = "setup configuration failed: {}".format(e)
logger.warning(msg)
return Response.failed(message=msg)
class AccountManageV1(Resource):
@auth
def get(self):
"""
GET /api/v1/settings/user
"""
data = []
try:
items = DBFuxiAdmin.get_user_list()
for item in items:
item['uid'] = str(item['_id'])
item['date'] = timestamp_to_str(item['date'])
if item['role'] == 0:
item['role'] = "admin"
else:
item['role'] = "user"
del item['_id']
data.append(item)
return Response.success(data=data)
except Exception as e:
msg = "setup configuration failed: {}".format(e)
logger.warning(msg)
return Response.failed(message=msg, data=data)
@auth
def delete(self, uid):
"""
DELETE /api/v1/settings/user/<uid>
"""
try:
if session.get("authority") == 0 and not DBFuxiAdmin.is_admin(uid):
DBFuxiAdmin.delete_by_id(uid)
return Response.success(message="successfully deleted")
else:
return Response.failed(message="Delete user failed: Permission denied")
except Exception as e:
msg = "delete user failed: {} {}".format(uid, e)
logger.warning(msg)
return Response.failed(message=msg)
@auth
def put(self, uid):
"""
PUT /api/v1/settings/user/<uid>
"""
try:
if session.get("authority") != 0:
return Response.failed(message="Failed to modify user information: Permission denied")
args = parser.parse_args()
username = args['username']
nick = args['nick']
email = args['email']
DBFuxiAdmin.update_by_id(uid, {
"username": username,
"nick": nick,
"email": email,
})
return Response.success(message="Modify user information successfully")
except Exception as e:
logger.warning("failed to modify user information: {}".format(e))
return Response.failed(message=e)
class BasicConfigMangeV1(Resource):
@auth
def get(self):
"""
GET /api/v1/settings/basic
"""
data = []
try:
# pass
item = DBFuxiConfiguration.find_one()
if item:
data.append({"key": "whatweb_exe", "desc": "Whatweb"})
data.append({"key": "nmap_exe", "desc": "Nmap"})
data.append({"key": "sqlmap_api", "desc": "SQLMAP API"})
for i in data:
i['cid'] = str(item['_id'])
i['value'] = item[i['key']]
return Response.success(data=data)
except Exception as e:
msg = "setup configuration failed: {}".format(e)
logger.warning(msg)
return Response.failed(message=msg, data=data)
@auth
def put(self, cid):
try:
args = parser.parse_args()
key = args['key']
value = args['value']
if not key or not value:
return Response.failed(message="Illegal input!")
if not DBFuxiConfiguration.setting_item_check(key.strip()):
return Response.failed(message="Configuration item is invalid")
d = {key.strip(): value.strip()}
DBFuxiConfiguration.update_by_id(cid, d)
return Response.success(message="Update successful")
except Exception as e:
msg = "Update failed: {}".format(e)
logger.warning(msg)
return Response.failed(message=msg)
```
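The Resource classes above correspond to the endpoints named in their docstrings. A minimal sketch of how they might be registered with Flask-RESTful (the URL rules and app wiring are assumptions inferred from those docstrings; the real project likely registers them elsewhere):
```python
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

# Hypothetical routes mirroring the docstrings above.
api.add_resource(ConfigManageV1, '/api/v1/settings')
api.add_resource(AccountManageV1, '/api/v1/settings/user', '/api/v1/settings/user/<string:uid>')
api.add_resource(BasicConfigMangeV1, '/api/v1/settings/basic', '/api/v1/settings/basic/<string:cid>')

if __name__ == '__main__':
    app.run(debug=True)
```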
|
{
"source": "jeffzhengye/pylearn",
"score": 2
}
|
#### File: finance/gm/main.py
```python
from __future__ import print_function, absolute_import
from gm.api import *
"""
This strategy uses Bollinger Bands for mean-reversion trading: sell when the price touches the upper band, buy when it touches the lower band.
It was backtested on 600004 from 2009-09-17 13:00:00 to 2020-03-21 15:00:00.
Note:
1: In live trading, a signal triggered on the closing bar or tick must be handled separately; the order may not be filled.
"""
# A strategy must define an init method
def init(context):
# Set the three Bollinger Band parameters
context.maPeriod = 26  # period of the BOLL middle band (moving average)
context.stdPeriod = 26  # period of the BOLL standard deviation
context.stdRange = 1  # multiplier for the distance between the middle band and the upper/lower bands
# Set the instrument to backtest
context.symbol = 'SHSE.600004'  # symbol to subscribe to and trade; here 600004
context.period = max(context.maPeriod, context.stdPeriod, context.stdRange) + 1  # length of the subscribed data window
# Subscribe to market data
subscribe(symbols=context.symbol, frequency='1d', count=context.period)
def on_bar(context, bars):
# Get the data window; anything subscribed in init is available here, returned as a pandas.DataFrame
data = context.data(symbol=context.symbol, frequency='1d', count=context.period, fields='close')
# Compute the upper and lower Bollinger bands
bollUpper = data['close'].rolling(context.maPeriod).mean() \
+ context.stdRange * data['close'].rolling(context.stdPeriod).std()
bollBottom = data['close'].rolling(context.maPeriod).mean() \
- context.stdRange * data['close'].rolling(context.stdPeriod).std()
# Get the current long position
pos = context.account().position(symbol=context.symbol, side=PositionSide_Long)
# Trading logic and order placement
# If holding a position and the price crosses above the upper band, sell.
if data.close.values[-1] > bollUpper.values[-1] and data.close.values[-2] < bollUpper.values[-2]:
if pos:  # with a position, sell at market price
order_volume(symbol=context.symbol, volume=100, side=OrderSide_Sell,
order_type=OrderType_Market, position_effect=PositionEffect_Close)
print('Sold one lot at market price')
# If holding no position and the price crosses below the lower band, buy.
elif data.close.values[-1] < bollBottom.values[-1] and data.close.values[-2] > bollBottom.values[-2]:
if not pos:  # with no position, buy 100 shares at market
order_volume(symbol=context.symbol, volume=100, side=OrderSide_Buy,
order_type=OrderType_Market, position_effect=PositionEffect_Open)
print('Bought one lot at market price')
if __name__ == '__main__':
'''
strategy_id: strategy ID, generated by the platform
filename: script file name; keep it consistent with this file's name
mode: MODE_LIVE for live trading, MODE_BACKTEST for backtesting
token: ID bound to this machine, generated under System Settings - Key Management
backtest_start_time: backtest start time
backtest_end_time: backtest end time
backtest_adjust: price adjustment; ADJUST_NONE (none), ADJUST_PREV (forward-adjusted), ADJUST_POST (backward-adjusted)
backtest_initial_cash: initial backtest capital
backtest_commission_ratio: backtest commission ratio
backtest_slippage_ratio: backtest slippage ratio
'''
run(strategy_id='38faa4f9-2fdc-11ec-<PASSWORD>',
filename='main.py',
mode=MODE_BACKTEST,
token='803ab887a9562b630907ce9a28367e280b463594',
backtest_start_time='2009-09-17 13:00:00',
backtest_end_time='2020-03-21 15:00:00',
backtest_adjust=ADJUST_PREV,
backtest_initial_cash=1000,
backtest_commission_ratio=0.0001,
backtest_slippage_ratio=0.0001)
```
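The bands used in `on_bar` are just a rolling mean plus or minus a multiple of a rolling standard deviation. A standalone pandas sketch of the same computation and crossover check on synthetic prices (parameter names mirror the ones set in `init`; the price series is made up):
```python
import numpy as np
import pandas as pd

maPeriod, stdPeriod, stdRange = 26, 26, 1
close = pd.Series(100 + np.random.default_rng(0).normal(0, 0.5, 300).cumsum())

middle = close.rolling(maPeriod).mean()
upper = middle + stdRange * close.rolling(stdPeriod).std()
lower = middle - stdRange * close.rolling(stdPeriod).std()

# Same crossover tests as on_bar: the latest close crosses a band relative to the previous bar.
sell_signal = close.iloc[-1] > upper.iloc[-1] and close.iloc[-2] < upper.iloc[-2]
buy_signal = close.iloc[-1] < lower.iloc[-1] and close.iloc[-2] > lower.iloc[-2]
print(sell_signal, buy_signal)
```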
#### File: pylearn/network/flaskTest.py
```python
__author__ = 'zheng'
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route('/post/<int:post_id>')
def post(post_id):
#return 'user: ' + username + request.path
#return request.args.get('q', '')
return "postid: %s" % post_id
@app.route('/para', methods=['GET', 'POST'])
def para_extract():
p1 = request.values.get('user', 'unknown')
p2 = request.values.get('pass', 'secret')
print request.args # get
print request.form # post
print request.values # combine
return "%s : %s " % (p1, p2)
#return '%s' % 0.5689
@app.route('/sim', methods=['GET', 'POST'])
def sim():
p1 = request.values.get('word1', 'unknown')
p2 = request.values.get('word2', 'unknown')
return '%s' % 0.5689
if __name__ == "__main__":
app.debug = True
#app.run()
app.run(host='0.0.0.0')
```
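Once the development server above is running, the routes can be exercised from any HTTP client; a quick sketch with `requests` (the host and port assume the default Flask dev server started above):
```python
import requests

base = 'http://127.0.0.1:5000'
print(requests.get(base + '/post/7').text)                                        # "postid: 7"
print(requests.post(base + '/para', data={'user': 'alice', 'pass': 'pw'}).text)   # "alice : pw "
print(requests.get(base + '/sim', params={'word1': 'a', 'word2': 'b'}).text)      # "0.5689"
```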
#### File: pylearn/pylucene/TestAnalysis.py
```python
__author__ = 'zheng'
import os, re, sys, lucene
from java.io import File
from org.apache.lucene.analysis.core import LowerCaseFilter, StopFilter, StopAnalyzer
from org.apache.lucene.analysis.en import PorterStemFilter
from org.apache.lucene.analysis.standard import StandardTokenizer, StandardFilter, StandardAnalyzer
from org.apache.pylucene.analysis import PythonAnalyzer
from org.apache.lucene.document import Document, Field, StringField, TextField
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.index import IndexWriter, IndexWriterConfig
from org.apache.lucene.util import Version
from java.io import StringReader
from org.apache.lucene.analysis.tokenattributes import OffsetAttribute
from org.apache.lucene.analysis.tokenattributes import CharTermAttribute
#from org.apache.lucene.analysis.ja.tokenattributes import PartOfSpeechAttribute
from string import Template
from subprocess import *
class PorterStemmerAnalyzer(PythonAnalyzer):
def createComponents(self, fieldName, reader):
source = StandardTokenizer(Version.LUCENE_CURRENT, reader)
filter = StandardFilter(Version.LUCENE_CURRENT, source)
filter = LowerCaseFilter(Version.LUCENE_CURRENT, filter)
filter = PorterStemFilter(filter)
filter = StopFilter(Version.LUCENE_CURRENT, filter,
StopAnalyzer.ENGLISH_STOP_WORDS_SET)
return self.TokenStreamComponents(source, filter)
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
input = 'this is a test string for Analyzer'
ts = analyzer.tokenStream("dummy", StringReader(input))
#matchVersion = Version.LUCENE_XY; ##Substitute desired Lucene version for XY
offsetAtt = ts.addAttribute(OffsetAttribute.class_)
termAtt = ts.addAttribute(CharTermAttribute.class_)
#posAtt = ts.addAttribute(PartOfSpeechAttribute.class_)
def testStandard():
ts.reset(); ##Resets this stream to the beginning. (Required
while ts.incrementToken():
#print ts.r
#print ts.reflectAsString(True)
print offsetAtt.startOffset()
print offsetAtt.endOffset()
print termAtt.toString() #, posAtt.getPartOfSpeech()
ts.end()
ts.close()
def testPorter():
print 'lucene', lucene.VERSION, lucene.CLASSPATH
input = 'this is a test string for Analyzer'
input = "krasnoselskii organ distingish zalog injection injector gps information"
analyzer = PorterStemmerAnalyzer()
ts = analyzer.tokenStream("dummy", StringReader(input))
offsetAtt = ts.addAttribute(OffsetAttribute.class_)
termAtt = ts.addAttribute(CharTermAttribute.class_)
ts.reset(); ##Resets this stream to the beginning. (Required
while ts.incrementToken():
#print ts.r
#print ts.reflectAsString(True)
print termAtt.toString(), offsetAtt.startOffset(), offsetAtt.endOffset()
ts.end()
ts.close()
testPorter()
def indexFile(dir, filename):
path = os.path.join(dir, filename)
print " File: ", filename
if filename.endswith('.gz'):
child = Popen('gunzip -c ' + path + ' | groff -t -e -E -mandoc -Tascii | col -bx', shell=True, stdout=PIPE, cwd=os.path.dirname(dir)).stdout
command, section = re.search('^(.*)\.(.*)\.gz$', filename).groups()
else:
child = Popen('groff -t -e -E -mandoc -Tascii ' + path + ' | col -bx',
shell=True, stdout=PIPE, cwd=os.path.dirname(dir)).stdout
command, section = re.search('^(.*)\.(.*)$', filename).groups()
data = child.read()
err = child.close()
if err:
raise RuntimeError, '%s failed with exit code %d' %(command, err)
matches = re.search('^NAME$(.*?)^\S', data,
re.MULTILINE | re.DOTALL)
name = matches and matches.group(1) or ''
matches = re.search('^(?:SYNOPSIS|SYNOPSYS)$(.*?)^\S', data,
re.MULTILINE | re.DOTALL)
synopsis = matches and matches.group(1) or ''
matches = re.search('^(?:DESCRIPTION|OVERVIEW)$(.*?)', data,
re.MULTILINE | re.DOTALL)
description = matches and matches.group(1) or ''
doc = Document()
doc.add(Field("command", command, StringField.TYPE_STORED))
doc.add(Field("section", section, StringField.TYPE_STORED))
doc.add(Field("name", name.strip(), TextField.TYPE_STORED))
doc.add(Field("synopsis", synopsis.strip(), TextField.TYPE_STORED))
doc.add(Field("keywords", ' '.join((command, name, synopsis, description)),
TextField.TYPE_NOT_STORED))
doc.add(Field("filename", os.path.abspath(path), StringField.TYPE_STORED))
writer.addDocument(doc)
def indexDirectory(dir):
for name in os.listdir(dir):
path = os.path.join(dir, name)
if os.path.isfile(path):
indexFile(dir, name)
def TestIndex(index_path='./index'): ##from manindex.py
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
directory = SimpleFSDirectory(index_path)
analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
analyzer = LimitTokenCountAnalyzer(analyzer, 10000)
config = IndexWriterConfig(Version.LUCENE_CURRENT, analyzer)
manpath = os.environ.get('MANPATH', '/usr/share/man').split(os.pathsep)
for dir in manpath:
print 'Crawling', dir
for name in os.listdir(dir):
path = os.path.join(dir, name)
if os.path.isdir(path):
indexDirectory(path)
def search_with_Similarity():
import datetime
from org.apache.lucene.search.similarities import BM25Similarity
class CustomTemplate(Template):
delimiter = '#'
template = CustomTemplate(format)
fsDir = SimpleFSDirectory(File(indexDir))
searcher = IndexSearcher(DirectoryReader.open(fsDir))
searcher.setSimilarity(BM25Similarity(1.2, 0.75)) ##BM25Similarity(float k1, float b)
analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
parser = QueryParser(Version.LUCENE_CURRENT, "keywords", analyzer)
parser.setDefaultOperator(QueryParser.Operator.AND)
query = parser.parse(' '.join(args))
start = datetime.now()
hits = searcher.search(query, 50)
scoreDocs = searcher.search(query, 50).scoreDocs
duration = datetime.now() - start
stats = True
if stats:
print >>sys.stderr, "Found %d document(s) (in %s) that matched query '%s':" %(len(scoreDocs), duration, query)
for h in hits:
hit = lucene.Hit.cast_(h)
id, doc = hit.getId(), hit.getDocument()
print (id, doc)
for scoreDoc in scoreDocs:
doc = searcher.doc(scoreDoc.doc)
table = dict((field.name(), field.stringValue())
for field in doc.getFields())
print template.substitute(table)
```
#### File: jeffzhengye/pylearn/RegexPattern.py
```python
__author__ = 'zheng'
import re
script = re.compile(r'<script.*?</script>', re.M | re.DOTALL)
def remove_adsense():
import urllib
url = "http://google-dictionary.so8848.com/meaning?word=maps"
f = urllib.urlopen(url)
content = f.read()
matches = script.finditer(content)
remove_list = []
for m in matches:
print m.span()
code = m.group(0)
if code.find('google_ad_slot') != -1:
print code
remove_list.append(m.span())
print '--'*100
begin = 0
content_new = []
for start, end in remove_list:
content_new.append(content[begin:start])
begin = end
content_new.append(content[end:])
print ''.join(content_new)
remove_adsense()
#ct = """
#"""
#adsense.match(ct, re.M | re.DOTALL)
#
#ms = re.match(r'.*(<script.*?google_ad_client.*?</script>).*', ct, re.M|re.DOTALL)
```
#### File: pylearn/spacy/custom_dict_chinese.py
```python
import spacy
"""
1. How to add a custom user dictionary
2. How to set the default segmenter
"""
def test_custom_dict():
nlp = spacy.load('zh_core_web_sm')
proper_nouns = ['给水流量', '蒸汽流量', '过热度', '主蒸汽']
nlp.tokenizer.pkuseg_update_user_dict(proper_nouns)
doc = nlp('调整给水,注意给水流量与蒸汽流量相匹配,注意过热度,保证主蒸汽温度不超限。')
print('/'.join([t.text for t in doc]))
def test_switch_segmenter():
"""
2. How to set the default segmenter
:return:
"""
from spacy.lang.zh import Chinese
s = '调整给水,注意给水流量与蒸汽流量相匹配,注意过热度,保证主蒸汽温度不超限。This is not perfect'
# Character segmentation (default)
nlp = Chinese()
# char
cfg = {"segmenter": "char"}
nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
doc = nlp(s)
print('/'.join([t.text for t in doc]))
# Jieba
cfg = {"segmenter": "jieba"}
nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
doc = nlp(s)
print('/'.join([t.text for t in doc]))
# PKUSeg with "mixed" model provided by pkuseg
cfg = {"segmenter": "pkuseg"}
nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
nlp.tokenizer.initialize(pkuseg_model="mixed")
doc = nlp(s)
print('/'.join([t.text for t in doc]))
test_switch_segmenter()
```
#### File: speed/cython/fib.py
```python
import cython
#can put all type declarations here
@cython.locals(n=cython.int)
def fib(n):
cython.declare(a=cython.int,
b=cython.int,
i=cython.int)
a,b = 1, 1
for i in range(n):
a, b = a+b, a
return a
```
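fib.py is written in Cython's pure-Python mode, so it runs unmodified under CPython and only gains the static typing when compiled. A sketch of a cythonize-based build plus usage (the setup file name is an assumption; any standard cythonize setup works):
```python
# setup_fib.py (hypothetical): build fib.py into an extension module with
#   python setup_fib.py build_ext --inplace
from setuptools import setup
from Cython.Build import cythonize

setup(ext_modules=cythonize("fib.py"))

# Afterwards the compiled module imports exactly like the pure-Python version:
#   >>> from fib import fib
#   >>> fib(10)
```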
#### File: cython/numpy_cython/timing.py
```python
import numpy as np
from numpy_array_sum import sum_0, sum_01, sum_1, sum_2,sum_3, sum_4, sum_5, sum_6, sum_7
from profilehooks import profile
# import pyximport; pyximport.install(reload_support=True)
input = np.random.ranf((2001, 200)).astype(np.float32)
def sum_np(vecs):
"""
numpy baseline
"""
return np.sum(vecs, axis=0)
@profile
def test():
for i in range(10):
sum_np(input)
sum_0(input)
sum_01(input)
sum_1(input)
sum_2(input)
# sum_3(input)
# sum_4(input)
# sum_5(input)
# sum_7(input)
# sum_6(input)
test()
```
#### File: demo/harmonograph/harmonograph_ui.py
```python
from time import time as clock_time
import numpy as np
from traits.api import HasTraits, Float, Instance, Array, on_trait_change, DelegatesTo, Property
from traitsui.api import View, Item, RangeEditor, HGroup
from chaco.api import Plot, ArrayPlotData
from enable.api import ComponentEditor
from harmonograph import compute
# --- Traits classes.
class Oscillator(HasTraits):
amp = Float(1.0)
freq = Float(5.0)
phase = Float(0.0)
damping = Float(0.01)
traits_view = View(
Item('amp', editor=RangeEditor(low=1.0, high=5.0, low_label='', high_label='', mode='slider'), show_label=False),
Item('freq', editor=RangeEditor(low=10.0, high=10.5, low_label='', high_label='', mode='slider'), show_label=False),
Item('phase', editor=RangeEditor(low=0.0, high=2. * np.pi, low_label='', high_label='', mode='slider'), show_label=False),
Item('damping', editor=RangeEditor(low=0.0, high=0.1, low_label='', high_label='', mode='slider'), show_label=False),
)
oscs = '[osc0, osc1, osc2, osc3]'
attrs = 'amp freq phase damping'.split()
depon = ', '.join('%s.%s' % (oscs, attr) for attr in attrs)
class Harmonograph(HasTraits):
runtime = Float()
time = Array()
xy = Property(depends_on=['time, ' + depon])
osc0 = Instance(Oscillator, args=())
osc1 = Instance(Oscillator, args=())
osc2 = Instance(Oscillator, args=())
osc3 = Instance(Oscillator, args=())
def _time_default(self):
return np.linspace(0, 10, 1000)
def _get_xy(self):
return self.compute()
def compute(self):
t0 = clock_time()
oscs = [self.osc0, self.osc1, self.osc2, self.osc3]
amps = [o.amp for o in oscs]
fs = [o.freq for o in oscs]
phs = [o.phase for o in oscs]
ds = [o.damping for o in oscs]
xy = compute(self.time, amps, fs, phs, ds)
t1 = clock_time() - t0
self.runtime = t1
return xy
@on_trait_change(depon)
def update(self):
self.xy = self.compute()
class HarmonographUI(HasTraits):
DELTA = Float(0.01)
model = Instance(Harmonograph)
runtime = Property(depends_on=['model.runtime'])
framerate = Property(depends_on=['model.runtime'])
xy = DelegatesTo('model')
osc0 = DelegatesTo('model')
osc1 = DelegatesTo('model')
osc2 = DelegatesTo('model')
osc3 = DelegatesTo('model')
plot = Instance(Plot)
totaltime = Float(20.)
starttime = Float(0.0)
traits_view = View(Item('plot', editor=ComponentEditor(), show_label=False),
Item('starttime', editor=RangeEditor(low=0.0, high=50, mode='slider')),
Item('totaltime', editor=RangeEditor(low=10, high=50, mode='slider')),
HGroup(
Item('osc0', style='custom', show_label=False),
Item('osc1', style='custom', show_label=False),
Item('osc2', style='custom', show_label=False),
Item('osc3', style='custom', show_label=False),
),
Item('framerate', style='readonly'),
width=800,
height=600,
resizable=True)
def _get_framerate(self):
return "{:d} FPS".format(int(1. / (self.model.runtime)))
@on_trait_change('starttime, totaltime')
def update(self):
self.model.time = np.linspace(self.starttime, self.starttime + self.totaltime, int(self.totaltime / self.DELTA))
def _xy_changed(self):
self.plot.data.set_data('x', self.xy[0])
self.plot.data.set_data('y', self.xy[1])
def _plot_default(self):
x, y = self.xy
apd = ArrayPlotData(x=x, y=y)
plot = Plot(apd)
plot.plot(('x', 'y'))
return plot
if __name__ == '__main__':
hg = Harmonograph()
hui = HarmonographUI(model=hg)
hui.configure_traits()
```
#### File: exercises/julia/utils.py
```python
from __future__ import print_function
from subprocess import check_call
import sys, platform
def compiler(setup_name):
# the Python binary full path.
exe = sys.executable
# figure out what platform we're on and adjust the commandline flags accordingly.
extras = []
if platform.system() == 'Windows':
extras = ['--compiler=mingw32']
# The distutils command to execute
cmd = [exe, setup_name, 'build_ext', '--inplace'] + extras
print(cmd)
# runs the command and raises an exception on failure.
check_call(cmd)
def importer(module_name, function_name):
# Remove any common ending, both for pure python and extension modules.
for ending in ('.py', '.pyc', '.so', '.pyd'):
module_name = module_name.rsplit(ending)[0]
mod = __import__(module_name)
# import the required function, re-raising an ImportError on failure.
try:
return getattr(mod, function_name)
except AttributeError:
raise ImportError("cannot import name %s" % function_name)
```
#### File: speed/numba/general.py
```python
import numpy as np
from numba import double
import numba as nb
from numba.decorators import jit, autojit
# from IPython import get_ipython
# ipython = get_ipython()
def pairwise_python(X):
M = X.shape[0]
N = X.shape[1]
D = np.empty((M, M), dtype=np.float)
for i in range(M):
for j in range(M):
d = 0.0
for k in range(N):
tmp = X[i, k] - X[j, k]
d += tmp * tmp
D[i, j] = np.sqrt(d)
return D
def pairwise_numpy(X):
return np.sqrt(((X[:, None, :] - X) ** 2).sum(-1))
pairwise_numba = autojit(pairwise_python)
```
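`autojit` was Numba's legacy zero-signature decorator; on current releases the equivalent is `numba.jit(nopython=True)` / `numba.njit`. A hedged timing sketch for comparing the three variants, assuming the NumPy/Numba versions this file was written for (array size is arbitrary; the first call is a warm-up so JIT compilation time is excluded):
```python
import time
import numpy as np

X = np.random.random((300, 3))

for name, fn in [("python", pairwise_python), ("numpy", pairwise_numpy), ("numba", pairwise_numba)]:
    fn(X)  # warm-up (triggers compilation for the numba variant)
    t0 = time.perf_counter()
    fn(X)
    print(name, round(time.perf_counter() - t0, 4))
```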
#### File: tensorflow_learning/tf2/multi_inputs_outputs.py
```python
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Concatenate
import numpy as np
from keras.utils import plot_model
from numpy import random as rd
samples_n = 3000
samples_dim_01 = 2
samples_dim_02 = 2
# Sample data
x1 = rd.rand(samples_n, samples_dim_01)
x2 = rd.rand(samples_n, samples_dim_02)
y_1 = []
y_2 = []
y_3 = []
for x11, x22 in zip(x1, x2):
y_1.append(np.sum(x11) + np.sum(x22))
y_2.append(np.max([np.max(x11), np.max(x22)]))
y_3.append(np.min([np.min(x11), np.min(x22)]))
y_1 = np.array(y_1)
y_1 = np.expand_dims(y_1, axis=1)
y_2 = np.array(y_2)
y_2 = np.expand_dims(y_2, axis=1)
y_3 = np.array(y_3)
y_3 = np.expand_dims(y_3, axis=1)
# print(y_1.dtype, y_2.dtype, y_3.dtype)
# exit()
# Input layers
inputs_01 = Input((samples_dim_01,), name='input_1')
inputs_02 = Input((samples_dim_02,), name='input_2')
# Dense layers
dense_01 = Dense(units=3, name="dense_01", activation='softmax')(inputs_01)
dense_011 = Dense(units=3, name="dense_011", activation='softmax')(dense_01)
dense_02 = Dense(units=6, name="dense_02", activation='softmax')(inputs_02)
# Concatenate the two branches
merge = Concatenate()([dense_011, dense_02])
# First output head --- output 01
output_01 = Dense(units=6, activation="relu", name='output01')(merge)
output_011 = Dense(units=1, activation=None, name='output011')(output_01)
# Second output head --- output 02
output_02 = Dense(units=1, activation=None, name='output02')(merge)
# Third output head --- output 03
output_03 = Dense(units=1, activation=None, name='output03')(merge)
# Build the model
model = Model(inputs=[inputs_01, inputs_02], outputs=[output_011,
output_02,
output_03
])
# Show the model summary
# plot_model(model, show_shapes=True)
print(model.summary())
# # Compile
# model.compile(optimizer="adam", loss='mean_squared_error', loss_weights=[1,
# 0.8,
# 0.8
# ])
# # Train
# model.fit([x1, x2], [y_1,
# y_2,
# y_3
# ], epochs=50, batch_size=32, validation_split=0.1)
def generator():
for i in range(len(x1)):
yield {'input_1': x1[i], 'inputs_02': x2[i]}, {'output011': y_1[i],
'output02': y_2[i],
'output03': y_3[i]}
dataset = tf.data.Dataset.from_generator(generator, ({'input_1': tf.float32, 'inputs_02': tf.float32},
{'output011': tf.float32,
'output02': tf.float32,
'output03': tf.float32
})
).batch(32)
# for one in dataset:
# # pass
# print(one, type(one))
# break
#
# exit()
# The following form allows flexible per-output settings
model.compile(optimizer='adam',
loss={'output011': 'mean_squared_error',
'output02': 'mean_squared_error',
'output03': 'mean_squared_error'},
loss_weights={'output011': 1,
'output02': 0.8,
'output03': 0.8})
# model.fit({'input_1': x1,
# 'input_2': x2},
# {'output011': y_1,
# 'output02': y_2,
# 'output03': y_3},
# epochs=50, batch_size=32, validation_split=0.1)
model.fit(dataset, epochs=50)
# Predict
test_x1 = rd.rand(1, 2)
test_x2 = rd.rand(1, 2)
test_y = model.predict(x=[test_x1, test_x2])
# Check the result
print("Test results:")
print("test_x1:", test_x1, "test_x2:", test_x2, "y:", test_y, np.sum(test_x1) + np.sum(test_x2))
```
#### File: tensorflow_learning/tf2/structured_data.py
```python
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from tensorflow.python.framework import dtypes
tf.feature_column.numeric_column
keras.layers.DenseFeatures
tf.feature_column.embedding_column
tf.feature_column.categorical_column_with_hash_bucket
tf.feature_column.indicator_column
tf.feature_column.bucketized_column
# URL = 'https://storage.googleapis.com/applied-dl/heart.csv'
# dataframe = pd.read_csv(URL)
data_file = 'heart.csv'
dataframe = pd.read_csv(data_file)
dataframe = dataframe.replace({'thal': {0: 'normal', 1: "fixed", 2: "normal"}})
dataframe = dataframe.astype({'thal': str})
print(dataframe.head())
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
print(train.head())
# A utility method to create a tf.data dataset from a Pandas DataFrame
def df_to_dataset(dataframe, shuffle=True, batch_size=2):
dataframe = dataframe.copy()
labels = dataframe.pop('target')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
batch_size = 3  # small batch size, for demonstration
train_ds = df_to_dataset(train, shuffle=False, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
for feature_batch, label_batch in train_ds.take(1):
print('Every feature:', list(feature_batch.keys()))
print('A batch of ages:', feature_batch['age'])
print('A batch of targets:', label_batch)
# We will use this batch of data to demonstrate several kinds of feature columns
example_batch = next(iter(train_ds))[0]
print('example_batch', example_batch)
# sparse_input = {'indices':[[0, 0], [0, 1], [1, 2]], 'values': ['fixed', 'reversible', 'normal'], 'dense_shape': [2, 4]}
sparse_input = {'indices':[[0, 0], [0, 1], [1, 2]], 'values': [1, 1, 1], 'dense_shape': [2, 4]}
input_sparse = tf.sparse.SparseTensor(**sparse_input)
# input_sparse = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1], [1, 2]], values=['fixed', 'reversible', 'normal'], dense_shape=[2, 4])
# example_batch = {
# 'thal': input_sparse
# }
# A utility method to create a feature column
# and transform a batch of data with it
def demo(feature_column):
feature_layer = layers.DenseFeatures(feature_column)
name = feature_column.name.split('_')[0]
print('input:', example_batch[name])
print(feature_layer(example_batch).numpy())
age = feature_column.numeric_column("age")
demo(age)
#
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets)
#
# thal = feature_column.categorical_column_with_vocabulary_list(
# 'thal', ['fixed', 'normal', 'reversible'])
thal = feature_column.categorical_column_with_hash_bucket('thal', 20, dtype=dtypes.int32)
#
# thal_one_hot = feature_column.indicator_column(thal)
# demo(thal_one_hot)
# demo(thal)
# Note that the input to the embedding column is the categorical column created earlier
thal_embedding = feature_column.embedding_column(thal, dimension=8, combiner='sum')
# demo(thal_embedding)
```
#### File: tensorflow_learning/tflite/tflite_mobilenet.py
```python
import numpy as np
import tensorflow as tf
# from: https://www.tensorflow.org/lite/convert/python_api?hl=zh-cn
print('version:', tf.__version__)
def from_saved_model():
"""
The following example shows how to convert a SavedModel into the TensorFlow Lite FlatBuffer format.
:return:
"""
# Build a simple model.
root = tf.train.Checkpoint()
root.v1 = tf.Variable(3.)
root.v2 = tf.Variable(2.)
root.f = tf.function(lambda x: root.v1 * root.v2 * x)
# Save the model.
export_dir = "/tmp/test_saved_model"
input_data = tf.constant(1., shape=[1, 1])
to_save = root.f.get_concrete_function(input_data)
tf.saved_model.save(root, export_dir, to_save)
# Convert the model.
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
def from_saved_model_support_specifying_input_dimension():
"""
The API above does not support specifying input tensor dimensions. If your model requires explicit input dimensions, use from_concrete_functions instead. Example:
:return:
"""
root = tf.train.Checkpoint()
root.v1 = tf.Variable(3.)
root.v2 = tf.Variable(2.)
root.f = tf.function(lambda x: root.v1 * root.v2 * x)
# Save the model.
export_dir = "/tmp/test_saved_model"
input_data = tf.constant(1., shape=[1, 1])
to_save = root.f.get_concrete_function(input_data)
tf.saved_model.save(root, export_dir, to_save)
# Convert the model.
model = tf.saved_model.load(export_dir)
concrete_func = model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
concrete_func.inputs[0].set_shape([1, 256, 256, 3])
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
def from_keras_model():
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = tf.keras.models.Sequential(
[tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=50)
# Convert the model.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
def end_2_end_MobileNet():
"""
End-to-end MobileNet conversion.
The following example shows how to convert a pretrained tf.keras MobileNet model to TensorFlow Lite format and run inference.
Results from running random data through the TensorFlow and TensorFlow Lite models are compared. To load the model from a file, use model_path instead of model_content.
:return:
"""
# Load the MobileNet tf.keras model.
model = tf.keras.applications.MobileNetV2(
weights="imagenet", input_shape=(224, 224, 3))
# Convert the model.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# Get the input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print('input_details', input_details)
print('output_details', output_details)
# Test the TensorFlow Lite model with random input data.
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# The `get_tensor()` function returns a copy of the tensor data.
# Use `tensor()` to get a pointer to the tensor instead.
tflite_results = interpreter.get_tensor(output_details[0]['index'])
# Test the TensorFlow model with random input data.
tf_results = model(tf.constant(input_data))
# Compare the results.
for tf_result, tflite_result in zip(tf_results, tflite_results):
np.testing.assert_almost_equal(tf_result, tflite_result, decimal=5)
```
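As the last docstring notes, a converted model written to disk is loaded with `model_path` instead of `model_content`. A minimal sketch of that path-based flow (the file name here is an assumption; it happens to match what `from_saved_model()` above writes out):
```python
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model.tflite")  # file written by from_saved_model()
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a zero tensor with the expected shape/dtype and run one inference.
dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']))
```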
#### File: pylearn/tests/gejun_sum.py
```python
__author__ = 'jeffye'
def sum_consecutives(s):
i = 1
li = []
if i < len(s):
n = 1
while s[i] != s[i + 1] and s[i] != s[i - 1]:
sum = s[i]
i = i + 1
return sum
while s[i] == s[i + 1]:
n = n + 1
sum = s[i] * n
i = i + 1
return sum
li.append(sum)
return li
def sum_consecutives_corrected(s):
start = 0
li = []
n = 1
while start < len(s):
if start == len(s) - 1: # last element
li.append(s[start])
break
elif s[start] == s[start + n]: # equal, just record the length
n += 1
else: # first not equal, sum all previous equal elements and append to li
li.append(sum(s[start: start + n]))
start += n
n = 1
return li
if __name__ == '__main__':
test_li = [-5, -5, 7, 7, 12, 0] # should return [-10,14,12,0]
print sum_consecutives_corrected(test_li)
```
#### File: pylearn/theano/debug_examples.py
```python
import theano
import numpy
import numpy as np
from theano import tensor as T
__author__ = '<NAME>'
def print_ndarray_shape():
"""
this shows how to print ndarray shape as well as the change before and after executing the function.
link: http://deeplearning.net/software/theano/tutorial/debug_faq.html#how-do-i-print-an-intermediate-value-in-a-function-method
"""
def inspect_inputs(i, node, fn):
print i, node, "\ninput(s) value(s):", [input[0].shape for input in fn.inputs],
def inspect_outputs(i, node, fn):
print "\noutput(s) value(s):", [output[0] for output in fn.outputs]
x = theano.tensor.matrix('x')
f = theano.function([x], [5 * x],
mode=theano.compile.MonitorMode(
pre_func=inspect_inputs,
post_func=inspect_outputs))
f(numpy.arange(10).reshape(2, 5))
def print_detect_nan():
def detect_nan(i, node, fn):
for output in fn.outputs:
if not isinstance(output[0], numpy.random.RandomState) and numpy.isnan(output[0]).any():
print '*** NaN detected ***'
theano.printing.debugprint(node)
print 'Inputs : %s' % [input[0] for input in fn.inputs]
print 'Outputs: %s' % [output[0] for output in fn.outputs]
break
x = theano.tensor.dscalar('x')
f = theano.function([x], [theano.tensor.log(x) * x],
mode=theano.compile.MonitorMode(
post_func=detect_nan))
f(0) # log(0) * 0 = -inf * 0 = NaN
def test_value():
from keras.layers.core import Dense
from theano import pp, function
theano.config.compute_test_value = 'warn'
# since the test input value is not aligned with the requirement in Dense,
# it will report error quickly. Change 100 to 1000 will be fine.
t_value = np.zeros((500, 1000), dtype=np.float32)
X = T.matrix()
X.tag.test_value = t_value
d = Dense(200, input_dim=1000)
# d1 = Dense(200, input_dim=1000)
d.build()
z = d(X)
f = function([X], z)
# turn it off after
theano.config.compute_test_value = 'off'
if __name__ == "__main__":
test_value()
```
#### File: pylearn/theano/scan_examples.py
```python
import theano
from theano import tensor as T
import numpy as np
__author__ = '<NAME>'
def scan_matrix():
def slide_sum(i, size, X):
return T.sum(X[i:i+size], axis=0), theano.scan_module.until(i >= X.shape[0] - size)
M = np.arange(20).reshape([5, 4])
print(M)
x = T.matrix('x')
window_size = T.iscalar('w')
sum_fun, _ = theano.scan(slide_sum, sequences=T.arange(x.shape[0]), non_sequences=[window_size, x])
f = theano.function([window_size, x], sum_fun)
print(f(2, M))
def scan_tensor3():
def slide_sum(i, size, X):
return T.sum(X[:, i:i+size, :], axis=1), theano.scan_module.until(i >= X.shape[0] - size)
M = np.arange(64).reshape([4, 4, 4])
print(M)
x = T.tensor3('x')
window_size = T.iscalar('w')
sum_fun, _ = theano.scan(slide_sum, sequences=T.arange(x.shape[0]), non_sequences=[window_size, x])
f = theano.function([window_size, x], sum_fun)
print(f(2, M))
def power_of_2(previous_power, max_value):
return previous_power*2, theano.scan_module.until(previous_power*2 > max_value)
def scan_util():
max_value = T.scalar()
values, _ = theano.scan(power_of_2,
outputs_info=T.constant(1.),
non_sequences=max_value,
n_steps=1024)
f = theano.function([max_value], values)
print f(45)
def scan_with_output():
def _step(x, y, z):
return x + y + z, y+z
k = T.vector("k", dtype='float64')
a = T.vector('a', dtype='float64')
o = theano.shared(value=np.asarray(0, dtype='float64'), strict=False)
results, _ = theano.scan(
_step,
sequences=[a, k],
outputs_info=[None, o])
f = theano.function([a, k], outputs=results)
print(f(np.arange(10), np.arange(10)))
if __name__ == "__main__":
# scan_matrix()
# scan_tensor3()
# scan_util()
scan_with_output()
```
#### File: pylearn/theano/tensor_opt.py
```python
import theano
from theano import tensor as T
import theano.tensor
import numpy as np
x = T.fmatrix('x')
v = T.fmatrix('v')
# y = v + T.sum(x, axis = 0)
# y = T.dot(x, v)
y = T.transpose(x) + v
# theano has better performance for matrix multiplication,
# but not as good for addition compared to numpy since numpy use multicores to do addition.
update_weight_theano = theano.function(inputs=[x, v],
outputs=[y])
def update_weight_np(x, v):
# return np.sum(x, axis=0) + v
# return np.dot(x, v)
return np.transpose(x) + v
def linear_combine():
x = T.fmatrix('x')
w = T.vector('w', 'float32')
# z = T.dot(w, x)
z = x * w
f = theano.function(inputs=[x, w], outputs=z)
print f(np.arange(4).reshape([2, 2]).astype('float32'), np.array([0.1, 0.2], dtype='float32'))
def linear_combine_shared():
x = T.fmatrix('x')
value = np.asarray(np.array([0.1, 0.2], dtype='float32'))
w = theano.shared(value=value, strict=False)
# w = T.vector('w', 'float32')
z = T.dot(w, x)
# z = x * w
f = theano.function(inputs=[x], outputs=z)
print f(np.arange(4).reshape([2, 2]).astype('float32'))
def weighting():
from theano.tensor import TensorType
x = T.ftensor3()
# w = TensorType('float32', (False, False, True))()
w = T.ftensor3()
# z = T.dot(w, x)
# y = T.addbroadcast(w, 2)
# y = w.reshape([w.shape[0], w.shape[1]])
y = T.flatten(w, 2)
z = x * y
f = theano.function(inputs=[x, w], outputs=z)
input1 = np.arange(8).reshape([2, 2, 2]).astype('float32')
input2 = np.array(
[
[
[0.1], [0.2]
],
[
[0.2], [0.4]
]
]
).astype('float32')
print input1, input1.shape
print
print input2, input2.shape
print
print f(input1, input2)
# print input1 * input2
def weighting1():
from theano.tensor import TensorType
input1 = np.arange(12).reshape([3, 2, 2]).astype('float32')
input2 = np.array(
[1., 2., 3.]
).astype('float32')
x = T.ftensor3()
# w = TensorType('float32', (False, False, True))()
w = theano.shared(value=input2, name='w', strict=False)
y = (w * x.T).T
f = theano.function(inputs=[x], outputs=y)
print input1, input1.shape
print
print input2, input2.shape
print
print f(input1)
# print input1 * input2
def concatenate():
input = np.arange(12).reshape([2, 3, 2]).astype('float32')
print input
print
pad_len = 1
Y = T.tensor3()
z = T.concatenate([Y[:, :pad_len, :], Y, Y[:, Y.shape[1] - pad_len:, :]], axis=1)
f = theano.function(inputs=[Y], outputs=z)
print f(input)
def arg_sort():
a, b, c = 2, 4, 4
input = np.arange(a*b*c).reshape([a, b, c]).astype('float32')
# print input
print
x = T.tensor3()
z = T.argsort(x, axis=2)[:, :, :2].astype('int64')
z = theano.printing.Print("z")(z)
z = x[z[0].flatten()]
# z = x[T.arange(x.shape[0], dtype='int32'), T.arange(x.shape[1], dtype='int32'), z]
f = theano.function(inputs=[x], outputs=z)
r = f(input)
def t_grad():
x = T.matrix()
y = T.mean(T.sum(x, axis=1))
z = T.grad(y, x)
print type(y)
def test_cache():
from keras.layers.core import Dense
from theano import pp, function
from theano import config
import cPickle as pkl
# Theano configuration
config.optimizer = 'fast_run'
X = T.matrix()
d = Dense(200, input_dim=1000)
# d1 = Dense(200, input_dim=1000)
d.build()
Y = d(X) + d(X)
z = d(X)
Y1 = z + z
f = function([X], Y)
f1 = function([X], Y1)
# print pp(Y)
# print pp(f.maker.fgraph.outputs[0])
print theano.printing.debugprint(f)
print
print theano.printing.debugprint(f1)
print
print theano.printing.debugprint(z)
pkl.dump(f, open('test.pkl', 'wb'))
pkl.dump(f1, open('test1.pkl', 'wb'))
if __name__ == '__main__':
# linear_combine()
# linear_combine_shared()
# weighting1()
# concatenate()
arg_sort()
# t_grad()
# test_cache()
```
|
{
"source": "jeffzi/featuretools",
"score": 2
}
|
#### File: featuretools/computational_backends/calculate_feature_matrix.py
```python
import logging
import math
import os
import shutil
import time
import warnings
from datetime import datetime
import cloudpickle
import numpy as np
import pandas as pd
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
gather_approximate_features,
gen_empty_approx_features_df,
save_csv_decorator
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import AggregationFeature, FeatureBase
from featuretools.utils import Trie
from featuretools.utils.gen_utils import make_tqdm_iterator
from featuretools.utils.wrangle import _check_time_type
from featuretools.variable_types import (
DatetimeTimeIndex,
NumericTimeIndex,
PandasTypes
)
logger = logging.getLogger('featuretools.computational_backend')
PBAR_FORMAT = "Elapsed: {elapsed} | Progress: {l_bar}{bar}"
FEATURE_CALCULATION_PERCENTAGE = .95 # make total 5% higher to allot time for wrapping up at end
def calculate_feature_matrix(features, entityset=None, cutoff_time=None, instance_ids=None,
entities=None, relationships=None,
cutoff_time_in_index=False,
training_window=None, approximate=None,
save_progress=None, verbose=False,
chunk_size=None, n_jobs=1,
dask_kwargs=None, progress_callback=None):
"""Calculates a matrix for a given set of instance ids and calculation times.
Args:
features (list[:class:`.FeatureBase`]): Feature definitions to be calculated.
entityset (EntitySet): An already initialized entityset. Required if `entities` and `relationships`
not provided
cutoff_time (pd.DataFrame or Datetime): Specifies at which time to calculate
the features for each instance. The resulting feature matrix will use data
up to and including the cutoff_time. Can either be a DataFrame with
'instance_id' and 'time' columns, DataFrame with the name of the
index variable in the target entity and a time column, or a single
value to calculate for all instances. If the dataframe has more than two columns, any additional
columns will be added to the resulting feature matrix.
instance_ids (list): List of instances to calculate features on. Only
used if cutoff_time is a single datetime.
entities (dict[str -> tuple(pd.DataFrame, str, str)]): dictionary of
entities. Entries take the format
{entity id: (dataframe, id column, (time_column))}.
relationships (list[(str, str, str, str)]): list of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
cutoff_time_in_index (bool): If True, return a DataFrame with a MultiIndex
where the second index is the cutoff time (first is instance id).
DataFrame will be sorted by (time, instance_id).
training_window (Timedelta or str, optional):
Window defining how much time before the cutoff time data
can be used when calculating features. If ``None``, all data before cutoff time is used.
Defaults to ``None``.
approximate (Timedelta or str): Frequency to group instances with similar
cutoff times by for features with costly calculations. For example,
if bucket is 24 hours, all instances with cutoff times on the same
day will use the same calculation for expensive features.
verbose (bool, optional): Print progress info. The time granularity is
per chunk.
chunk_size (int or float or None): maximum number of rows of
output feature matrix to calculate at time. If passed an integer
greater than 0, will try to use that many rows per chunk. If passed
a float value between 0 and 1 sets the chunk size to that
percentage of all rows. if None, and n_jobs > 1 it will be set to 1/n_jobs
n_jobs (int, optional): number of parallel processes to use when
calculating feature matrix.
dask_kwargs (dict, optional): Dictionary of keyword arguments to be
passed when creating the dask client and scheduler. Even if n_jobs
is not set, using `dask_kwargs` will enable multiprocessing.
Main parameters:
cluster (str or dask.distributed.LocalCluster):
cluster or address of cluster to send tasks to. If unspecified,
a cluster will be created.
diagnostics port (int):
port number to use for web dashboard. If left unspecified, web
interface will not be enabled.
Valid keyword arguments for LocalCluster will also be accepted.
save_progress (str, optional): path to save intermediate computational results.
progress_callback (callable): function to be called with incremental progress updates.
Has the following parameters:
update: percentage change (float between 0 and 100) in progress since last call
progress_percent: percentage (float between 0 and 100) of total computation completed
time_elapsed: total time in seconds that has elapsed since start of call
"""
assert (isinstance(features, list) and features != [] and
all([isinstance(feature, FeatureBase) for feature in features])), \
"features must be a non-empty list of features"
# handle loading entityset
from featuretools.entityset.entityset import EntitySet
if not isinstance(entityset, EntitySet):
if entities is not None and relationships is not None:
entityset = EntitySet("entityset", entities, relationships)
target_entity = entityset[features[0].entity.id]
pass_columns = []
if not isinstance(cutoff_time, pd.DataFrame):
if isinstance(cutoff_time, list):
raise TypeError("cutoff_time must be a single value or DataFrame")
if cutoff_time is None:
if entityset.time_type == NumericTimeIndex:
cutoff_time = np.inf
else:
cutoff_time = datetime.now()
if instance_ids is None:
index_var = target_entity.index
df = target_entity._handle_time(target_entity.df,
time_last=cutoff_time,
training_window=training_window)
instance_ids = df[index_var].tolist()
cutoff_time = [cutoff_time] * len(instance_ids)
map_args = [(id, time) for id, time in zip(instance_ids, cutoff_time)]
cutoff_time = pd.DataFrame(map_args, columns=['instance_id', 'time'])
cutoff_time = cutoff_time.reset_index(drop=True)
# handle how columns are named in cutoff_time
# maybe add _check_time_dtype helper function
if "instance_id" not in cutoff_time.columns:
if target_entity.index not in cutoff_time.columns:
raise AttributeError('Name of the index variable in the target entity'
' or "instance_id" must be present in cutoff_time')
# rename to instance_id
cutoff_time.rename(columns={target_entity.index: "instance_id"}, inplace=True)
if "time" not in cutoff_time.columns:
# take the first column that isn't instance_id and assume it is time
not_instance_id = [c for c in cutoff_time.columns if c != "instance_id"]
cutoff_time.rename(columns={not_instance_id[0]: "time"}, inplace=True)
# Check that cutoff_time time type matches entityset time type
if entityset.time_type == NumericTimeIndex:
if cutoff_time['time'].dtype.name not in PandasTypes._pandas_numerics:
raise TypeError("cutoff_time times must be numeric: try casting "
"via pd.to_numeric(cutoff_time['time'])")
elif entityset.time_type == DatetimeTimeIndex:
if cutoff_time['time'].dtype.name not in PandasTypes._pandas_datetimes:
raise TypeError("cutoff_time times must be datetime type: try casting via pd.to_datetime(cutoff_time['time'])")
assert (cutoff_time[['instance_id', 'time']].duplicated().sum() == 0), \
"Duplicated rows in cutoff time dataframe."
pass_columns = [column_name for column_name in cutoff_time.columns[2:]]
if _check_time_type(cutoff_time['time'].iloc[0]) is None:
raise ValueError("cutoff_time time values must be datetime or numeric")
# make sure dtype of instance_id in cutoff time
# is same as column it references
target_entity = features[0].entity
dtype = entityset[target_entity.id].df[target_entity.index].dtype
cutoff_time["instance_id"] = cutoff_time["instance_id"].astype(dtype)
feature_set = FeatureSet(features)
# Get features to approximate
if approximate is not None:
approximate_feature_trie = gather_approximate_features(feature_set)
# Make a new FeatureSet that ignores approximated features
feature_set = FeatureSet(features, approximate_feature_trie=approximate_feature_trie)
# Check if there are any non-approximated aggregation features
no_unapproximated_aggs = True
for feature in features:
if isinstance(feature, AggregationFeature):
# do not need to check if feature is in to_approximate since
# only base features of direct features can be in to_approximate
no_unapproximated_aggs = False
break
if approximate is not None:
all_approx_features = {f for _, feats in feature_set.approximate_feature_trie
for f in feats}
else:
all_approx_features = set()
deps = feature.get_dependencies(deep=True, ignored=all_approx_features)
for dependency in deps:
if isinstance(dependency, AggregationFeature):
no_unapproximated_aggs = False
break
cutoff_df_time_var = 'time'
target_time = '_original_time'
if approximate is not None:
# If there are approximated aggs, bin times
binned_cutoff_time = bin_cutoff_times(cutoff_time.copy(), approximate)
# Think about collisions: what if original time is a feature
binned_cutoff_time[target_time] = cutoff_time[cutoff_df_time_var]
cutoff_time_to_pass = binned_cutoff_time
else:
cutoff_time_to_pass = cutoff_time
chunk_size = _handle_chunk_size(chunk_size, cutoff_time.shape[0])
tqdm_options = {'total': (cutoff_time.shape[0] / FEATURE_CALCULATION_PERCENTAGE),
'bar_format': PBAR_FORMAT,
'disable': True}
if verbose:
tqdm_options.update({'disable': False})
elif progress_callback is not None:
# allows us to utilize progress_bar updates without printing to anywhere
tqdm_options.update({'file': open(os.devnull, 'w'), 'disable': False})
progress_bar = make_tqdm_iterator(**tqdm_options)
if n_jobs != 1 or dask_kwargs is not None:
feature_matrix = parallel_calculate_chunks(cutoff_time=cutoff_time_to_pass,
chunk_size=chunk_size,
feature_set=feature_set,
approximate=approximate,
training_window=training_window,
save_progress=save_progress,
entityset=entityset,
n_jobs=n_jobs,
no_unapproximated_aggs=no_unapproximated_aggs,
cutoff_df_time_var=cutoff_df_time_var,
target_time=target_time,
pass_columns=pass_columns,
progress_bar=progress_bar,
dask_kwargs=dask_kwargs or {},
progress_callback=progress_callback)
else:
feature_matrix = calculate_chunk(cutoff_time=cutoff_time_to_pass,
chunk_size=chunk_size,
feature_set=feature_set,
approximate=approximate,
training_window=training_window,
save_progress=save_progress,
entityset=entityset,
no_unapproximated_aggs=no_unapproximated_aggs,
cutoff_df_time_var=cutoff_df_time_var,
target_time=target_time,
pass_columns=pass_columns,
progress_bar=progress_bar,
progress_callback=progress_callback)
# ensure rows are sorted by input order
feature_matrix = feature_matrix.reindex(cutoff_time[["instance_id", "time"]])
if not cutoff_time_in_index:
feature_matrix.reset_index(level='time', drop=True, inplace=True)
if save_progress and os.path.exists(os.path.join(save_progress, 'temp')):
shutil.rmtree(os.path.join(save_progress, 'temp'))
# force to 100% since we saved last 5 percent
previous_progress = progress_bar.n
progress_bar.update(progress_bar.total - progress_bar.n)
if progress_callback is not None:
update, progress_percent, time_elapsed = update_progress_callback_parameters(progress_bar, previous_progress)
progress_callback(update, progress_percent, time_elapsed)
progress_bar.refresh()
progress_bar.close()
return feature_matrix
def calculate_chunk(cutoff_time, chunk_size, feature_set, entityset, approximate, training_window,
save_progress, no_unapproximated_aggs, cutoff_df_time_var, target_time,
pass_columns, progress_bar=None, progress_callback=None):
if not isinstance(feature_set, FeatureSet):
feature_set = cloudpickle.loads(feature_set)
feature_matrix = []
if no_unapproximated_aggs and approximate is not None:
if entityset.time_type == NumericTimeIndex:
group_time = np.inf
else:
group_time = datetime.now()
for _, group in cutoff_time.groupby(cutoff_df_time_var):
# if approximating, calculate the approximate features
if approximate is not None:
precalculated_features_trie = approximate_features(
feature_set,
group,
window=approximate,
entityset=entityset,
training_window=training_window,
)
else:
precalculated_features_trie = None
@save_csv_decorator(save_progress)
def calc_results(time_last, ids, precalculated_features=None, training_window=None):
update_progress_callback = None
if progress_bar is not None:
def update_progress_callback(done):
previous_progress = progress_bar.n
progress_bar.update(done * group.shape[0])
if progress_callback is not None:
update, progress_percent, time_elapsed = update_progress_callback_parameters(progress_bar, previous_progress)
progress_callback(update, progress_percent, time_elapsed)
calculator = FeatureSetCalculator(entityset,
feature_set,
time_last,
training_window=training_window,
precalculated_features=precalculated_features)
matrix = calculator.run(ids, progress_callback=update_progress_callback)
return matrix
# if all aggregations have been approximated, can calculate all together
if no_unapproximated_aggs and approximate is not None:
inner_grouped = [[group_time, group]]
else:
# if approximated features, set cutoff_time to unbinned time
if precalculated_features_trie is not None:
group[cutoff_df_time_var] = group[target_time]
inner_grouped = group.groupby(cutoff_df_time_var, sort=True)
if chunk_size is not None:
inner_grouped = _chunk_dataframe_groups(inner_grouped, chunk_size)
for time_last, group in inner_grouped:
# sort group by instance id
ids = group['instance_id'].sort_values().values
if no_unapproximated_aggs and approximate is not None:
window = None
else:
window = training_window
# calculate values for those instances at time time_last
_feature_matrix = calc_results(time_last,
ids,
precalculated_features=precalculated_features_trie,
training_window=window)
id_name = _feature_matrix.index.name
# if approximate, merge feature matrix with group frame to get original
# cutoff times and passed columns
if approximate:
indexer = group[['instance_id', target_time] + pass_columns]
_feature_matrix = indexer.merge(_feature_matrix,
left_on=['instance_id'],
right_index=True,
how='left')
_feature_matrix.set_index(['instance_id', target_time], inplace=True)
_feature_matrix.index.set_names([id_name, 'time'], inplace=True)
_feature_matrix.sort_index(level=1, kind='mergesort', inplace=True)
else:
# all rows have same cutoff time. set time and add passed columns
num_rows = _feature_matrix.shape[0]
time_index = pd.Index([time_last] * num_rows, name='time')
_feature_matrix.set_index(time_index, append=True, inplace=True)
if len(pass_columns) > 0:
pass_through = group[['instance_id', cutoff_df_time_var] + pass_columns]
pass_through.rename(columns={'instance_id': id_name,
cutoff_df_time_var: 'time'},
inplace=True)
pass_through.set_index([id_name, 'time'], inplace=True)
for col in pass_columns:
_feature_matrix[col] = pass_through[col]
feature_matrix.append(_feature_matrix)
feature_matrix = pd.concat(feature_matrix)
return feature_matrix
def approximate_features(feature_set, cutoff_time, window, entityset,
training_window=None):
'''Given a set of features and cutoff_times to be passed to
calculate_feature_matrix, calculates approximate values of some features
to speed up calculations. Cutoff times are sorted into
window-sized buckets and the approximate feature values are only calculated
at one cutoff time for each bucket.
.. note:: this only approximates DirectFeatures of AggregationFeatures, on
the target entity. In future versions, it may also be possible to
approximate these features on other top-level entities
Args:
cutoff_time (pd.DataFrame): specifies what time to calculate
the features for each instance at. The resulting feature matrix will use data
up to and including the cutoff_time. A DataFrame with
'instance_id' and 'time' columns.
window (Timedelta or str): frequency to group instances with similar
cutoff times by for features with costly calculations. For example,
if bucket is 24 hours, all instances with cutoff times on the same
day will use the same calculation for expensive features.
entityset (:class:`.EntitySet`): An already initialized entityset.
feature_set (:class:`.FeatureSet`): The features to be calculated.
training_window (`Timedelta`, optional):
Window defining how much older than the cutoff time data
can be to be included when calculating the feature. If None, all older data is used.
save_progress (str, optional): path to save intermediate computational results
'''
approx_fms_trie = Trie(path_constructor=RelationshipPath)
target_time_colname = 'target_time'
cutoff_time[target_time_colname] = cutoff_time['time']
approx_cutoffs = bin_cutoff_times(cutoff_time.copy(), window)
cutoff_df_time_var = 'time'
cutoff_df_instance_var = 'instance_id'
# should this order be by dependencies so that calculate_feature_matrix
# doesn't skip approximating something?
for relationship_path, approx_feature_names in feature_set.approximate_feature_trie:
if not approx_feature_names:
continue
cutoffs_with_approx_e_ids, new_approx_entity_index_var = \
_add_approx_entity_index_var(entityset, feature_set.target_eid,
approx_cutoffs.copy(), relationship_path)
# Select only columns we care about
columns_we_want = [new_approx_entity_index_var,
cutoff_df_time_var,
target_time_colname]
cutoffs_with_approx_e_ids = cutoffs_with_approx_e_ids[columns_we_want]
cutoffs_with_approx_e_ids = cutoffs_with_approx_e_ids.drop_duplicates()
cutoffs_with_approx_e_ids.dropna(subset=[new_approx_entity_index_var],
inplace=True)
approx_features = [feature_set.features_by_name[name]
for name in approx_feature_names]
if cutoffs_with_approx_e_ids.empty:
approx_fm = gen_empty_approx_features_df(approx_features)
else:
cutoffs_with_approx_e_ids.sort_values([cutoff_df_time_var,
new_approx_entity_index_var], inplace=True)
# CFM assumes specific column names for cutoff_time argument
rename = {new_approx_entity_index_var: cutoff_df_instance_var}
cutoff_time_to_pass = cutoffs_with_approx_e_ids.rename(columns=rename)
cutoff_time_to_pass = cutoff_time_to_pass[[cutoff_df_instance_var, cutoff_df_time_var]]
cutoff_time_to_pass.drop_duplicates(inplace=True)
approx_fm = calculate_feature_matrix(approx_features,
entityset,
cutoff_time=cutoff_time_to_pass,
training_window=training_window,
approximate=None,
cutoff_time_in_index=False,
chunk_size=cutoff_time_to_pass.shape[0])
approx_fms_trie.get_node(relationship_path).value = approx_fm
return approx_fms_trie
def scatter_warning(num_scattered_workers, num_workers):
if num_scattered_workers != num_workers:
scatter_warning = "EntitySet was only scattered to {} out of {} workers"
warnings.warn(scatter_warning.format(num_scattered_workers, num_workers))
def parallel_calculate_chunks(cutoff_time, chunk_size, feature_set, approximate, training_window,
save_progress, entityset, n_jobs, no_unapproximated_aggs,
cutoff_df_time_var, target_time, pass_columns,
progress_bar, dask_kwargs=None, progress_callback=None):
from distributed import as_completed, Future
from dask.base import tokenize
client = None
cluster = None
try:
client, cluster = create_client_and_cluster(n_jobs=n_jobs,
dask_kwargs=dask_kwargs,
entityset_size=entityset.__sizeof__())
# scatter the entityset
# denote future with leading underscore
start = time.time()
es_token = "EntitySet-{}".format(tokenize(entityset))
if es_token in client.list_datasets():
msg = "Using EntitySet persisted on the cluster as dataset {}"
progress_bar.write(msg.format(es_token))
_es = client.get_dataset(es_token)
else:
_es = client.scatter([entityset])[0]
client.publish_dataset(**{_es.key: _es})
# save features to a tempfile and scatter it
pickled_feats = cloudpickle.dumps(feature_set)
_saved_features = client.scatter(pickled_feats)
client.replicate([_es, _saved_features])
num_scattered_workers = len(client.who_has([Future(es_token)]).get(es_token, []))
num_workers = len(client.scheduler_info()['workers'].values())
chunks = cutoff_time.groupby(cutoff_df_time_var)
if not chunk_size:
chunk_size = _handle_chunk_size(1.0 / num_workers, cutoff_time.shape[0])
chunks = _chunk_dataframe_groups(chunks, chunk_size)
chunks = [df for _, df in chunks]
if len(chunks) < num_workers:
            chunk_warning = "Fewer chunks ({}) than workers ({}); consider reducing the chunk size"
warning_string = chunk_warning.format(len(chunks), num_workers)
progress_bar.write(warning_string)
scatter_warning(num_scattered_workers, num_workers)
end = time.time()
scatter_time = round(end - start)
# if enabled, reset timer after scatter for better time remaining estimates
if not progress_bar.disable:
progress_bar.reset()
scatter_string = "EntitySet scattered to {} workers in {} seconds"
progress_bar.write(scatter_string.format(num_scattered_workers, scatter_time))
# map chunks
# TODO: consider handling task submission dask kwargs
_chunks = client.map(calculate_chunk,
chunks,
feature_set=_saved_features,
chunk_size=None,
entityset=_es,
approximate=approximate,
training_window=training_window,
save_progress=save_progress,
no_unapproximated_aggs=no_unapproximated_aggs,
cutoff_df_time_var=cutoff_df_time_var,
target_time=target_time,
pass_columns=pass_columns,
progress_bar=None,
progress_callback=progress_callback)
feature_matrix = []
iterator = as_completed(_chunks).batches()
for batch in iterator:
results = client.gather(batch)
for result in results:
feature_matrix.append(result)
previous_progress = progress_bar.n
progress_bar.update(result.shape[0])
if progress_callback is not None:
update, progress_percent, time_elapsed = update_progress_callback_parameters(progress_bar, previous_progress)
progress_callback(update, progress_percent, time_elapsed)
except Exception:
raise
finally:
if 'cluster' not in dask_kwargs and cluster is not None:
cluster.close()
if client is not None:
client.close()
feature_matrix = pd.concat(feature_matrix)
return feature_matrix
def _add_approx_entity_index_var(es, target_entity_id, cutoffs, path):
"""
Add a variable to the cutoff df linking it to the entity at the end of the
path.
Return the updated cutoff df and the name of this variable. The name will
consist of the variables which were joined through.
"""
last_child_var = 'instance_id'
last_parent_var = es[target_entity_id].index
for _, relationship in path:
child_vars = [last_parent_var, relationship.child_variable.id]
child_df = es[relationship.child_entity.id].df[child_vars]
# Rename relationship.child_variable to include the variables we have
# joined through.
new_var_name = '%s.%s' % (last_child_var, relationship.child_variable.id)
to_rename = {relationship.child_variable.id: new_var_name}
child_df = child_df.rename(columns=to_rename)
cutoffs = cutoffs.merge(child_df,
left_on=last_child_var,
right_on=last_parent_var)
# These will be used in the next iteration.
last_child_var = new_var_name
last_parent_var = relationship.parent_variable.id
return cutoffs, new_var_name
def _chunk_dataframe_groups(grouped, chunk_size):
"""chunks a grouped dataframe into groups no larger than chunk_size"""
for group_key, group_df in grouped:
for i in range(0, len(group_df), chunk_size):
yield group_key, group_df.iloc[i:i + chunk_size]
def _handle_chunk_size(chunk_size, total_size):
if chunk_size is not None:
assert chunk_size > 0, "Chunk size must be greater than 0"
if chunk_size < 1:
chunk_size = math.ceil(chunk_size * total_size)
chunk_size = int(chunk_size)
return chunk_size
def update_progress_callback_parameters(progress_bar, previous_progress):
update = (progress_bar.n - previous_progress) / progress_bar.total * 100
progress_percent = (progress_bar.n / progress_bar.total) * 100
time_elapsed = progress_bar.format_dict["elapsed"]
return (update, progress_percent, time_elapsed)
```
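The approximation helpers above are normally reached through the public DFS entry point rather than called directly. A minimal sketch (not part of the original module), assuming the mock-customer demo EntitySet bundled with featuretools; the instance ids, timestamps, and the 24-hour window are illustrative only:
```python
import pandas as pd
import featuretools as ft
from featuretools.demo.mock_customer import load_mock_customer

# Demo EntitySet with customers, sessions and transactions entities.
es = load_mock_customer(return_entityset=True)

# Cutoff times with 'instance_id' and 'time' columns, as the docstring above requires.
cutoff_time = pd.DataFrame({
    "instance_id": [1, 2, 3],
    "time": pd.to_datetime(["2014-01-01 04:00",
                            "2014-01-01 09:30",
                            "2014-01-02 12:00"]),
})

# approximate="24 hours" bins cutoff times into 24-hour windows so instances in
# the same window reuse the expensive aggregation calculations.
feature_matrix, feature_defs = ft.dfs(
    entityset=es,
    target_entity="customers",
    cutoff_time=cutoff_time,
    approximate="24 hours",
)
print(feature_matrix.shape)
```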
#### File: tests/computational_backend/test_feature_set.py
```python
import featuretools as ft
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.entityset.relationship import RelationshipPath
from featuretools.tests.testing_utils import backward_path
from featuretools.utils import Trie
def test_feature_trie_without_needs_full_entity(diamond_es):
es = diamond_es
country_name = ft.IdentityFeature(es['countries']['name'])
direct_name = ft.DirectFeature(country_name, es['regions'])
amount = ft.IdentityFeature(es['transactions']['amount'])
path_through_customers = backward_path(es, ['regions', 'customers', 'transactions'])
through_customers = ft.AggregationFeature(amount, es['regions'],
primitive=ft.primitives.Mean,
relationship_path=path_through_customers)
path_through_stores = backward_path(es, ['regions', 'stores', 'transactions'])
through_stores = ft.AggregationFeature(amount, es['regions'],
primitive=ft.primitives.Mean,
relationship_path=path_through_stores)
customers_to_transactions = backward_path(es, ['customers', 'transactions'])
customers_mean = ft.AggregationFeature(amount, es['customers'],
primitive=ft.primitives.Mean,
relationship_path=customers_to_transactions)
negation = ft.TransformFeature(customers_mean, ft.primitives.Negate)
regions_to_customers = backward_path(es, ['regions', 'customers'])
mean_of_mean = ft.AggregationFeature(negation, es['regions'],
primitive=ft.primitives.Mean,
relationship_path=regions_to_customers)
features = [direct_name, through_customers, through_stores, mean_of_mean]
feature_set = FeatureSet(features)
trie = feature_set.feature_trie
assert trie.value == \
(False, set(), {f.unique_name() for f in features})
assert trie.get_node(direct_name.relationship_path).value == \
(False, set(), {country_name.unique_name()})
assert trie.get_node(regions_to_customers).value == \
(False, set(), {negation.unique_name(), customers_mean.unique_name()})
regions_to_stores = backward_path(es, ['regions', 'stores'])
assert trie.get_node(regions_to_stores).value == (False, set(), set())
assert trie.get_node(path_through_customers).value == \
(False, set(), {amount.unique_name()})
assert trie.get_node(path_through_stores).value == \
(False, set(), {amount.unique_name()})
def test_feature_trie_with_needs_full_entity(diamond_es):
es = diamond_es
amount = ft.IdentityFeature(es['transactions']['amount'])
path_through_customers = backward_path(es, ['regions', 'customers', 'transactions'])
agg = ft.AggregationFeature(amount, es['regions'],
primitive=ft.primitives.Mean,
relationship_path=path_through_customers)
trans_of_agg = ft.TransformFeature(agg, ft.primitives.CumSum)
path_through_stores = backward_path(es, ['regions', 'stores', 'transactions'])
trans = ft.TransformFeature(amount, ft.primitives.CumSum)
agg_of_trans = ft.AggregationFeature(trans, es['regions'],
primitive=ft.primitives.Mean,
relationship_path=path_through_stores)
features = [agg, trans_of_agg, agg_of_trans]
feature_set = FeatureSet(features)
trie = feature_set.feature_trie
assert trie.value == \
(True, {agg.unique_name(), trans_of_agg.unique_name()}, {agg_of_trans.unique_name()})
assert trie.get_node(path_through_customers).value == \
(True, {amount.unique_name()}, set())
assert trie.get_node(path_through_customers[:1]).value == (True, set(), set())
assert trie.get_node(path_through_stores).value == \
(True, {amount.unique_name(), trans.unique_name()}, set())
assert trie.get_node(path_through_stores[:1]).value == (False, set(), set())
def test_feature_trie_with_needs_full_entity_direct(es):
value = ft.IdentityFeature(es['log']['value'],)
agg = ft.AggregationFeature(value, es['sessions'],
primitive=ft.primitives.Mean)
agg_of_agg = ft.AggregationFeature(agg, es['customers'],
primitive=ft.primitives.Sum)
direct = ft.DirectFeature(agg_of_agg, es['sessions'])
trans = ft.TransformFeature(direct, ft.primitives.CumSum)
features = [trans, agg]
feature_set = FeatureSet(features)
trie = feature_set.feature_trie
assert trie.value == \
(True, {direct.unique_name(), trans.unique_name()}, {agg.unique_name()})
assert trie.get_node(agg.relationship_path).value == \
(False, set(), {value.unique_name()})
parent_node = trie.get_node(direct.relationship_path)
assert parent_node.value == (True, {agg_of_agg.unique_name()}, set())
child_through_parent_node = parent_node.get_node(agg_of_agg.relationship_path)
assert child_through_parent_node.value == (True, {agg.unique_name()}, set())
assert child_through_parent_node.get_node(agg.relationship_path).value == \
(True, {value.unique_name()}, set())
def test_feature_trie_ignores_approximate_features(es):
value = ft.IdentityFeature(es['log']['value'],)
agg = ft.AggregationFeature(value, es['sessions'],
primitive=ft.primitives.Mean)
agg_of_agg = ft.AggregationFeature(agg, es['customers'],
primitive=ft.primitives.Sum)
direct = ft.DirectFeature(agg_of_agg, es['sessions'])
features = [direct, agg]
approximate_feature_trie = Trie(default=list, path_constructor=RelationshipPath)
approximate_feature_trie.get_node(direct.relationship_path).value = [agg_of_agg]
feature_set = FeatureSet(features, approximate_feature_trie=approximate_feature_trie)
trie = feature_set.feature_trie
# Since agg_of_agg is ignored it and its dependencies should not be in the
# trie.
sub_trie = trie.get_node(direct.relationship_path)
for _path, (_, _, features) in sub_trie:
assert not features
assert trie.value == (False, set(), {direct.unique_name(), agg.unique_name()})
assert trie.get_node(agg.relationship_path).value == \
(False, set(), {value.unique_name()})
```
#### File: featuretools/tests/conftest.py
```python
import copy
import pandas as pd
import pytest
import featuretools as ft
from featuretools.tests.testing_utils import make_ecommerce_entityset
@pytest.fixture(scope='session')
def make_es():
return make_ecommerce_entityset()
@pytest.fixture(scope='session')
def make_int_es():
return make_ecommerce_entityset(with_integer_time_index=True)
@pytest.fixture
def es(make_es):
return copy.deepcopy(make_es)
@pytest.fixture
def int_es(make_int_es):
return copy.deepcopy(make_int_es)
@pytest.fixture
def diamond_es():
countries_df = pd.DataFrame({
'id': range(2),
'name': ['US', 'Canada']
})
regions_df = pd.DataFrame({
'id': range(3),
'country_id': [0, 0, 1],
'name': ['Northeast', 'South', 'Quebec'],
})
stores_df = pd.DataFrame({
'id': range(5),
'region_id': [0, 1, 2, 2, 1],
'square_ft': [2000, 3000, 1500, 2500, 2700],
})
customers_df = pd.DataFrame({
'id': range(5),
'region_id': [1, 0, 0, 1, 1],
'name': ['A', 'B', 'C', 'D', 'E'],
})
transactions_df = pd.DataFrame({
'id': range(8),
'store_id': [4, 4, 2, 3, 4, 0, 1, 1],
'customer_id': [3, 0, 2, 4, 3, 3, 2, 3],
'amount': [100, 40, 45, 83, 13, 94, 27, 81],
})
entities = {
'countries': (countries_df, 'id'),
'regions': (regions_df, 'id'),
'stores': (stores_df, 'id'),
'customers': (customers_df, 'id'),
'transactions': (transactions_df, 'id'),
}
relationships = [
('countries', 'id', 'regions', 'country_id'),
('regions', 'id', 'stores', 'region_id'),
('regions', 'id', 'customers', 'region_id'),
('stores', 'id', 'transactions', 'store_id'),
('customers', 'id', 'transactions', 'customer_id'),
]
return ft.EntitySet(id='ecommerce_diamond',
entities=entities,
relationships=relationships)
@pytest.fixture
def home_games_es():
teams = pd.DataFrame({
'id': range(3),
'name': ['Breakers', 'Spirit', 'Thorns']
})
games = pd.DataFrame({
'id': range(5),
'home_team_id': [2, 2, 1, 0, 1],
'away_team_id': [1, 0, 2, 1, 0],
'home_team_score': [3, 0, 1, 0, 4],
'away_team_score': [2, 1, 2, 0, 0]
})
entities = {'teams': (teams, 'id'), 'games': (games, 'id')}
relationships = [('teams', 'id', 'games', 'home_team_id')]
return ft.EntitySet(entities=entities,
relationships=relationships)
@pytest.fixture
def games_es(home_games_es):
away_team = ft.Relationship(home_games_es['teams']['id'],
home_games_es['games']['away_team_id'])
return home_games_es.add_relationship(away_team)
```
#### File: tests/utils_tests/test_gen_utils.py
```python
import pytest
from featuretools.utils.gen_utils import import_or_raise
def test_import_or_raise_errors():
with pytest.raises(ImportError, match="error message"):
import_or_raise("_featuretools", "error message")
def test_import_or_raise_imports():
math = import_or_raise("math", "error message")
assert math.ceil(0.1) == 1
```
#### File: tests/wrappers/test_sklearn_wrapper.py
```python
import numpy as np
import pandas as pd
import pytest
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from featuretools.demo.mock_customer import load_mock_customer
from featuretools.wrappers import DFSTransformer
def select_numeric(df):
return df.select_dtypes(exclude=['object'])
@pytest.fixture(scope='module')
def es():
es = load_mock_customer(n_customers=15,
n_products=15,
n_sessions=75,
n_transactions=1000,
random_seed=0,
return_entityset=True)
return es
@pytest.fixture(scope='module')
def df(es):
df = es['customers'].df
df['target'] = np.random.randint(1, 3, df.shape[0]) # 1 or 2 values
return df
@pytest.fixture(scope='module')
def pipeline(es):
pipeline = Pipeline(steps=[
('ft', DFSTransformer(entityset=es,
target_entity="customers",
max_features=20)),
("numeric", FunctionTransformer(select_numeric, validate=False)),
('imp', SimpleImputer()),
('et', ExtraTreesClassifier(n_estimators=10))
])
return pipeline
def test_sklearn_transformer(es, df):
# Using with transformers
pipeline = Pipeline(steps=[
('ft', DFSTransformer(entityset=es,
target_entity="customers")),
("numeric", FunctionTransformer(select_numeric, validate=False)),
('sc', StandardScaler()),
])
X_train = pipeline.fit(df['customer_id']).transform(df['customer_id'])
assert X_train.shape[0] == 15
def test_sklearn_estimator(df, pipeline):
# Using with estimator
pipeline.fit(df['customer_id'].values, y=df.target.values) \
.predict(df['customer_id'].values)
result = pipeline.score(df['customer_id'].values, df.target.values)
assert isinstance(result, (float))
# Pickling / Unpickling Pipeline
# TODO fix this
# s = pickle.dumps(pipeline)
# pipe_pickled = pickle.loads(s)
# result = pipe_pickled.score(df['customer_id'].values, df.target.values)
# assert isinstance(result, (float))
def test_sklearn_cross_val_score(df, pipeline):
# Using with cross_val_score
results = cross_val_score(pipeline,
X=df['customer_id'].values,
y=df.target.values,
cv=2,
scoring="accuracy")
assert isinstance(results[0], (float))
assert isinstance(results[1], (float))
def test_sklearn_gridsearchcv(df, pipeline):
# Using with GridSearchCV
params = {
'et__max_depth': [5, 10]
}
grid = GridSearchCV(estimator=pipeline,
param_grid=params,
cv=3)
grid.fit(df['customer_id'].values, df.target.values)
assert len(grid.predict(df['customer_id'].values)) == 15
def test_sklearn_cuttoff(pipeline):
    # Using cutoff_time to filter data
ct = pd.DataFrame()
ct['customer_id'] = [1, 2, 3]
ct['time'] = pd.to_datetime(['2014-1-1 04:00',
'2014-1-1 04:00',
'2014-1-1 04:00'])
ct['label'] = [True, True, False]
results = pipeline.fit(ct, y=ct.label).predict(ct)
assert len(results) == 3
```
#### File: featuretools/utils/s3_utils.py
```python
import json
import shutil
from featuretools.utils.gen_utils import import_or_raise
def use_smartopen_es(file_path, path, transport_params=None, read=True):
open = import_or_raise("smart_open", SMART_OPEN_ERR_MSG).open
if read:
with open(path, "rb", transport_params=transport_params) as fin:
with open(file_path, 'wb') as fout:
shutil.copyfileobj(fin, fout)
else:
with open(file_path, 'rb') as fin:
with open(path, 'wb', transport_params=transport_params) as fout:
shutil.copyfileobj(fin, fout)
def use_s3fs_es(file_path, path, read=True):
s3fs = import_or_raise("s3fs", S3FS_ERR_MSG)
s3 = s3fs.S3FileSystem(anon=True)
if read:
s3.get(path, file_path)
else:
s3.put(file_path, path)
def use_smartopen_features(path, features_dict=None, transport_params=None, read=True):
open = import_or_raise("smart_open", SMART_OPEN_ERR_MSG).open
if read:
with open(path, 'r', encoding='utf-8', transport_params=transport_params) as f:
features_dict = json.load(f)
return features_dict
else:
with open(path, "w", transport_params=transport_params) as f:
json.dump(features_dict, f)
def use_s3fs_features(file_path, features_dict=None, read=True):
s3fs = import_or_raise("s3fs", S3FS_ERR_MSG)
s3 = s3fs.S3FileSystem(anon=True)
if read:
with s3.open(file_path, "r", encoding='utf-8') as f:
features_dict = json.load(f)
return features_dict
else:
with s3.open(file_path, "w", encoding='utf-8') as f:
features = json.dumps(features_dict, ensure_ascii=False)
f.write(features)
BOTO3_ERR_MSG = (
"The boto3 library is required to read and write from URLs and S3.\n"
"Install via pip:\n"
" pip install boto3\n"
"Install via conda:\n"
" conda install boto3"
)
SMART_OPEN_ERR_MSG = (
"The smart_open library is required to read and write from URLs and S3.\n"
"Install via pip:\n"
" pip install smart-open\n"
"Install via conda:\n"
" conda install smart_open"
)
S3FS_ERR_MSG = (
"The s3fs library is required to read and write from S3.\n"
"Install via pip:\n"
" pip install s3fs\n"
"Install via conda:\n"
" conda install s3fs"
)
```
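A brief usage sketch for the helpers above (not part of the original module). The bucket and keys are placeholders; the s3fs path uses anonymous access (`anon=True`), while the smart_open path uses whatever credentials `transport_params` would supply:
```python
from featuretools.utils.s3_utils import use_s3fs_features, use_smartopen_es

# Download a serialized EntitySet archive to a local file via smart_open.
use_smartopen_es(file_path="entityset.tar",
                 path="s3://example-bucket/entityset.tar",  # placeholder URL
                 read=True)

# Read a saved feature-definitions JSON directly from S3 via s3fs.
features_dict = use_s3fs_features("example-bucket/features.json", read=True)
print(sorted(features_dict))
```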
#### File: featuretools/variable_types/variable.py
```python
import numpy as np
import pandas as pd
from featuretools.utils.gen_utils import find_descendents
class Variable(object):
"""Represent a variable in an entity
    A Variable is analogous to a column in a table in a relational database.
Args:
id (str) : Id of variable. Must match underlying data in Entity
it belongs to.
entity (:class:`.Entity`) : Entity this variable belongs to.
name (str, optional) : Variable name. Defaults to id.
See Also:
:class:`.Entity`, :class:`.Relationship`, :class:`.BaseEntitySet`
"""
type_string = None
_default_pandas_dtype = object
def __init__(self, id, entity, name=None):
assert isinstance(id, str), "Variable id must be a string"
self.id = id
self._name = name
self.entity_id = entity.id
assert entity.entityset is not None, "Entity must contain reference to EntitySet"
self.entity = entity
self._interesting_values = pd.Series()
@property
def entityset(self):
return self.entity.entityset
def __eq__(self, other, deep=False):
shallow_eq = isinstance(other, self.__class__) and \
self.id == other.id and \
self.entity_id == other.entity_id
if not deep:
return shallow_eq
else:
return shallow_eq and set(self.interesting_values.values) == set(other.interesting_values.values)
def __hash__(self):
return hash((self.id, self.entity_id))
def __repr__(self):
return u"<Variable: {} (dtype = {})>".format(self.name, self.type_string)
@classmethod
def create_from(cls, variable):
"""Create new variable this type from existing
Args:
variable (Variable) : Existing variable to create from.
Returns:
:class:`.Variable` : new variable
"""
v = cls(id=variable.id, name=variable.name, entity=variable.entity)
return v
@property
def name(self):
return self._name if self._name is not None else self.id
@property
def dtype(self):
return self.type_string \
if self.type_string is not None else "generic_type"
@name.setter
def name(self, name):
self._name = name
@property
def interesting_values(self):
return self._interesting_values
@interesting_values.setter
def interesting_values(self, interesting_values):
self._interesting_values = pd.Series(interesting_values)
@property
def series(self):
return self.entity.df[self.id]
def to_data_description(self):
return {
'id': self.id,
'type': {
'value': self.type_string,
},
'properties': {
'name': self.name,
'entity': self.entity.id,
'interesting_values': self._interesting_values.to_json()
},
}
class Unknown(Variable):
pass
class Discrete(Variable):
"""Superclass representing variables that take on discrete values"""
type_string = "discrete"
def __init__(self, id, entity, name=None):
super(Discrete, self).__init__(id, entity, name)
self._interesting_values = pd.Series()
@property
def interesting_values(self):
return self._interesting_values
@interesting_values.setter
def interesting_values(self, values):
seen = set()
seen_add = seen.add
self._interesting_values = pd.Series([v for v in values if not
(v in seen or seen_add(v))])
class Boolean(Variable):
"""Represents variables that take on one of two values
Args:
        true_values (list) : List of valid true values. Defaults to [1, True, "true", "True", "yes", "t", "T"]
        false_values (list): List of valid false values. Defaults to [0, False, "false", "False", "no", "f", "F"]
"""
type_string = "boolean"
_default_pandas_dtype = bool
def __init__(self,
id,
entity,
name=None,
true_values=None,
false_values=None):
default = [1, True, "true", "True", "yes", "t", "T"]
self.true_values = true_values or default
default = [0, False, "false", "False", "no", "f", "F"]
self.false_values = false_values or default
super(Boolean, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Boolean, self).to_data_description()
description['type'].update({
'true_values': self.true_values,
'false_values': self.false_values
})
return description
class Categorical(Discrete):
"""Represents variables that can take an unordered discrete values
Args:
categories (list) : List of categories. If left blank, inferred from data.
"""
type_string = "categorical"
def __init__(self, id, entity, name=None, categories=None):
        self.categories = categories or []
super(Categorical, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Categorical, self).to_data_description()
description['type'].update({'categories': self.categories})
return description
class Id(Categorical):
"""Represents variables that identify another entity"""
type_string = "id"
_default_pandas_dtype = int
class Ordinal(Discrete):
"""Represents variables that take on an ordered discrete value"""
type_string = "ordinal"
_default_pandas_dtype = int
class Numeric(Variable):
"""Represents variables that contain numeric values
Args:
range (list, optional) : List of start and end. Can use inf and -inf to represent infinity. Unconstrained if not specified.
start_inclusive (bool, optional) : Whether or not range includes the start value.
end_inclusive (bool, optional) : Whether or not range includes the end value
Attributes:
max (float)
min (float)
std (float)
mean (float)
"""
type_string = "numeric"
_default_pandas_dtype = float
def __init__(self,
id,
entity,
name=None,
range=None,
start_inclusive=True,
end_inclusive=False):
        self.range = range or []
self.start_inclusive = start_inclusive
self.end_inclusive = end_inclusive
super(Numeric, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Numeric, self).to_data_description()
description['type'].update({
'range': self.range,
'start_inclusive': self.start_inclusive,
'end_inclusive': self.end_inclusive,
})
return description
class Index(Variable):
"""Represents variables that uniquely identify an instance of an entity
Attributes:
count (int)
"""
type_string = "index"
_default_pandas_dtype = int
class Datetime(Variable):
"""Represents variables that are points in time
Args:
format (str): Python datetime format string documented `here <http://strftime.org/>`_.
"""
type_string = "datetime"
_default_pandas_dtype = np.datetime64
def __init__(self, id, entity, name=None, format=None):
self.format = format
super(Datetime, self).__init__(id, entity, name=name)
def __repr__(self):
return u"<Variable: {} (dtype: {}, format: {})>".format(self.name, self.type_string, self.format)
def to_data_description(self):
description = super(Datetime, self).to_data_description()
description['type'].update({'format': self.format})
return description
class TimeIndex(Variable):
"""Represents time index of entity"""
type_string = "time_index"
_default_pandas_dtype = np.datetime64
class NumericTimeIndex(TimeIndex, Numeric):
"""Represents time index of entity that is numeric"""
type_string = "numeric_time_index"
_default_pandas_dtype = float
class DatetimeTimeIndex(TimeIndex, Datetime):
"""Represents time index of entity that is a datetime"""
type_string = "datetime_time_index"
_default_pandas_dtype = np.datetime64
class Timedelta(Variable):
"""Represents variables that are timedeltas
Args:
range (list, optional) : List of start and end of allowed range in seconds. Can use inf and -inf to represent infinity. Unconstrained if not specified.
start_inclusive (bool, optional) : Whether or not range includes the start value.
end_inclusive (bool, optional) : Whether or not range includes the end value
"""
type_string = "timedelta"
_default_pandas_dtype = np.timedelta64
def __init__(self,
id,
entity,
name=None,
range=None,
start_inclusive=True,
end_inclusive=False):
self.range = range or []
self.start_inclusive = start_inclusive
self.end_inclusive = end_inclusive
super(Timedelta, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Timedelta, self).to_data_description()
description['type'].update({
'range': self.range,
'start_inclusive': self.start_inclusive,
'end_inclusive': self.end_inclusive,
})
return description
class Text(Variable):
"""Represents variables that are arbitary strings"""
type_string = "text"
_default_pandas_dtype = str
class PandasTypes(object):
_all = 'all'
_categorical = 'category'
_pandas_datetimes = ['datetime64[ns]', 'datetime64[ns, tz]']
_pandas_timedeltas = ['Timedelta']
_pandas_numerics = ['int16', 'int32', 'int64',
'float16', 'float32', 'float64']
class LatLong(Variable):
"""Represents an ordered pair (Latitude, Longitude)
To make a latlong in a dataframe do
data['latlong'] = data[['latitude', 'longitude']].apply(tuple, axis=1)
"""
type_string = "latlong"
class ZIPCode(Categorical):
"""Represents a postal address in the United States.
    Consists of a series of digits which are cast as a
    string. Five-digit and nine-digit zipcodes are supported.
"""
type_string = "zipcode"
_default_pandas_dtype = str
class IPAddress(Variable):
"""Represents a computer network address. Represented
in dotted-decimal notation. IPv4 and IPv6 are supported.
"""
type_string = "ip"
_default_pandas_dtype = str
class FullName(Variable):
"""Represents a person's full name. May consist of a
first name, last name, and a title.
"""
type_string = "full_name"
_default_pandas_dtype = str
class EmailAddress(Variable):
"""Represents an email box to which email message are sent.
Consists of a local-part, an @ symbol, and a domain.
"""
type_string = "email"
_default_pandas_dtype = str
class URL(Variable):
"""Represents a valid web url (with or without http/www)"""
type_string = "url"
_default_pandas_dtype = str
class PhoneNumber(Variable):
"""Represents any valid phone number.
Can be with/without parenthesis.
Can be with/without area/country codes.
"""
type_string = "phone_number"
_default_pandas_dtype = str
class DateOfBirth(Datetime):
"""Represents a date of birth as a datetime"""
type_string = "date_of_birth"
_default_pandas_dtype = np.datetime64
class CountryCode(Categorical):
"""Represents an ISO-3166 standard country code.
    ISO 3166-1 codes (countries) are supported. These codes
should be in the Alpha-2 format.
e.g. United States of America = US
"""
type_string = "country_code"
_default_pandas_dtype = str
class SubRegionCode(Categorical):
"""Represents an ISO-3166 standard sub-region code.
    ISO 3166-2 codes (sub-regions) are supported. These codes
should be in the Alpha-2 format.
e.g. United States of America, Arizona = US-AZ
"""
type_string = "subregion_code"
_default_pandas_dtype = str
class FilePath(Variable):
"""Represents a valid filepath, absolute or relative"""
type_string = "filepath"
_default_pandas_dtype = str
def find_variable_types():
return {str(vtype.type_string): vtype for vtype in find_descendents(
Variable) if hasattr(vtype, 'type_string')}
DEFAULT_DTYPE_VALUES = {
np.datetime64: pd.Timestamp.now(),
int: 0,
float: 0.1,
np.timedelta64: pd.Timedelta('1d'),
object: 'object',
bool: True,
str: 'test'
}
```
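A small sketch (not part of the original module) of the registry defined at the bottom of this file; the import path mirrors the file location shown above:
```python
from featuretools.variable_types.variable import (
    DEFAULT_DTYPE_VALUES,
    Categorical,
    find_variable_types,
)

# find_variable_types() maps each subclass's type_string to the class itself.
vtypes = find_variable_types()
assert vtypes["categorical"] is Categorical
assert vtypes["numeric_time_index"]._default_pandas_dtype is float

# Placeholder values keyed by default pandas dtype, e.g. for building dummy rows.
print(DEFAULT_DTYPE_VALUES[int], DEFAULT_DTYPE_VALUES[bool])
```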
|
{
"source": "jeffzi/featuretools-tsfresh-primitives",
"score": 3
}
|
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/abs_energy.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import abs_energy
class AbsEnergy(AggregationPrimitive):
"""Returns the absolute energy of the time series
which is the sum over the squared values.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.abs_energy
"""
name = "abs_energy"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
return abs_energy
```
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/agg_autocorrelation.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import agg_autocorrelation
class AggAutocorrelation(AggregationPrimitive):
"""Calculates the value of an aggregation function (e.g. the variance or
the mean) over the autocorrelation for different lags.
Args:
f_agg (str) : Name of a numpy function (e.g. "mean", "var", "std",
"median"), its the name of the aggregator function that is applied
to the autocorrelations.
maxlag (int) : Maximal number of lags to consider.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.agg_autocorrelation
"""
name = "agg_autocorrelation"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def __init__(self, f_agg, maxlag):
self.f_agg = f_agg
self.maxlag = maxlag
def get_function(self):
def function(x):
param = [{'f_agg': self.f_agg, 'maxlag': self.maxlag}]
return agg_autocorrelation(x, param=param)[0][1]
return function
```
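Parametrized primitives like this one are instantiated with their tsfresh parameters and passed to DFS as aggregation primitives. A minimal sketch (not part of the package), assuming the mock-customer demo EntitySet from featuretools; the parameter values are arbitrary:
```python
import featuretools as ft
from featuretools.demo.mock_customer import load_mock_customer
from featuretools_tsfresh_primitives.agg_autocorrelation import AggAutocorrelation

es = load_mock_customer(return_entityset=True)

# Aggregate each session's numeric transaction columns with the tsfresh calculator.
fm, defs = ft.dfs(
    entityset=es,
    target_entity="sessions",
    agg_primitives=[AggAutocorrelation(f_agg="mean", maxlag=5)],
    trans_primitives=[],
)
print(fm.columns.tolist())
```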
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/count_above_mean.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import count_above_mean
class CountAboveMean(AggregationPrimitive):
"""Returns the number of values in x that are higher than the mean of x
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.count_above_mean
"""
name = "count_above_mean"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
return count_above_mean
```
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/first_location_of_maximum.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import \
first_location_of_maximum
from .utils import to_array
class FirstLocationOfMaximum(AggregationPrimitive):
"""Returns the first location of the maximum value of x. The position is
calculated relatively to the length of x.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.first_location_of_maximum
"""
name = "first_location_of_maximum"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
def function(x):
return first_location_of_maximum(to_array(x))
return function
```
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/linear_trend.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import linear_trend
class LinearTrend(AggregationPrimitive):
"""Calculate a linear least-squares regression for the values of the time
series versus the sequence from 0 to length of the time series minus one.
This feature assumes the signal to be uniformly sampled. It will not use
the time stamps to fit the model.
Args:
attr (str) : Controls which of the characteristics are returned.
Possible extracted attributes are:
['pvalue', 'rvalue', 'intercept', 'slope', 'stderr'].
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.linear_trend
"""
name = "linear_trend"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def __init__(self, attr):
self.attr = attr
def get_function(self):
def function(x):
param = [{'attr': self.attr}]
return list(linear_trend(x, param))[0][1]
return function
```
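Outside of DFS, the callable returned by `get_function` can be evaluated directly on a series, which makes the attribute selection easy to check. A minimal sketch (not part of the package); the series values are arbitrary:
```python
import pandas as pd
from featuretools_tsfresh_primitives.linear_trend import LinearTrend

series = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])

slope = LinearTrend(attr="slope").get_function()(series)
intercept = LinearTrend(attr="intercept").get_function()(series)
print(slope, intercept)  # ~1.0 and ~1.0 for this perfectly linear series
```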
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/mean.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import mean
class Mean(AggregationPrimitive):
"""Returns the mean of x.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.mean
"""
name = "mean"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
return mean
```
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/minimum.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import minimum
class Minimum(AggregationPrimitive):
"""Calculates the lowest value of the time series x.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.minimum
"""
name = "minimum"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
return minimum
```
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/symmetry_looking.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Boolean, Numeric
from tsfresh.feature_extraction.feature_calculators import symmetry_looking
class SymmetryLooking(AggregationPrimitive):
"""Boolean variable denoting if the distribution looks symmetric.
Args:
r (float) : Percentage of the range to compare with.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.symmetry_looking
"""
name = "symmetry_looking"
input_types = [Numeric]
return_type = Boolean
stack_on_self = False
def __init__(self, r):
self.r = r
def get_function(self):
def function(x):
param = [{'r': self.r}]
return symmetry_looking(x, param)[0][1]
return function
```
#### File: featuretools-tsfresh-primitives/featuretools_tsfresh_primitives/value_count.py
```python
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import value_count
class ValueCount(AggregationPrimitive):
"""Count occurrences of `value` in time series x.
Args:
value (float) : The value to be counted.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.value_count
"""
name = "value_count"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def __init__(self, value):
self.value = value
def get_function(self):
def function(x):
return value_count(x, value=self.value)
return function
```
|
{
"source": "jeffzi/optuna",
"score": 3
}
|
#### File: optuna/examples/gluon_simple.py
```python
import mxnet as mx
from mxnet import autograd
from mxnet import gluon
from mxnet.gluon import nn
import numpy as np
import optuna
CUDA = False
EPOCHS = 10
BATCHSIZE = 128
LOG_INTERVAL = 100
def define_model(trial):
net = nn.Sequential()
n_layers = trial.suggest_int("n_layers", 1, 3)
for i in range(n_layers):
nodes = trial.suggest_int("n_units_l{}".format(i), 4, 128)
net.add(nn.Dense(nodes, activation="relu"))
net.add(nn.Dense(10))
return net
def transform(data, label):
data = data.reshape((-1,)).astype(np.float32) / 255
return data, label
def validate(ctx, val_data, net):
metric = mx.metric.Accuracy()
for data, label in val_data:
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
output = net(data)
metric.update([label], [output])
return metric.get()
def objective(trial):
if CUDA:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST("./data", train=True).transform(transform),
shuffle=True,
batch_size=BATCHSIZE,
last_batch="discard",
)
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST("./data", train=False).transform(transform),
batch_size=BATCHSIZE,
shuffle=False,
)
net = define_model(trial)
# Collect all parameters from net and its children, then initialize them.
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
# Trainer is for updating parameters with gradient.
lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
trainer = gluon.Trainer(net.collect_params(), optimizer_name, {"learning_rate": lr})
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
val_acc = 0
for epoch in range(EPOCHS):
# Reset data iterator and metric at beginning of epoch.
metric.reset()
for i, (data, label) in enumerate(train_data):
# Copy data to ctx if necessary.
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
# Start recording computation graph with record() section.
# Recorded graphs can then be differentiated with backward.
with autograd.record():
output = net(data)
L = loss(output, label)
L.backward()
# Take a gradient step with batch_size equal to data.shape[0].
trainer.step(data.shape[0])
# Update metric at last.
metric.update([label], [output])
if i % LOG_INTERVAL == 0 and i > 0:
name, acc = metric.get()
print(f"[Epoch {epoch} Batch {i}] Training: {name}={acc}")
name, acc = metric.get()
print(f"[Epoch {epoch}] Training: {name}={acc}")
name, val_acc = validate(ctx, val_data, net)
print(f"[Epoch {epoch}] Validation: {name}={val_acc}")
trial.report(val_acc, epoch)
# Handle pruning based on the intermediate value.
if trial.should_prune():
raise optuna.exceptions.TrialPruned()
net.save_parameters("mnist.params")
return val_acc
if __name__ == "__main__":
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=100, timeout=600)
print("Number of finished trials: ", len(study.trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
```
#### File: examples/samplers/simulated_annealing_sampler.py
```python
import numpy as np
import optuna
from optuna import distributions
from optuna.samplers import BaseSampler
from optuna.study import StudyDirection
from optuna.trial import TrialState
class SimulatedAnnealingSampler(BaseSampler):
def __init__(self, temperature=100, cooldown_factor=0.9, neighbor_range_factor=0.1, seed=None):
self._rng = np.random.RandomState(seed)
self._independent_sampler = optuna.samplers.RandomSampler(seed=seed)
self._temperature = temperature
self.cooldown_factor = cooldown_factor
self.neighbor_range_factor = neighbor_range_factor
self._current_trial = None
def infer_relative_search_space(self, study, trial):
return optuna.samplers.intersection_search_space(study)
def sample_relative(self, study, trial, search_space):
if search_space == {}:
# The relative search space is empty (it means this is the first trial of a study).
return {}
# The rest of this method is an implementation of Simulated Annealing (SA) algorithm.
prev_trial = self._get_last_complete_trial(study)
# Update the current state of SA if the transition is accepted.
if self._rng.uniform(0, 1) <= self._transition_probability(study, prev_trial):
self._current_trial = prev_trial
# Pick a new neighbor (i.e., parameters).
params = self._sample_neighbor_params(search_space)
# Decrease the temperature.
self._temperature *= self.cooldown_factor
return params
def _sample_neighbor_params(self, search_space):
# Generate a sufficiently near neighbor (i.e., parameters).
#
# In this example, we define a sufficiently near neighbor as
# `self.neighbor_range_factor * 100` percent region of the entire
# search space centered on the current point.
params = {}
for param_name, param_distribution in search_space.items():
if isinstance(param_distribution, distributions.UniformDistribution):
current_value = self._current_trial.params[param_name]
width = (
param_distribution.high - param_distribution.low
) * self.neighbor_range_factor
neighbor_low = max(current_value - width, param_distribution.low)
neighbor_high = min(current_value + width, param_distribution.high)
params[param_name] = self._rng.uniform(neighbor_low, neighbor_high)
else:
raise NotImplementedError(
"Unsupported distribution {}.".format(param_distribution)
)
return params
def _transition_probability(self, study, prev_trial):
if self._current_trial is None:
return 1.0
prev_value = prev_trial.value
current_value = self._current_trial.value
# `prev_trial` is always accepted if it has a better value than the current trial.
if study.direction == StudyDirection.MINIMIZE and prev_value <= current_value:
return 1.0
elif study.direction == StudyDirection.MAXIMIZE and prev_value >= current_value:
return 1.0
# Calculate the probability of accepting `prev_trial` that has a worse value than
# the current trial.
return np.exp(-abs(current_value - prev_value) / self._temperature)
@staticmethod
def _get_last_complete_trial(study):
complete_trials = [t for t in study.trials if t.state == TrialState.COMPLETE]
return complete_trials[-1]
def sample_independent(self, study, trial, param_name, param_distribution):
# In this example, this method is invoked only in the first trial of a study.
# The parameters of the trial are sampled by using `RandomSampler` as follows.
return self._independent_sampler.sample_independent(
study, trial, param_name, param_distribution
)
# Define a simple 2-dimensional objective function whose minimum value is -1 when (x, y) = (0, -1).
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_float("y", -1, 1)
return x ** 2 + y
if __name__ == "__main__":
# Run optimization by using `SimulatedAnnealingSampler`.
sampler = SimulatedAnnealingSampler()
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=100)
print("Best trial:")
print(" Value: ", study.best_trial.value)
print(" Params: ")
for key, value in study.best_trial.params.items():
print(" {}: {}".format(key, value))
```
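A quick numeric illustration of the acceptance rule in `_transition_probability`, using the sampler's default starting temperature of 100 and cooldown factor of 0.9; the value gap of 10 between trials is an assumption for the example:
```python
import numpy as np

temperature = 100.0    # sampler default
cooldown_factor = 0.9  # sampler default
delta = 10.0           # assumed |current_value - prev_value|

for step in range(3):
    accept_prob = np.exp(-delta / temperature)
    print(step, round(accept_prob, 3))  # ~0.905, then ~0.895, then ~0.884
    temperature *= cooldown_factor
```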
#### File: tests/integration_tests/test_skorch.py
```python
from typing import Any
import skorch
import torch
from torch import nn
import optuna
from optuna.integration import SkorchPruningCallback
from optuna.testing.integration import DeterministicPruner
class ClassifierModule(nn.Module):
def __init__(self) -> None:
super(ClassifierModule, self).__init__()
self.dense0 = nn.Linear(4, 8)
def forward(self, X: torch.Tensor, **kwargs: Any) -> torch.Tensor:
return self.dense0(X)
def test_skorch_pruning_callback() -> None:
X, y = torch.zeros(5, 4), torch.zeros(5, dtype=torch.long)
def objective(trial: optuna.trial.Trial) -> float:
net = skorch.NeuralNetClassifier(
ClassifierModule,
max_epochs=10,
lr=0.02,
callbacks=[SkorchPruningCallback(trial, "valid_acc")],
)
net.fit(X, y)
return 1.0
study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.trial.TrialState.PRUNED
study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
assert study.trials[0].value == 1.0
```
#### File: multi_objective_tests/hypervolume_tests/test_wfg.py
```python
import numpy as np
import pytest
import optuna
def test_wfg_2d() -> None:
for n in range(2, 30):
r = n * np.ones(2)
s = np.asarray([[n - 1 - i, i] for i in range(n)])
for i in range(n + 1):
s = np.vstack((s, np.asarray([i, n - i])))
np.random.shuffle(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == n * n - n * (n - 1) // 2
def test_wfg_3d() -> None:
n = 3
r = 10 * np.ones(n)
s = [np.hstack((np.zeros(i), [1], np.zeros(n - i - 1))) for i in range(n)]
for _ in range(10):
s.append(np.random.randint(1, 10, size=(n,)))
s = np.asarray(s)
np.random.shuffle(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == 10 ** n - 1
def test_wfg_nd() -> None:
for n in range(2, 10):
r = 10 * np.ones(n)
s = [np.hstack((np.zeros(i), [1], np.zeros(n - i - 1))) for i in range(n)]
for _ in range(10):
s.append(np.random.randint(1, 10, size=(n,)))
s = np.asarray(s)
np.random.shuffle(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == 10 ** n - 1
def test_wfg_duplicate_points() -> None:
n = 3
r = 10 * np.ones(n)
s = [np.hstack((np.zeros(i), [1], np.zeros(n - i - 1))) for i in range(n)]
for _ in range(10):
s.append(np.random.randint(1, 10, size=(n,)))
s = np.asarray(s)
v = optuna.multi_objective._hypervolume.WFG().compute(s, r)
# Add an already existing point.
s = np.vstack([s, s[-1]])
np.random.shuffle(s)
v_with_duplicate_point = optuna.multi_objective._hypervolume.WFG().compute(s, r)
assert v == v_with_duplicate_point
def test_invalid_input() -> None:
r = np.ones(3)
s = np.atleast_2d(2 * np.ones(3))
with pytest.raises(ValueError):
_ = optuna.multi_objective._hypervolume.WFG().compute(s, r)
```
#### File: multi_objective_tests/visualization_tests/test_pareto_front.py
```python
import pytest
import optuna
from optuna.multi_objective.visualization import plot_pareto_front
@pytest.mark.parametrize("include_dominated_trials", [False, True])
def test_plot_pareto_front_2d(include_dominated_trials: bool) -> None:
# Test with no trial.
study = optuna.multi_objective.create_study(["minimize", "minimize"])
figure = plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
assert len(figure.data) == 1
assert figure.data[0]["x"] == ()
assert figure.data[0]["y"] == ()
# Test with three trials.
study.enqueue_trial({"x": 1, "y": 1})
study.enqueue_trial({"x": 1, "y": 0})
study.enqueue_trial({"x": 0, "y": 1})
study.optimize(lambda t: [t.suggest_int("x", 0, 1), t.suggest_int("y", 0, 1)], n_trials=3)
figure = plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
assert len(figure.data) == 1
if include_dominated_trials:
# The last elements come from dominated trial that is enqueued firstly.
assert figure.data[0]["x"] == (1, 0, 1)
assert figure.data[0]["y"] == (0, 1, 1)
else:
assert figure.data[0]["x"] == (1, 0)
assert figure.data[0]["y"] == (0, 1)
assert figure.layout.xaxis.title.text == "Objective 0"
assert figure.layout.yaxis.title.text == "Objective 1"
# Test with `names` argument.
with pytest.raises(ValueError):
plot_pareto_front(study, names=[], include_dominated_trials=include_dominated_trials)
with pytest.raises(ValueError):
plot_pareto_front(study, names=["Foo"], include_dominated_trials=include_dominated_trials)
with pytest.raises(ValueError):
plot_pareto_front(
study,
names=["Foo", "Bar", "Baz"],
include_dominated_trials=include_dominated_trials,
)
figure = plot_pareto_front(
study,
names=["Foo", "Bar"],
include_dominated_trials=include_dominated_trials,
)
assert figure.layout.xaxis.title.text == "Foo"
assert figure.layout.yaxis.title.text == "Bar"
@pytest.mark.parametrize("include_dominated_trials", [False, True])
def test_plot_pareto_front_3d(include_dominated_trials: bool) -> None:
# Test with no trial.
study = optuna.multi_objective.create_study(["minimize", "minimize", "minimize"])
figure = plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
assert len(figure.data) == 1
assert figure.data[0]["x"] == ()
assert figure.data[0]["y"] == ()
assert figure.data[0]["z"] == ()
# Test with three trials.
study.enqueue_trial({"x": 1, "y": 1, "z": 1})
study.enqueue_trial({"x": 1, "y": 0, "z": 1})
study.enqueue_trial({"x": 1, "y": 1, "z": 0})
study.optimize(
lambda t: [t.suggest_int("x", 0, 1), t.suggest_int("y", 0, 1), t.suggest_int("z", 0, 1)],
n_trials=3,
)
figure = plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
assert len(figure.data) == 1
if include_dominated_trials:
# The last elements come from dominated trial that is enqueued firstly.
assert figure.data[0]["x"] == (1, 1, 1)
assert figure.data[0]["y"] == (0, 1, 1)
assert figure.data[0]["z"] == (1, 0, 1)
else:
assert figure.data[0]["x"] == (1, 1)
assert figure.data[0]["y"] == (0, 1)
assert figure.data[0]["z"] == (1, 0)
assert figure.layout.scene.xaxis.title.text == "Objective 0"
assert figure.layout.scene.yaxis.title.text == "Objective 1"
assert figure.layout.scene.zaxis.title.text == "Objective 2"
# Test with `names` argument.
with pytest.raises(ValueError):
plot_pareto_front(study, names=[], include_dominated_trials=include_dominated_trials)
with pytest.raises(ValueError):
plot_pareto_front(study, names=["Foo"], include_dominated_trials=include_dominated_trials)
with pytest.raises(ValueError):
plot_pareto_front(
study,
names=["Foo", "Bar"],
include_dominated_trials=include_dominated_trials,
)
with pytest.raises(ValueError):
plot_pareto_front(
study,
names=["Foo", "Bar", "Baz", "Qux"],
include_dominated_trials=include_dominated_trials,
)
figure = plot_pareto_front(study, names=["Foo", "Bar", "Baz"])
assert figure.layout.scene.xaxis.title.text == "Foo"
assert figure.layout.scene.yaxis.title.text == "Bar"
assert figure.layout.scene.zaxis.title.text == "Baz"
@pytest.mark.parametrize("include_dominated_trials", [False, True])
def test_plot_pareto_front_unsupported_dimensions(include_dominated_trials: bool) -> None:
# Unsupported: n_objectives == 1.
with pytest.raises(ValueError):
study = optuna.multi_objective.create_study(["minimize"])
study.optimize(lambda t: [0], n_trials=1)
plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
# Supported: n_objectives == 2.
study = optuna.multi_objective.create_study(["minimize", "minimize"])
study.optimize(lambda t: [0, 0], n_trials=1)
plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
# Supported: n_objectives == 3.
study = optuna.multi_objective.create_study(["minimize", "minimize", "minimize"])
study.optimize(lambda t: [0, 0, 0], n_trials=1)
plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
# Unsupported: n_objectives == 4.
with pytest.raises(ValueError):
study = optuna.multi_objective.create_study(
["minimize", "minimize", "minimize", "minimize"]
)
study.optimize(lambda t: [0, 0, 0, 0], n_trials=1)
plot_pareto_front(study, include_dominated_trials=include_dominated_trials)
```
#### File: tests/visualization_tests/test_edf.py
```python
import pytest
from optuna.study import create_study
from optuna.visualization import plot_edf
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_plot_optimization_history(direction: str) -> None:
# Test with no studies.
figure = plot_edf([])
assert len(figure.data) == 0
# Test with no trials.
figure = plot_edf(create_study(direction=direction))
assert len(figure.data) == 0
figure = plot_edf([create_study(direction=direction), create_study(direction=direction)])
assert len(figure.data) == 0
# Test with a study.
study0 = create_study(direction=direction)
study0.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
figure = plot_edf(study0)
assert len(figure.data) == 1
# Test with two studies.
study1 = create_study(direction=direction)
study1.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
figure = plot_edf([study0, study1])
assert len(figure.data) == 2
figure = plot_edf((study0, study1))
assert len(figure.data) == 2
```
|
{
"source": "jeffzi/pandas-select",
"score": 3
}
|
#### File: pandas-select/pandas_select/pandera.py
```python
from typing import Any, Iterable, List, Optional
import pandas as pd
from pandas.util import Substitution
from pandas_select.label import LEVEL_DOC, AnyOf, Level, Match
try:
import pandera as pa # noqa: WPS433
except ImportError as exc: # pragma: no cover
raise ImportError(
"Support for schemas requires pandera. \n"
+ "You can install pandas-select together with the schema dependencies with: \n"
+ "pip install pandas-select[schema]\n"
) from exc
@Substitution(level=LEVEL_DOC)
class SchemaSelector(AnyOf):
"""
Select columns based on the column attributes of the
:class:`~pandera.schemas.DataFrameSchema` associated with the
:class:`~pandas.DataFrame`.
Parameters
----------
attrs: Dictionary of columns attributes to filter on.
%(level)s
Raises
------
ValueError:
        If a :class:`~pandera.schemas.DataFrameSchema` is not associated with the
        :class:`~pandas.DataFrame`.
Notes
-----
A :class:`~pandera.schemas.DataFrameSchema` is automatically added to a
:class:`~pandas.DataFrame` after calling
:meth:`pandera.schemas.DataFrameSchema.validate`.
Examples
--------
>>> df = pd.DataFrame(data=[[1, 2, 3]], columns=["a", "abc", "b"])
>>> df
a abc b
0 1 2 3
>>> import pandera as pa
>>> schema = pa.DataFrameSchema({"a": pa.Column(int, regex=True, required=False)})
>>> df = df.pandera.add_schema(schema)
>>> df[SchemaSelector(required=False)]
a abc
0 1 2
"""
def __init__(
self,
level: Optional[Level] = None,
**attrs: Any,
):
super().__init__(values=None, axis="columns", level=level)
self.attrs = attrs
def __call__(self, df: pd.DataFrame) -> Iterable:
schema = df.pandera.schema
if not schema:
raise ValueError("A schema is not associated with the DataFrame.")
self.values = self._filter_schema(schema, df, **self.attrs) # type: ignore
selection = super().__call__(df)
self.values = None # type: ignore
return selection
def _filter_schema(
self,
schema: pa.DataFrameSchema,
df: pd.DataFrame,
**attrs: Any,
) -> List[str]:
names: List[str] = []
for col in schema.columns.values():
if any( # noqa: WPS221, WPS337
getattr(col, attr) != value for attr, value in attrs.items()
):
continue
if getattr(col, "regex", False):
selection = Match(col.name, axis=self.axis, level=self.level)(df)
else:
selection = AnyOf(col.name, axis=self.axis, level=self.level)(df)
names.extend(selection)
return names
```
#### File: pandas-select/tests/test_column.py
```python
import pandas as pd
import pytest
from pandas_select.column import (
AllBool,
AllCat,
AllNominal,
AllNumeric,
AllStr,
HasDtype,
)
from .utils import assert_col_indexer, pp_param
@pytest.fixture
def df():
"""
int float category string
0 1 1.0 a a
1 -1 -1.0 b b
"""
data = {
"int": [1],
"float": [1.0],
"category": ["a"],
"ordered_category": pd.Categorical(["a"], categories=["a", "b"], ordered=True),
"object": ["a"],
"bool": [True],
}
types = {
"int": "int",
"float": "float",
"category": "category",
"object": "object",
"bool": "bool",
}
df = pd.DataFrame(data).astype(types)
if pd.__version__ >= "1.0.0":
df["string"] = pd.Series(["a"], dtype="string")
return df
@pytest.mark.parametrize(
"dtypes, expected",
[
pp_param("int", ["int"]),
pp_param("category", ["category", "ordered_category"]),
pp_param("number", ["int", "float"]),
pp_param(["object", "float"], ["float", "object"]),
pp_param(["bool"], ["bool"]),
],
)
def test_has_dtype(df, dtypes, expected):
assert_col_indexer(df, HasDtype(dtypes), expected)
def test_all_numeric(df):
assert_col_indexer(df, AllNumeric(), ["int", "float"])
def test_all_bool(df):
assert_col_indexer(df, AllBool(), ["bool"])
def test_all_str(df):
expected = ["object", "string"] if pd.__version__ >= "1.0.0" else ["object"]
assert_col_indexer(df, AllStr(strict=False), expected)
if pd.__version__ >= "1.0.0":
assert_col_indexer(df, AllStr(strict=True), ["string"])
else:
with pytest.raises(ValueError):
AllStr(strict=True)
@pytest.mark.parametrize(
"ordered, expected",
[
pp_param(None, ["category", "ordered_category"]),
pp_param(False, ["category"]),
pp_param(True, ["ordered_category"]),
],
)
def test_all_cat(df, ordered, expected):
assert_col_indexer(df, AllCat(ordered=ordered), expected)
def test_all_nominal(df):
expected = ["category", "ordered_category", "object"]
if pd.__version__ >= "1.0.0":
expected.append("string")
assert_col_indexer(df, AllNominal(strict=False), expected)
if pd.__version__ >= "1.0.0":
expected.remove("object")
assert_col_indexer(df, AllNominal(strict=True), expected)
else:
with pytest.raises(ValueError):
AllNominal(strict=True)
```
#### File: pandas-select/tests/test_pandera.py
```python
import pandas as pd
import pandera as pa
import pytest
from pandas_select import SchemaSelector
from .utils import assert_col_indexer, pp_param
@pytest.fixture
def df() -> pd.DataFrame:
"""
a abc b
0 1 2 3
"""
return pd.DataFrame(data=[[1, 2, 3]], columns=["a", "abc", "b"])
@pytest.fixture
def df_mi():
"""
data_type int float category string
ml_type number number nominal nominal
0 -1 -1.0 a a
1 1 1.0 b b
"""
return pd.DataFrame(
data=[[-1, -1.0, "a", "a"], [1, 1.0, "b", "b"]],
columns=pd.MultiIndex.from_arrays(
[
["int", "float", "category", "string"],
["number", "number", "nominal", "nominal"],
],
names=["data_type", "ml_type"],
),
)
@pytest.mark.parametrize(
"attrs, expected",
[
pp_param({"required": False}, ["b"]),
pp_param({"nullable": False}, ["a", "abc"]),
],
)
def test_schema_selector(df, attrs, expected):
schema = pa.DataFrameSchema(
{
"a": pa.Column(int, regex=True, nullable=False),
"b": pa.Column(int, required=False, nullable=True),
}
)
df = schema.validate(df)
selector = SchemaSelector(**attrs)
assert_col_indexer(df, selector, expected)
@pytest.mark.parametrize(
"attrs, expected",
[
pp_param(
{"nullable": True},
[("int", "number"), ("float", "number"), ("string", "nominal")],
),
pp_param({"nullable": True, "required": False}, [("string", "nominal")]),
],
)
def test_schema_selector_multi_index(df_mi, attrs, expected):
schema = pa.DataFrameSchema(
{
("int", "number"): pa.Column(int, nullable=True),
("float", "number"): pa.Column(float, nullable=True),
("category", "nominal"): pa.Column(str, required=False),
("string", "nominal"): pa.Column(str, required=False, nullable=True),
}
)
df = schema.validate(df_mi)
selector = SchemaSelector(**attrs)
assert_col_indexer(df, selector, expected)
def test_no_schema(df):
with pytest.raises(
ValueError, match="A schema is not associated with the DataFrame."
):
df[SchemaSelector()]
```
#### File: pandas-select/tests/utils.py
```python
import pytest
from pandas.testing import assert_frame_equal
def assert_col_indexer(df, selector, expected):
print(f"{selector} selected:\n\t{selector(df)}")
assert list(selector(df)) == expected
assert df.loc[:, selector].columns.tolist() == expected
assert_frame_equal(df[selector], df[expected])
def assert_row_indexer(df, selector, expected):
print(f"{selector} selected:\n\t{selector(df)}")
assert list(selector(df)) == expected
assert df.loc[selector, :].index.tolist() == expected
assert_frame_equal(df.loc[selector], df.loc[expected])
def pp_param(*values, **kw):
id = kw.pop("id", "-".join(map(str, values)))
return pytest.param(*values, id=id, **kw)
```
|
{
"source": "jeffzi/tsfresh",
"score": 2
}
|
#### File: tests/integrations/test_notebooks.py
```python
import os
import subprocess
import tempfile
import nbformat
from unittest import TestCase
default_timeout = 900
def _notebook_run(path, timeout=default_timeout):
"""
Execute a single IPython notebook at the given path and return the parsed notebook plus any execution errors.
:returns (parsed nb object, execution errors)
"""
dirname, _ = os.path.split(path)
execproc_timeout = '--ExecutePreprocessor.timeout=%d' % timeout
# Do not run notebook tests on Travis. notebooks tests should only be
# run in the local developer testing context and notebook tests often
# cause time out failures on Travis builds see (github #409, #410)
try:
if os.environ['TRAVIS']:
return [], []
except BaseException:
pass
# Ensure temporary files are not auto-deleted as processes have limited
# permissions to re-use file handles under WinNT-based operating systems.
fname = ''
with tempfile.NamedTemporaryFile(mode='w+t', suffix=".ipynb", delete=False) as fout:
fname = fout.name
args = ["jupyter", "nbconvert",
"--to", "notebook", "--execute", execproc_timeout]
args += ["--ExecutePreprocessor.kernel_name=python3"]
args += ["--output", fout.name, path]
subprocess.check_call(args)
fout.seek(0)
nb = nbformat.read(fout, nbformat.current_nbformat)
os.remove(fname)
errors = [output for cell in nb.cells if "outputs" in cell
for output in cell["outputs"]
if output.output_type == "error"]
return nb, errors
class NotebooksTestCase(TestCase):
def test_basic_pipeline_example(self):
nb, errors = _notebook_run('notebooks/basic_pipeline_example.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_friedrich_coefficients(self):
nb, errors = _notebook_run('notebooks/friedrich_coefficients.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_human_activity_recognition_multi_class_example(self):
nb, errors = _notebook_run('notebooks/human_activity_recognition_multi_class_example.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_inspect_dft_features(self):
nb, errors = _notebook_run('notebooks/inspect_dft_features.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_pipeline_with_two_datasets(self):
nb, errors = _notebook_run('notebooks/pipeline_with_two_datasets.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_robot_failure_example(self):
nb, errors = _notebook_run('notebooks/robot_failure_example.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_perform_PCA_on_extracted_features(self):
nb, errors = _notebook_run('notebooks/perform-PCA-on-extracted-features.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_fc_parameters_extraction_dictionary(self):
nb, errors = _notebook_run('notebooks/the-fc_parameters-extraction-dictionary.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_timeseries_forecasting_basic_example(self):
nb, errors = _notebook_run('notebooks/timeseries_forecasting_basic_example.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_timeseries_forecasting_google_stock(self):
nb, errors = _notebook_run('notebooks/timeseries_forecasting_google_stock.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_visualize_benjamini_yekutieli_procedure(self):
nb, errors = _notebook_run('notebooks/visualize-benjamini-yekutieli-procedure.ipynb', default_timeout)
self.assertEqual(errors, [])
def test_feature_extraction_with_datetime_index(self):
nb, errors = _notebook_run('notebooks/feature_extraction_with_datetime_index.ipynb', default_timeout)
self.assertEqual(errors, [])
```
#### File: units/feature_selection/test_relevance.py
```python
import numpy as np
import pandas as pd
import pytest
import mock
import warnings
from tsfresh.feature_selection.relevance import infer_ml_task, calculate_relevance_table, combine_relevance_tables, \
get_feature_type
class TestInferMLTask:
def test_infers_classification_for_integer_target(self):
y = pd.Series([1, 2, 3])
assert 'classification' == infer_ml_task(y)
def test_infers_classification_for_boolean_target(self):
y = pd.Series([True, False, False])
assert 'classification' == infer_ml_task(y)
def test_infers_classification_for_object_target(self):
y = pd.Series(['high', 'low'])
assert 'classification' == infer_ml_task(y)
def test_infers_regression_for_float_target(self):
y = pd.Series([1.0, 1.5, 1.7])
assert 'regression' == infer_ml_task(y)
class TestCalculateRelevanceTable:
@pytest.fixture()
def y_binary(self):
return pd.Series([0, 1, 1])
@pytest.fixture()
def y_real(self):
return pd.Series([0.1, 0.2, 0.1])
@pytest.fixture()
def X(self):
df = pd.DataFrame()
df['feature_binary'] = [1, 1, 0]
df['feature_real'] = [0.1, 0.2, 0.3]
return df
def test_restrict_ml_task_options(self, X, y_binary):
with pytest.raises(ValueError):
calculate_relevance_table(X, y_binary, ml_task='some_other_task')
def test_constant_feature_irrelevant(self, y_binary):
X = pd.DataFrame([1, 1, 1], columns=['feature_binary'])
relevance_table = calculate_relevance_table(X, y_binary)
assert "feature_binary" == relevance_table.index[0]
assert 'constant' == relevance_table.type[0]
assert np.isnan(relevance_table.p_value[0])
assert not relevance_table.relevant[0]
@mock.patch('tsfresh.feature_selection.relevance.target_binary_feature_real_test')
@mock.patch('tsfresh.feature_selection.relevance.target_binary_feature_binary_test')
def test_target_binary_calls_correct_tests(self, significance_test_feature_binary_mock,
significance_test_feature_real_mock, X, y_binary):
significance_test_feature_binary_mock.return_value = 0.5
significance_test_feature_real_mock.return_value = 0.7
relevance_table = calculate_relevance_table(X, y_binary, n_jobs=0)
assert 0.5 == relevance_table.loc['feature_binary'].p_value
assert 0.7 == relevance_table.loc['feature_real'].p_value
assert 2 == significance_test_feature_binary_mock.call_count
assert 2 == significance_test_feature_real_mock.call_count
@mock.patch('tsfresh.feature_selection.relevance.target_real_feature_real_test')
@mock.patch('tsfresh.feature_selection.relevance.target_real_feature_binary_test')
def test_target_real_calls_correct_tests(self, significance_test_feature_binary_mock,
significance_test_feature_real_mock, X, y_real):
significance_test_feature_binary_mock.return_value = 0.5
significance_test_feature_real_mock.return_value = 0.7
relevance_table = calculate_relevance_table(X, y_real, n_jobs=0)
assert 0.5 == relevance_table.loc['feature_binary'].p_value
assert 0.7 == relevance_table.loc['feature_real'].p_value
significance_test_feature_binary_mock.assert_called_once_with(X['feature_binary'], y=y_real)
significance_test_feature_real_mock.assert_called_once_with(X['feature_real'], y=y_real)
@mock.patch('tsfresh.feature_selection.relevance.target_real_feature_real_test')
@mock.patch('tsfresh.feature_selection.relevance.target_real_feature_binary_test')
def test_warning_for_no_relevant_feature(self, significance_test_feature_binary_mock,
significance_test_feature_real_mock, X, y_real):
significance_test_feature_binary_mock.return_value = 0.95
significance_test_feature_real_mock.return_value = 0.95
with mock.patch('logging.Logger.warning') as m:
_ = calculate_relevance_table(X, y_real, n_jobs=0, ml_task="regression")
m.assert_called_with("No feature was found relevant for regression for fdr level = 0.05 (which corresponds "
"to the maximal percentage of irrelevant features, consider using an higher fdr level "
"or add other features.")
class TestCombineRelevanceTables:
@pytest.fixture()
def relevance_table(self):
relevance_table = pd.DataFrame(index=pd.Series(['f1', 'f2', 'f3', 'f4'], name='feature'))
relevance_table['relevant'] = [True, False, True, False]
relevance_table['type'] = ['real'] * 4
relevance_table['p_value'] = [0.1, 0.2, 0.3, 0.4]
return relevance_table
def test_disjuncts_relevance(self, relevance_table):
relevance_table_2 = relevance_table.copy()
relevance_table_2.relevant = [False, True, True, False]
result = combine_relevance_tables([relevance_table, relevance_table_2])
assert ([True, True, True, False] == result.relevant).all()
def test_respects_index(self, relevance_table):
relevance_table_2 = relevance_table.copy()
relevance_table_2.reindex(reversed(relevance_table.index))
result = combine_relevance_tables([relevance_table, relevance_table_2])
assert ([True, False, True, False] == result.relevant).all()
def test_aggregates_p_value(self, relevance_table):
relevance_table_2 = relevance_table.copy()
relevance_table_2.p_value = [0.2, 0.1, 0.4, 0.3]
result = combine_relevance_tables([relevance_table, relevance_table_2])
assert (np.array([0.1, 0.1, 0.3, 0.3]) == result.p_value).all()
class TestGetFeatureType:
def test_binary(self):
feature = pd.Series([0.0, 1.0, 1.0])
assert 'binary' == get_feature_type(feature)
def test_constant(self):
feature = pd.Series([0.0, 0.0, 0.0])
assert 'constant' == get_feature_type(feature)
def test_real(self):
feature = pd.Series([0.0, 1.0, 2.0])
assert 'real' == get_feature_type(feature)
```
|
{
"source": "jeff-zqiu/uocweb",
"score": 2
}
|
#### File: uocsecrets/forum/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import validate_image_file_extension
import os
from uuid import uuid4
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
user_pk = models.IntegerField(default=0, editable=False)
clicked = models.TextField(default='', null=True, blank=True)
@classmethod
def default_user(cls):
try: anon = User.objects.get(pk=1)
except ObjectDoesNotExist:
anon = Profile.create_user('Anonymous', '', 'Anonymous').user
return anon
def is_anon(self):
return self.user.pk == 1
@classmethod
def create_user(cls, username, email, password):
user = User.objects.create_user(username, email, password)
return cls.objects.create(user = user, user_pk = user.pk, clicked ='')
def __str__(self):
return self.user.username
def rand_path_filename(instance, original_name):
upload_to = 'post/'
ext = original_name.split('.')[-1]
if instance.pk:
filename = '{}.{}'.format(instance.pk, ext)
else:
filename = '{}.{}'.format(uuid4().hex, ext)
return os.path.join(upload_to, filename)
class Post(models.Model):
title = models.CharField(max_length=50, default='Untitled')
author = models.ForeignKey(User, on_delete=models.CASCADE)
date = models.DateTimeField(default = timezone.now)
content = models.TextField(default="Whoa such empty")
clicks = models.IntegerField(default=0)
tags = models.IntegerField(default=0)
image = models.ImageField(upload_to=rand_path_filename,
validators=[validate_image_file_extension],
null=True, blank=True)
def __str__(self):
return self.title
@classmethod
def get_next_title(self):
last_post = Post.objects.last()
if last_post:
return 'Secret #'+str(last_post.id+1)
else: return 'Secret #1'
class Comments(models.Model):
author = models.ForeignKey(User, on_delete=models.CASCADE)
display_name = models.CharField(max_length=50, default='')
date = models.DateTimeField(default=timezone.now)
content = models.TextField(default='')
# if a post gets deleted, the comments will also be deleted (SQL CASCADE DELETE)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
# recursive model relationship
parent_comment = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)
@classmethod
def new_display_name(self, request, data):
last_comment = Comments.objects.last()
if last_comment:
last_comment_id = last_comment.id
else: last_comment_id = 0
if request.user.is_authenticated:
return '#' + str(last_comment_id + 1) + request.user.username
else: return '#'+str(last_comment_id + 1) + ' '+ 'User'
def __str__(self):
name = self.display_name + ' : ' + str(self.content)[:30]
if len(name) >= 29:
name += '...'
return name
```
|
{
"source": "jefkine/ML-From-Scratch",
"score": 2
}
|
#### File: mlfromscratch/deep_learning/gru.py
```python
import numpy as np
from ..base import Layer
from ztlearn.utils import clip_gradients as cg
from ztlearn.dl.initializers import InitializeWeights as init
from ztlearn.dl.activations import ActivationFunction as activate
from ztlearn.dl.optimizers import OptimizationFunction as optimizer
class GRU(Layer):
def __init__(self, h_units, activation = 'tanh', input_shape = None, gate_activation = 'sigmoid'):
self.h_units = h_units # number of hidden states
self.activation = activation
self.input_shape = input_shape
self.gate_activation = gate_activation
self.init_method = None # just added
self.optimizer_kwargs = None # just added
# gate weights
self.W_update = None
self.W_reset = None
self.W_states = None
# gate bias
self.b_update = None
self.b_reset = None
self.b_states = None
# final output to nodes weights
self.W_final = None
# final output to nodes bias
self.b_final = None
def prep_layer(self):
_, input_dim = self.input_shape
z_dim = self.h_units + input_dim # concatenate (h_units, vocabulary_size) vector
# gate weights
self.W_update = init(self.init_method).initialize_weights((z_dim, self.h_units))
self.W_reset = init(self.init_method).initialize_weights((z_dim, self.h_units))
self.W_cell = init(self.init_method).initialize_weights((z_dim, self.h_units))
self.W_states = init(self.init_method).initialize_weights((z_dim, self.h_units))
# gate hidden bias
self.b_update = np.zeros((self.h_units,))
self.b_reset = np.zeros((self.h_units,))
self.b_cell = np.zeros((self.h_units,))
self.b_states = np.zeros((self.h_units,))
# final output to nodes weights (input_dim is the vocab size and also the output size)
self.W_final = init(self.init_method).initialize_weights((self.h_units, input_dim))
# final output to nodes bias (input_dim is the vocab size and also the output size)
self.b_final = np.zeros((input_dim,))
@property
def weight_initializer(self):
return self.init_method
@weight_initializer.setter
def weight_initializer(self, init_method):
self.init_method = init_method
@property
def weight_optimizer(self):
return self.optimizer_kwargs
@weight_optimizer.setter
def weight_optimizer(self, optimizer_kwargs = {}):
self.optimizer_kwargs = optimizer_kwargs
@property
def layer_activation(self):
return self.activation
@layer_activation.setter
def layer_activation(self, activation):
self.activation = activation
@property
def output_shape(self):
return self.input_shape
def pass_forward(self, inputs, train_mode = True):
self.inputs = inputs
batch_size, time_steps, input_dim = inputs.shape
self.update = np.zeros((batch_size, time_steps, self.h_units))
self.reset = np.zeros((batch_size, time_steps, self.h_units))
self.cell = np.zeros((batch_size, time_steps, self.h_units))
self.states = np.zeros((batch_size, time_steps, self.h_units))
self.final = np.zeros((batch_size, time_steps, input_dim))
self.z = np.concatenate((self.inputs, self.states), axis = 2)
self.z_tilde = np.zeros_like(self.z)
for t in range(time_steps):
self.update[:, t] = activate(self.gate_activation)._forward(np.dot(self.z[:, t], self.W_update) + self.b_update)
self.reset[:, t] = activate(self.gate_activation)._forward(np.dot(self.z[:, t], self.W_reset) + self.b_reset)
self.z_tilde[:, t] = np.concatenate((self.reset[:, t] * self.states[:, t-1], self.inputs[:, t]), axis = 1)
self.cell[:, t] = activate(self.activation)._forward(np.dot(self.z_tilde[:, t-1], self.W_cell) + self.b_cell)
self.states[:, t] = (1. - self.update[:, t]) * self.states[:, t-1] + self.update[:, t] * self.cell[:, t]
# logits
self.final[:, t] = np.dot(self.states[:, t], self.W_final) + self.b_final
if not train_mode:
return activate('softmax')._forward(self.final) # if mode is not training
return self.final
def pass_backward(self, grad):
_, time_steps, _ = grad.shape
dW_update = np.zeros_like(self.W_update)
dW_reset = np.zeros_like(self.W_reset)
dW_cell = np.zeros_like(self.W_cell)
dW_final = np.zeros_like(self.W_final)
db_update = np.zeros_like(self.b_update)
db_reset = np.zeros_like(self.b_reset)
db_cell = np.zeros_like(self.b_cell)
db_final = np.zeros_like(self.b_final)
dstates = np.zeros_like(self.states)
dstate_a = np.zeros_like(self.states)
dstate_b = np.zeros_like(self.states)
dstate_c = np.zeros_like(self.states)
dstates_next = np.zeros_like(self.states)
dstates_prime = np.zeros_like(self.states)
dz_cell = np.zeros_like(self.cell)
dcell = np.zeros_like(self.cell)
dz_reset = np.zeros_like(self.reset)
dreset = np.zeros_like(self.reset)
dz_update = np.zeros_like(self.update)
dupdate = np.zeros_like(self.update)
next_grad = np.zeros_like(grad)
for t in np.arange(time_steps)[::-1]: # reversed
dW_final += np.dot(self.states[:, t].T, grad[:, t])
db_final += np.sum(grad[:, t], axis = 0)
dstates[:, t] = np.dot(grad[:, t], self.W_final.T)
dstates[:, t] += dstates_next[:, t]
next_grad = np.dot(dstates, self.W_final)
dcell[:, t] = self.update[:, t] * dstates[:, t]
dstate_a[:, t] = (1. - self.update[:, t]) * dstates[:, t]
dupdate[:, t] = self.cell[:, t] * dstates[:, t] - self.states[:, t-1] * dstates[:, t]
dcell[:, t] = activate(self.activation)._backward(self.cell[:, t]) * dcell[:, t]
dW_cell += np.dot(self.z_tilde[:, t-1].T, dcell[:, t])
db_cell += np.sum(dcell[:, t], axis = 0)
dz_cell = np.dot(dcell[:, t], self.W_cell.T)
dstates_prime[:, t] = dz_cell[:, :self.h_units]
dstate_b[:, t] = self.reset[:, t] * dstates_prime[:, t]
dreset[:, t] = self.states[:, t-1] * dstates_prime[:, t]
dreset[:, t] = activate(self.gate_activation)._backward(self.reset[:, t]) * dreset[:, t]
dW_reset += np.dot(self.z[:, t].T, dreset[:, t])
db_reset += np.sum(dreset[:, t], axis = 0)
dz_reset = np.dot(dreset[:, t], self.W_reset.T)
dupdate[:, t] = activate(self.gate_activation)._backward(self.update[:, t]) * dupdate[:, t]
dW_update += np.dot(self.z[:, t].T, dupdate[:, t])
db_update += np.sum(dupdate[:, t], axis = 0)
dz_update = np.dot(dupdate[:, t], self.W_update.T)
dz = dz_reset + dz_update
dstate_c[:, t] = dz[:, :self.h_units]
dstates_next = dstate_a + dstate_b + dstate_c
# optimize weights and bias
self.W_final = optimizer(self.optimizer_kwargs)._update(self.W_final, cg(dW_final))
self.b_final = optimizer(self.optimizer_kwargs)._update(self.b_final, cg(db_final))
self.W_cell = optimizer(self.optimizer_kwargs)._update(self.W_cell, cg(dW_cell))
self.b_cell = optimizer(self.optimizer_kwargs)._update(self.b_cell, cg(db_cell))
self.W_reset = optimizer(self.optimizer_kwargs)._update(self.W_reset, cg(dW_reset))
self.b_reset = optimizer(self.optimizer_kwargs)._update(self.b_reset, cg(db_reset))
self.W_update = optimizer(self.optimizer_kwargs)._update(self.W_update, cg(dW_update))
self.b_update = optimizer(self.optimizer_kwargs)._update(self.b_update, cg(db_update))
return next_grad
```
|
{
"source": "jefkine/zeta-learn",
"score": 2
}
|
#### File: ztlearn/datasets/data_set.py
```python
class DataSet:
def __init__(self, data, target, describe = None):
self.data = data
self.target = target
self.describe = describe
```
#### File: datasets/pima/pima_indians.py
```python
import os
import pandas as pd
from ztlearn.utils import maybe_download
from ztlearn.utils import train_test_split
from ztlearn.datasets.data_set import DataSet
URL = 'http://ftp.ics.uci.edu/pub/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data'
def fetch_pima_indians(data_target = True, custom_path = os.getcwd()):
file_path = maybe_download(custom_path + '/../../ztlearn/datasets/pima/', URL)
describe = [
'Pregnancies',
'Glucose',
'BloodPressure',
'SkinThickness',
'DiabetesPedigreeFunction',
'Age',
'Insulin',
'BMI',
'Outcome (0 or 1)'
]
dataframe = pd.read_csv(file_path, names = describe)
data, target = dataframe.values[:,0:8], dataframe.values[:,8]
if data_target:
return DataSet(data, target, describe)
else:
return train_test_split(data, target, test_size = 0.2, random_seed = 2)
```
#### File: datasets/steel/steel_plates_faults.py
```python
import os
import pandas as pd
from ztlearn.utils import maybe_download
from ztlearn.utils import train_test_split
from ztlearn.datasets.data_set import DataSet
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00198/Faults.NNA'
URL_2 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00198/Faults27x7_var'
def fetch_steel_plates_faults(data_target = True, custom_path = os.getcwd()):
file_path = maybe_download(custom_path + '/../../ztlearn/datasets/steel/', URL)
file_path_2 = maybe_download(custom_path + '/../../ztlearn/datasets/steel/', URL_2)
describe = [
'Pastry',
'Z_Scratch',
'K_Scatch',
'Stains',
'Dirtiness',
'Bumps',
'Other_Faults'
]
InputDataHeader = pd.read_csv(file_path_2, header=None)
InputData = pd.read_csv(file_path, header=None, sep="\t")
InputData.set_axis(InputDataHeader.values.flatten(), axis=1, inplace=True)
dataframe = InputData.copy()
dataframe.drop(describe, axis=1,inplace=True)
targetframe = InputData[describe].copy()
data, target = dataframe.values, targetframe.values
if data_target:
return DataSet(data, target, describe)
else:
return train_test_split(data, target, test_size = 0.2, random_seed = 2)
```
#### File: dl/layers/embedding.py
```python
import numpy as np
from .base import Layer
from ztlearn.utils import one_hot
from ztlearn.utils import get_sentence_tokens
from ztlearn.initializers import InitializeWeights as init
from ztlearn.optimizers import OptimizationFunction as optimizer
class Embedding(Layer):
def __init__(self,
input_dim, # number of unique words in the text dataset
output_dim, # size of the embedding vectors
embeddings_init = 'uniform', # init type for the embedding matrix (weights)
input_length = 10): # size of input sentences
self.input_dim = input_dim
self.output_dim = output_dim
self.input_length = input_length
self.input_shape = None # required by the base class
self.init_method = None
self.optimizer_kwargs = None
self.is_trainable = True
@property
def trainable(self):
return self.is_trainable
@trainable.setter
def trainable(self, is_trainable):
self.is_trainable = is_trainable
@property
def weight_initializer(self):
return self.init_method
@weight_initializer.setter
def weight_initializer(self, init_method):
self.init_method = init_method
@property
def weight_optimizer(self):
return self.optimizer_kwargs
@weight_optimizer.setter
def weight_optimizer(self, optimizer_kwargs = {}):
self.optimizer_kwargs = optimizer_kwargs
@property
def layer_parameters(self):
return sum([np.prod(param.shape) for param in [self.weights]])
@property
def output_shape(self):
return (self.input_length, self.output_dim)
def prep_layer(self):
self.uniques_one_hot = one_hot(np.arange(self.input_dim)) # master one hot matrix
self.kernel_shape = (self.input_dim, self.output_dim)
self.weights = init(self.weight_initializer).initialize_weights(self.kernel_shape) # embeddings
# inputs should be gotten from sentences_tokens = get_sentence_tokens(text_input)
def pass_forward(self, inputs, train_mode = True, **kwargs):
self.inputs = inputs # tokenized inputs
embeded_inputs = []
for _, tokens in enumerate(self.inputs.tolist()):
for i, word_index in enumerate(tokens):
embed = np.expand_dims(self.uniques_one_hot[word_index,:], 1).T.dot(self.weights)
tokens[i] = list(np.array(embed).flat)
embeded_inputs.append(tokens)
return np.array(embeded_inputs)
def pass_backward(self, grad, epoch_num, batch_num, batch_size):
prev_weights = self.weights
if self.is_trainable:
dweights = np.sum(grad @ self.weights.T, axis = 1)
self.weights = optimizer(self.weight_optimizer).update(self.weights, dweights.T, epoch_num, batch_num, batch_size)
# endif self.is_trainable
return grad @ prev_weights.T
```
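A hedged sketch of a forward pass through the Embedding layer above; the initializer name, the toy token indices, and the assumed behaviour of `ztlearn.utils.one_hot` with a single argument are illustrative assumptions.
```python
# Hedged sketch: embedding a batch of tokenized "sentences".
import numpy as np

embed = Embedding(input_dim=6, output_dim=4, input_length=3)
embed.weight_initializer = 'random_uniform'   # assumed valid initializer name
embed.prep_layer()  # builds the one-hot lookup (assumed (input_dim, input_dim)) and the embedding matrix

tokens = np.array([[0, 2, 5], [1, 1, 3]])     # two sentences of 3 word indices each
out = embed.pass_forward(tokens)
print(out.shape)                              # expected (2, 3, 4): (batch, input_length, output_dim)
```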
#### File: layers/recurrent/rnn.py
```python
import numpy as np
from ..base import Layer
from ztlearn.utils import clip_gradients as cg
from ztlearn.initializers import InitializeWeights as init
from ztlearn.activations import ActivationFunction as activate
from ztlearn.optimizers import OptimizationFunction as optimizer
class RNN(Layer):
def __init__(self, h_units, activation = None, bptt_truncate = 5, input_shape = None):
self.h_units = h_units # number of hidden states
self.activation = activation # should be tanh by default
self.bptt_truncate = bptt_truncate
self.input_shape = input_shape
self.init_method = None
self.optimizer_kwargs = None
self.W_input = None
self.W_output = None
self.W_recur = None
self.b_output = None
self.b_input = None
self.is_trainable = True
@property
def trainable(self):
return self.is_trainable
@trainable.setter
def trainable(self, is_trainable):
self.is_trainable = is_trainable
@property
def weight_initializer(self):
return self.init_method
@weight_initializer.setter
def weight_initializer(self, init_method):
self.init_method = init_method
@property
def weight_optimizer(self):
return self.optimizer_kwargs
@weight_optimizer.setter
def weight_optimizer(self, optimizer_kwargs = {}):
self.optimizer_kwargs = optimizer_kwargs
@property
def layer_activation(self):
return self.activation
@layer_activation.setter
def layer_activation(self, activation):
self.activation = activation
@property
def layer_parameters(self):
parameters = [
self.W_input,
self.W_output,
self.W_recur,
self.b_output,
self.b_input
]
return sum([np.prod(param.shape) for param in parameters])
@property
def output_shape(self):
return self.input_shape
def prep_layer(self):
_, input_dim = self.input_shape
self.W_input = init(self.init_method).initialize_weights((self.h_units, input_dim))
self.W_output = init(self.init_method).initialize_weights((input_dim, self.h_units))
self.W_recur = init(self.init_method).initialize_weights((self.h_units, self.h_units))
self.b_output = np.zeros((input_dim,))
self.b_input = np.zeros((self.h_units,))
# implementation based on techniques as seen here: https://github.com/dennybritz/rnn-tutorial-rnnlm/blob/master/RNNLM.ipynb
def pass_forward(self, inputs, train_mode = True):
self.inputs = inputs
batch_size, time_steps, input_dim = inputs.shape
self.state_inputs = np.zeros((batch_size, time_steps, self.h_units))
self.states = np.zeros((batch_size, time_steps + 1, self.h_units)) # additional(+1) last column containing the final state also set to zero
self.state_outputs = np.zeros((batch_size, time_steps, input_dim))
for t in range(time_steps):
self.state_inputs[:, t] = (np.dot(inputs[:, t], self.W_input.T) + np.dot(self.states[:, t - 1], self.W_recur.T)) + self.b_input
self.states[:, t] = activate(self.activation).forward(self.state_inputs[:, t])
self.state_outputs[:, t] = np.dot(self.states[:, t], self.W_output.T) + self.b_output
if not train_mode:
return activate('softmax').forward(self.state_outputs) # if mode is not training
return self.state_outputs
# implementation based on techniques as seen here: https://github.com/dennybritz/rnn-tutorial-rnnlm/blob/master/RNNLM.ipynb
def pass_backward(self, grad, epoch_num, batch_num, batch_size):
_, time_steps, _ = grad.shape
next_grad = np.zeros_like(grad)
if self.is_trainable:
dW_input = np.zeros_like(self.W_input)
dW_recur = np.zeros_like(self.W_recur)
dW_output = np.zeros_like(self.W_output)
db_input = np.zeros_like(self.b_input)
db_output = np.zeros_like(self.b_output)
for t in np.arange(time_steps)[::-1]: # reversed
dW_output += np.dot(grad[:, t].T, self.states[:, t])
db_output += np.sum(grad[:, t], axis = 0)
dstate = np.dot(grad[:, t], self.W_output) * activate(self.activation).backward(self.state_inputs[:, t])
next_grad[:, t] = np.dot(dstate, self.W_input)
for tt in np.arange(max(0, t - self.bptt_truncate), t + 1)[::-1]: # reversed
dW_input += np.dot(dstate.T, self.inputs[:, tt])
dW_recur += np.dot(dstate.T, self.states[:, tt - 1])
db_input += np.sum(dstate, axis = 0)
dstate = np.dot(dstate, self.W_recur) * activate(self.activation).backward(self.state_inputs[:, tt - 1])
# optimize weights and bias
self.W_input = optimizer(self.optimizer_kwargs).update(self.W_input, cg(dW_input), epoch_num, batch_num, batch_size)
self.W_output = optimizer(self.optimizer_kwargs).update(self.W_output, cg(dW_output), epoch_num, batch_num, batch_size)
self.W_recur = optimizer(self.optimizer_kwargs).update(self.W_recur, cg(dW_recur), epoch_num, batch_num, batch_size)
self.b_input = optimizer(self.optimizer_kwargs).update(self.b_input, cg(db_input), epoch_num, batch_num, batch_size)
self.b_output = optimizer(self.optimizer_kwargs).update(self.b_output, cg(db_output), epoch_num, batch_num, batch_size)
# endif self.is_trainable
return next_grad
```
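A hedged sketch of a single forward pass through the RNN layer above; the batch size, shapes, and the 'tanh'/'he_normal' names are illustrative.
```python
# Hedged sketch: one forward pass through the vanilla RNN layer.
import numpy as np

rnn = RNN(h_units=8, activation='tanh', input_shape=(5, 12))
rnn.weight_initializer = 'he_normal'   # assumed valid initializer name
rnn.prep_layer()

x = np.random.randn(4, 5, 12)          # (batch_size, time_steps, input_dim)
out = rnn.pass_forward(x)              # train mode returns the raw per-step outputs
print(out.shape)                       # expected (4, 5, 12)
```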
#### File: zeta-learn/ztlearn/initializers.py
```python
import numpy as np
class WeightInitializer:
def compute_fans(self, shape):
"""
func: compute_fans adapted from keras: https://github.com/fchollet/keras/blob/master/keras/initializers.py
copyright held by fchollet(keras-team), 2017 as part of Keras project
licence: MIT
"""
# kernel shape: ('NF': Total Filters, 'CF': Filter Channels, 'HF': Filter Height 'WF': Filter Width)
shape = (shape[0], 1) if len(shape) == 1 else shape
receptive_field_size = np.prod(shape[:2])
fan_out = shape[0] * receptive_field_size # NF *receptive_field_size
fan_in = shape[1] * receptive_field_size # CF *receptive_field_size
return fan_in, fan_out
class HeNormal(WeightInitializer):
"""
**He Normal (HeNormal)**
HeNormal is a robust initialization method that particularly considers the
rectifier nonlinearities. He normal is an implementation based on Gaussian
distribution
References:
[1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
* [Kaiming He, 2015] https://arxiv.org/abs/1502.01852
* [PDF] https://arxiv.org/pdf/1502.01852.pdf
[2] Initialization Of Deep Networks Case of Rectifiers
* [DeepGrid Article - Jefkine Kafunah] https://goo.gl/TBNw5t
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(2. / fan_in)
np.random.seed(random_seed)
return np.random.normal(loc = 0.0, scale = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class HeUniform(WeightInitializer):
"""
**He Uniform (HeUniform)**
HeUniform is a robust initialization method that particularly considers the
rectifier nonlinearities. He uniform is an implementation based on Uniform
distribution
References:
[1] Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
* [<NAME>, 2015] https://arxiv.org/abs/1502.01852
* [PDF] https://arxiv.org/pdf/1502.01852.pdf
[2] Initialization Of Deep Networks Case of Rectifiers
* [DeepGrid Article - Jefkine Kafunah] https://goo.gl/TBNw5t
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(6. / fan_in)
np.random.seed(random_seed)
return np.random.uniform(low = -scale, high = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class GlorotNormal(WeightInitializer):
"""
**Glorot Normal (GlorotNormal)**
GlorotNormal, more famously known as the Xavier initialization is based on
the effort to try to maintain the same variance of the gradients of the weights
for all the layers. Glorot normal is an implementation based on Gaussian
distribution
References:
[1] Understanding the difficulty of training deep feedforward neural networks
* [<NAME>, 2010] http://proceedings.mlr.press/v9/glorot10a.html
* [PDF] http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
[2] Initialization Of Deep Feedfoward Networks
* [DeepGrid Article - Jefkine Kafunah] https://goo.gl/E2XrGe
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(2. / (fan_in + fan_out))
np.random.seed(random_seed)
return np.random.normal(loc = 0.0, scale = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class GlorotUniform(WeightInitializer):
"""
**Glorot Uniform (GlorotUniform)**
GlorotUniform, more famously known as the Xavier initialization is based on
the effort to try to maintain the same variance of the gradients of the weights
for all the layers. Glorot uniform is an implementation based on Uniform
distribution
References:
[1] Understanding the difficulty of training deep feedforward neural networks
* [<NAME>, 2010] http://proceedings.mlr.press/v9/glorot10a.html
* [PDF] http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
[2] Initialization Of Deep Feedfoward Networks
* [DeepGrid Article - <NAME>afunah] https://goo.gl/E2XrGe
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(6. / (fan_in + fan_out))
np.random.seed(random_seed)
return np.random.uniform(low = -scale, high = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class LeCunUniform(WeightInitializer):
"""
**LeCun Uniform (LeCunUniform)**
Weights should be randomly chosen but in such a way that the sigmoid is
primarily activated in its linear region. LeCun uniform is an implementation
based on Uniform distribution
References:
[1] Efficient Backprop
* [LeCun, 1998][PDF] http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(3. / fan_in)
np.random.seed(random_seed)
return np.random.uniform(low = -scale, high = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class LeCunNormal(WeightInitializer):
"""
**LeCun Normal (LeCunNormal)**
Weights should be randomly chosen but in such a way that the sigmoid is
primarily activated in its linear region. LeCun normal is an implementation
based on Gaussian distribution
References:
[1] Efficient Backprop
* [LeCun, 1998][PDF] http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(1. / fan_in)
np.random.seed(random_seed)
return np.random.normal(loc = -scale, scale = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class RandomUniform(WeightInitializer):
"""
**Random Uniform (RandomUniform)**
Random uniform, an implementation of weight initialization based on Uniform
distribution
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(1. / (fan_in + fan_out))
np.random.seed(random_seed)
return np.random.uniform(low = -scale, high = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class RandomNormal(WeightInitializer):
"""
**Random Normal (RandomNormal)**
Random normal, an implementation of weight initialization based on Gaussian
distribution
"""
def weights(self, shape, random_seed):
fan_in, fan_out = self.compute_fans(shape)
scale = np.sqrt(1. / (fan_in + fan_out))
np.random.seed(random_seed)
return np.random.normal(loc = 0.0, scale = scale, size = shape)
@property
def init_name(self):
return self.__class__.__name__
class Zero(WeightInitializer):
"""
**Zero (Zero)**
Zero is an implementation of weight initialization that returns all zeros
"""
def weights(self, shape, random_seed):
return np.zeros(shape = shape)
@property
def init_name(self):
return self.__class__.__name__
class One(WeightInitializer):
"""
**One (One)**
One is an implementation of weight initialization that returns all ones
"""
def weights(self, shape, random_seed):
return np.ones(shape = shape)
@property
def init_name(self):
return self.__class__.__name__
class Identity(WeightInitializer):
"""
**Identity (Identity)**
Identity is an implementation of weight initialization that returns an
identity matrix of size shape
"""
def weights(self, shape, random_seed):
return np.eye(shape[0], shape[1], dtype = np.float32)
@property
def init_name(self):
return self.__class__.__name__
class InitializeWeights:
_methods = {
'ones' : One,
'zeros' : Zero,
'identity' : Identity,
'he_normal' : HeNormal,
'he_uniform' : HeUniform,
'lecun_normal' : LeCunNormal,
'lecun_uniform' : LeCunUniform,
'random_normal' : RandomNormal,
'glorot_normal' : GlorotNormal,
'random_uniform' : RandomUniform,
'glorot_uniform' : GlorotUniform
}
def __init__(self, name):
if name not in self._methods.keys():
raise Exception('Weight initialization method must be either one of the following: {}.'.format(', '.join(self._methods.keys())))
self.init_method = self._methods[name]()
@property
def name(self):
return self.init_method.init_name
def initialize_weights(self, shape, random_seed = None):
return self.init_method.weights(shape, random_seed)
```
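A small hedged sketch of the factory above; the kernel shape and seed are illustrative.
```python
# Hedged sketch: drawing Glorot-uniform weights and inspecting the scale used.
import numpy as np

wi = InitializeWeights('glorot_uniform')
W = wi.initialize_weights((64, 32), random_seed=3)
fan_in, fan_out = wi.init_method.compute_fans((64, 32))
print(wi.name, W.shape, np.sqrt(6. / (fan_in + fan_out)))  # scale used by GlorotUniform
```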
#### File: zeta-learn/ztlearn/regularizers.py
```python
import numpy as np
# Note: np.multiply performs an elementwise multiply on numpy arrays;
# the asterisk (*) does the same on arrays but performs matrix multiplication on numpy matrices (np.matrix)
class L1Regularization:
"""
**Lasso Regression (L1Regularization)**
L1Regularization adds sum of the absolute value magnitudes of parameters as
penalty term to the loss function
References:
[1] Regularization (mathematics)
* [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
[2] Regression shrinkage and selection via the lasso
* [R Tibshirani, 1996] https://goo.gl/Yh9bBU
* [PDF] https://goo.gl/mQP5mA
[3] Feature selection, L1 vs. L2 regularization, and rotational invariance
* [<NAME>, ] [PDF] https://goo.gl/rbwNCt
Args:
_lambda (float32): controls the weight of the penalty term
"""
def __init__(self, _lambda, **kwargs):
self._lambda = _lambda
def regulate(self, weights):
return np.multiply(self._lambda, np.linalg.norm(weights))
def derivative(self, weights):
return np.multiply(self._lambda, np.sign(weights))
@property
def regulation_name(self):
return self.__class__.__name__
class L2Regularization:
"""
**Ridge Regression (L2Regularization)**
L2Regularization adds the sum of the squared magnitudes of parameters as a penalty
term to the loss function
References:
[1] Regularization (mathematics)
* [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
[2] Regression shrinkage and selection via the lasso
* [R Tibshirani, 1996] https://goo.gl/Yh9bBU
* [PDF] https://goo.gl/mQP5mA
[3] Feature selection, L1 vs. L2 regularization, and rotational invariance
* [<NAME>. Ng, ] [PDF] https://goo.gl/rbwNCt
Args:
_lambda (float32): controls the weight of the penalty term
"""
def __init__(self, _lambda, **kwargs):
self._lambda = _lambda
def regulate(self, weights):
return np.multiply(self._lambda, (0.5 * weights.T.dot(weights)))
def derivative(self, weights):
return np.multiply(self._lambda, weights)
@property
def regulation_name(self):
return self.__class__.__name__
class ElasticNetRegularization:
"""
**Elastic Net Regularization (ElasticNetRegularization)**
ElasticNetRegularization adds both absolute value of magnitude and squared
magnitude of coefficient as penalty term to the loss function
References:
[1] Regularization (mathematics)
* [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
Args:
_lambda (float32): controls the weight of the penalty term
l1_ratio (float32): controls the value l1 penalty as a ratio of total penalty added to the loss function
"""
def __init__(self, _lambda, l1_ratio):
self._lambda = _lambda
self.l1_ratio = l1_ratio
def regulate(self, weights):
return np.multiply(self._lambda, (((self.l1_ratio * 0.5) * weights.T.dot(weights)) + ((1 - self.l1_ratio) * np.linalg.norm(weights))))
def derivative(self, weights):
return np.multiply(self._lambda, (((self.l1_ratio * 0.5) * weights) + ((1 - self.l1_ratio) * np.sign(weights))))
@property
def regulation_name(self):
return self.__class__.__name__
class RegularizationFunction:
_regularizers = {
'l1' : L1Regularization,
'lasso' : L1Regularization,
'l2' : L2Regularization,
'ridge' : L2Regularization,
'elastic' : ElasticNetRegularization,
'elastic_net' : ElasticNetRegularization
}
def __init__(self, name = 'lasso', _lambda = 0.5, l1_ratio = 0.5):
if name not in self._regularizers.keys():
raise Exception('Regularization function must be either one of the following: {}.'.format(', '.join(self._regularizers.keys())))
self.regularization_func = self._regularizers[name](_lambda, l1_ratio = l1_ratio)
@property
def name(self):
return self.regularization_func.regulation_name
def regulate(self, weights):
return self.regularization_func.regulate(weights)
def derivative(self, weights):
return self.regularization_func.derivative(weights)
```
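A brief hedged sketch of the regularizer factory above on a toy weight vector.
```python
# Hedged sketch: L2 (ridge) penalty and its gradient for a toy weight vector.
import numpy as np

w = np.array([0.5, -1.0, 2.0])
reg = RegularizationFunction('ridge', _lambda=0.1)
print(reg.regulate(w))    # 0.1 * 0.5 * w.T.dot(w) = 0.2625
print(reg.derivative(w))  # 0.1 * w
```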
#### File: ztlearn/utils/conv_utils.py
```python
import math as mt
import numpy as np
def alt_get_output_dims(input_height, input_width, kernel_size, strides, pad_height, pad_width):
"""
FORMULA: [((W - Kernel_W + 2P) / S_W) + 1] and [((H - Kernel_H + 2P) / S_H) + 1]
FORMULA: [((W - Pool_W + 2P) / S_W) + 1] and [((H - Pool_H + 2P) / S_H) + 1]
"""
output_height = ((input_height - kernel_size[0] + np.sum(pad_height)) / strides[0]) + 1
output_width = ((input_width - kernel_size[1] + np.sum(pad_width)) / strides[1]) + 1
return output_height, output_width
def get_output_dims(input_height, input_width, kernel_size, strides, padding_type = 'valid'):
"""
**SAME and VALID Padding**
VALID: No padding is applied; the filter is only placed where it fits entirely inside
the input, so any border region the filter cannot cover is dropped.
SAME: The input is padded (if needed) so that the filter and stride cover it fully.
For stride 1, this keeps the output spatial size equal to the input size.
References:
[1] SAME and VALID Padding: http://bit.ly/2MtGgBM
"""
if padding_type == 'same':
output_height = mt.ceil(float(input_height) / float(strides[0]))
output_width = mt.ceil(float(input_width) / float(strides[1]))
if padding_type == 'valid':
output_height = mt.ceil(float(input_height - kernel_size[0] + 1) / float(strides[0]))
output_width = mt.ceil(float(input_width - kernel_size[1] + 1) / float(strides[1]))
return output_height, output_width
# unroll for toeplitz
def unroll_inputs(padded_inputs,
batch_num,
filter_num,
output_height,
output_width,
kernel_size):
unrolled_inputs = np.zeros((batch_num,
filter_num,
output_height * output_width,
kernel_size**2))
offset = 0
for h in np.arange(output_height): # output height
for w in np.arange(output_width): # output width
for b in np.arange(batch_num): # batch number
for f in np.arange(filter_num): # filter number
unrolled_inputs[b, f, offset, :] = padded_inputs[b,
f,
h:h+kernel_size,
w:w+kernel_size].flatten()
offset += 1
return unrolled_inputs.reshape(filter_num * kernel_size**2, -1)
```
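A quick worked check of the two output-size helpers above; the input size, kernel, stride, and padding values are illustrative.
```python
# Hedged sketch: output spatial sizes for a 32x32 input, 3x3 kernel, stride 1.
print(get_output_dims(32, 32, (3, 3), (1, 1), padding_type='same'))   # (32, 32)
print(get_output_dims(32, 32, (3, 3), (1, 1), padding_type='valid'))  # (30, 30)
print(alt_get_output_dims(32, 32, (3, 3), (1, 1), (1, 1), (1, 1)))    # (32.0, 32.0)
```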
#### File: ztlearn/utils/sequence_utils.py
```python
import numpy as np
from .data_utils import one_hot
#-----------------------------------------------------------------------------#
# GENERATE SYNTHETIC SEQUENCES DATA #
#-----------------------------------------------------------------------------#
def gen_mult_sequence_xtyt(nums, cols = 10, factor = 10, tensor_dtype = int):
assert factor >= cols, 'factor should be more than or equal to cols'
lookup = cols * factor
x = np.zeros([nums, cols, lookup], dtype = tensor_dtype)
y = np.zeros([nums, cols, lookup], dtype = tensor_dtype)
for i in range(nums):
start = np.random.randint(1, cols)
seq = np.arange(start, (start*cols)+1, start)
x[i] = one_hot(seq, lookup)
y[i] = np.roll(x[i], -1, axis=0)
y[:, -1, 1] = 1
return x, y, lookup
def gen_mult_sequence_xtym(nums, cols = 10, factor = 10, tensor_dtype = int):
assert factor >= cols, 'factor should be more than or equal to cols'
lookup = cols * factor
cols_p = cols - 1
x = np.zeros([nums, cols, lookup], dtype = tensor_dtype)
x_p = np.zeros([nums, cols_p, lookup], dtype = tensor_dtype)
y = np.zeros([nums, lookup], dtype = int)
for i in range(nums):
start = np.random.randint(1, cols)
seq = np.arange(start, (start*cols)+1, start)
x[i] = one_hot(seq, lookup)
x_p[i] = x[i,:-1,:]
y[i] = x[i,cols_p,:]
return x_p, y, lookup
```
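A hedged sketch of the output shapes produced by the generators above; the sizes are illustrative and assume `one_hot(seq, lookup)` returns a `(len(seq), lookup)` matrix.
```python
# Hedged sketch: shapes of the synthetic multiplication sequences.
x, y, lookup = gen_mult_sequence_xtyt(nums=4, cols=10, factor=10)
print(x.shape, y.shape, lookup)      # expected (4, 10, 100) (4, 10, 100) 100

x_p, y_m, lookup = gen_mult_sequence_xtym(nums=4, cols=10, factor=10)
print(x_p.shape, y_m.shape, lookup)  # expected (4, 9, 100) (4, 100) 100
```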
|
{
"source": "jeflucas/netflixSommelier",
"score": 2
}
|
#### File: netflixSommelier/website/auth.py
```python
from enum import EnumMeta
from flask import Blueprint, render_template, request, flash, redirect, url_for
from sqlalchemy.sql.functions import user
from .models import User
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from flask_login import login_user, login_required, logout_user, current_user
auth = Blueprint('auth', __name__)
@auth.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if check_password_hash(user.password, password):
flash('Logged in sucessfully!', category='success')
login_user(user, remember=True)
return redirect(url_for('views.home'))
else:
flash('Incorrect password, try again!', category='error')
else:
flash('E-mail does not exist', category='error')
return render_template("login.html", user=current_user)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('auth.login'))
@auth.route("/sign-up", methods=['GET', 'POST'])
def sign_up():
if request.method == 'POST':
email = request.form.get('email')
first_name = request.form.get('firstName')
password1 = request.form.get('<PASSWORD>')
password2 = request.form.get('<PASSWORD>')
user = User.query.filter_by(email=email).first()
if user:
flash('Email already exists', category='error')
elif len(email) < 4:
flash("Email must be greater than 4 characters", category='error')
pass
elif len(first_name) < 2:
flash("First name must be greater than 3 characters", category='error')
pass
elif password1 != password2:
flash("Password don\'t match", category='error')
pass
elif len(password1) < 6:
flash("Password must be at least 6 characters", category='error')
pass
else:
new_user = User(email=email, first_name=first_name, password=generate_password_hash(<PASSWORD>, method='sha256'))
db.session.add(new_user)
db.session.commit()
login_user(new_user, remember=True)
flash("Account created!", category='success')
return redirect(url_for('views.home'))
pass
return render_template("sign_up.html", user=current_user)
```
|
{
"source": "JEFMX/Codigo-Morse",
"score": 3
}
|
#### File: JEFMX/Codigo-Morse/ObenedorDeEntrada.py
```python
class ObtenedorDeEntrada:
def getEntrada(self):
f = open ('Entrada.txt','r')
entrada = f.read()
print(entrada)
f.close()
return entrada
#input("Introduce el texto\n")
```
|
{
"source": "jef-n/qwc-db-auth",
"score": 3
}
|
#### File: jef-n/qwc-db-auth/forms.py
```python
import re
from flask_wtf import FlaskForm
from wtforms import BooleanField, HiddenField, PasswordField, \
StringField, SubmitField, ValidationError
from wtforms.validators import DataRequired, Email, EqualTo, Length, Optional
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Sign In')
class VerifyForm(FlaskForm):
token = StringField('Verification code', validators=[DataRequired()])
class NewPasswordForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
class EditPasswordForm(FlaskForm):
reset_password_token = HiddenField(validators=[Optional()])
url = HiddenField(validators=[Optional()])
password = PasswordField(
'<PASSWORD>'
)
password_confirmation = PasswordField(
'<PASSWORD>',
validators=[DataRequired(), EqualTo('password')]
)
def __init__(self, min_length, max_length, constraints, min_constraints,
constraints_message, **kwargs):
"""Constructor
:param int min_length: Min password length (-1 for none)
:param int max_length: Max password length (-1 for none)
:param list(str) constraints: List of custom constraints as RegEx
:param int min_constraints: Min number of constraints to meet
:param constraints_message: Message if constraints are not met
"""
# set dynamic validators for password field
validators = [
DataRequired(),
Length(min=min_length, max=max_length)
]
self.password.kwargs['validators'] = validators
# store constraints
self.constraints = constraints
self.min_constraints = min_constraints
self.constraints_message = constraints_message
super(EditPasswordForm, self).__init__(**kwargs)
def validate_password(self, field):
"""Validate password constraints.
:param Field field: Password field
"""
# count number of validated constraints
constraints_met = 0
for constraint in self.constraints:
if re.search(constraint, field.data):
# constraint validated
constraints_met += 1
if constraints_met < self.min_constraints:
raise ValidationError(self.constraints_message)
```
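Outside of any Flask/WTForms context, a hedged sketch of the constraint-counting check used by `validate_password` above; the regexes and password are illustrative.
```python
# Hedged sketch: count how many custom regex constraints a password satisfies.
import re

constraints = [r'[A-Z]', r'[a-z]', r'\d', r'[^\w\s]']  # upper, lower, digit, special
min_constraints = 3
password = 'Sample123'

met = sum(1 for c in constraints if re.search(c, password))
print(met, met >= min_constraints)  # 3 True
```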
|
{
"source": "jeford/mdprop",
"score": 2
}
|
#### File: examples/harmonic_oscillator/1D_HO_animate.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mdprop
from mdprop.units import K_TO_AU
# Set parameters
kT = 0.1
nparticle = 5
dim = 1
k = 1.0
r0 = 0.0
dt = 0.05
# Initialize particles in 1D
np.random.seed(1337)
X = np.random.uniform(-1.5, 1.5, (nparticle, dim))
masses = np.ones((nparticle, 1))
V = mdprop.init.boltzmann(kT, masses, dim)
state = {
'X': X,
'V': V,
'masses': masses,
}
# Construct harmonic oscillator potential
bound = mdprop.potential.SoftSphere(r0, k)
# Construct integrator
vel_update = mdprop.update.VelocityUpdate(bound, masses)
integ = mdprop.integrator.VelocityVerlet(vel_update)
print(integ)
# Initialize plots
xlim = [-2.0, 2.0]
ylim = [-0.2, 3.8]
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
# Left plot holds x coordinate and potential energy
ax = fig.add_subplot(121, aspect='equal', autoscale_on=True,
xlim=xlim, ylim=ylim)
ax2 = fig.add_subplot(121, aspect='equal', autoscale_on=True,
xlim=xlim, ylim=ylim)
ax.set_title('Harmonic Oscillator PE')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax2.set_xlim(xlim)
ax2.set_ylim(ylim)
# Right plot holds phase space diagram
ax3 = fig.add_subplot(122, aspect='equal', autoscale_on=False,
xlim=xlim, ylim=xlim)
ax4 = fig.add_subplot(122, aspect='equal', autoscale_on=False,
xlim=xlim, ylim=xlim)
ax3.set_title('Harmonic Oscillator Phase Space')
ax3.set_xlim(xlim)
ax3.set_ylim(xlim)
ax4.set_xlim(xlim)
ax4.set_ylim(xlim)
# Plot PES
xs = np.linspace(xlim[0], xlim[1])[:, None]
pes = bound.energy_per_particle(xs)
pe = ax2.plot(xs, pes, 'k', ls='-')
# Plot analytic answer of phase space path
circles = [plt.Circle((0, 0), 0.5*i, color='k', fill=False) for i in range(5)]
for c in circles:
ax4.add_artist(c)
# Initialize variables for holding data
particles, = ax.plot([], [], 'bo', ms=6)
phasespace, = ax3.plot([], [], 'bo', ms=6)
# Methods to initialize and update animation
def init():
global particles, phasespace, state
particles.set_data(state['X'][:, 0], bound.energy_per_particle(state['X'], masses=masses))
phasespace.set_data(state['X'][:, 0], state['V'][:, 0])
return particles, phasespace
def animate(i):
global particles, phasespace, integ, state
state = integ.step(dt, state)
particles.set_data(state['X'][:, 0], bound.energy_per_particle(state['X'], masses=masses))
phasespace.set_data(state['X'][:, 0], state['V'][:, 0])
return particles, phasespace
anim = animation.FuncAnimation(fig, animate,
frames=200, interval=1, blit=True)
plt.tight_layout()
plt.show()
```
#### File: examples/joukowsky/shadow.py
```python
import autograd.numpy as np
import autograd as ag
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import mdprop
def _compute_energy(Xi):
r = np.linalg.norm(Xi)
return 5.0 * r + 1.0 / r
#def _compute_energy(Xi):
# return Xi**2
_compute_gradient = ag.grad(_compute_energy)
_compute_hessian = ag.grad(_compute_gradient)
class Joukowsky(mdprop.potential.Potential):
def __init__(self):
pass
def compute_energy_per_particle(self, X, **state):
Es = np.zeros((len(X), ))
for i, Xi in enumerate(X):
Es[i] = _compute_energy(Xi)
return Es
def compute_energy(self, X, **state):
return np.sum(self.compute_energy_per_particle(X, **state))
def compute_gradient(self, X, **state):
V = self.compute_energy(X, **state)
Gs = np.zeros_like(X)
for i, Xi in enumerate(X):
Gs[i] = _compute_gradient(Xi)
return V, Gs
def compute_hessian(self, X, **state):
V = self.compute_energy(X, **state)
Gs = np.zeros_like(X)
Hs = np.zeros_like(X)
for i, Xi in enumerate(X):
Gs[i] = _compute_gradient(Xi)
Hs[i] = _compute_hessian(Xi)
return V, Gs, Hs
pot = Joukowsky()
x = np.linspace(-2.0, -0.25, 100)
#x = np.linspace(-2.0, 2.0, 100)
u = pot.compute_energy_per_particle(x)
_, g, h = pot.compute_hessian(x)
print(x.shape)
print(u.shape)
print(g.shape)
print(h.shape)
e0 = u[0]
v = -np.sqrt(np.abs(u - e0))*np.sign(x)
l0 = (g*v)
lg = g**2
lh = v**2 * h
lt = np.abs(0.5 * lg - lh)
l0m = np.max(np.abs(l0))
lgm = np.max(np.abs(lg))
lhm = np.max(np.abs(lh))
ltm = np.max(np.abs(lt))
l0n = l0 / np.max(np.abs(l0))
lgn = lg / np.max(np.abs(lg))
lhn = lh / np.max(np.abs(lh))
ltn = lt / np.max(np.abs(lt))
def subplot(x, y, z, labely, labelz, filename):
fig, ax1 = plt.subplots()
ax1.plot(x, y, color='tab:blue', label=labely)
ax1.set_ylabel(labely, color='tab:blue')
ax2 = ax1.twinx()
ax2.plot(x, z, color='tab:red', label=labelz)
ax2.set_ylabel(labelz, color='tab:red')
plt.tight_layout()
plt.savefig(filename)
subplot(x, u, l0, 'Pot', 'g^T v', 'l0.eps')
subplot(x, u, lg, 'Pot', 'g^T g', 'lg.eps')
subplot(x, u, lh, 'Pot', 'v^T H v', 'lh.eps')
subplot(x, u, lt, 'Pot', '0.5 g^T g - v^T H v', 'ldiff.eps')
#plt.figure()
##plt.plot(x, u, label='potential')
##plt.plot(x, g, label='gradient')
##plt.plot(x, h, label='hessian')
#plt.plot(x, l0n, label=)
#plt.plot(x, lgn, label='lg / %f' % lgm)
#plt.plot(x, lhn, label='lh / %f' % lhm)
#plt.plot(x, ltn, label='lt / %f' % ltm)
#plt.legend()
#plt.tight_layout()
#plt.savefig("shadow.eps")
```
#### File: examples/morse/morse_interatomic_spring.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mdprop
# Set parameters
kT = 0.1
dim = 2
nparticle = 8
dt = 0.04
# Initiatize ideal gas in 2D
#X = np.random.uniform(-1.5, 1.5, (nparticle, dim))
X = np.random.normal(0.0, 0.25, (nparticle, dim))
masses = np.ones((nparticle, 1))
V = mdprop.init.boltzmann(kT, masses, dim)
X = mdprop.init.center(X, masses)
V = mdprop.init.center(V, masses)
state = {
'X': X,
'V': V,
'masses': masses,
'PE': 0.0,
'aux_momentum_CN': np.zeros((2, 1*nparticle)),
'simulation_time': 0.00
}
# Construct integrator for Morse fluid
morse = mdprop.potential.Morse(1.0, 0.1, 0.5)
morse_update = mdprop.update.VelocityUpdate(morse.compute_forces)
integ = mdprop.integrator.VelocityVerlet(morse_update)
# Insert interatomic spring force
spring = mdprop.potential.InteratomicSpring(0, 1, 1.5, 0.5, True)
spring_update = mdprop.update.GeneralVelocityUpdate(spring.compute_forces)
td_spring_update = mdprop.update.TimeDependent(spring_update, 0.25, 1.25, 3.0)
integ = integ.compose_into(mdprop.integrator.OneStepIntegrator(td_spring_update), 1.0, 0.5)
# Uncomment for harmonic boundary
#bound = mdprop.potential.SoftSphere(0.0, 0.25)
#bound_update = mdprop.update.GeneralVelocityUpdate(bound.compute_forces)
#integ = integ.compose_into(mdprop.integrator.OneStepIntegrator(bound_update), 1.0, 0.5)
# Uncomment for thermostat
noise = mdprop.update.WhiteNoise(kT, 0.2)
#noise = mdprop.update.ColoredNoise(kT)
integ = integ.compose_into(mdprop.integrator.OneStepIntegrator(noise), 1.0, 0.5)
integ = integ.squash()
print(integ)
# Initialize plots
xlim = [-2.0, 2.0]
ylim = [-2.0, 2.0]
fig = plt.figure()
#fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
# Plot holds x,y coordinates
ax = fig.add_subplot(111, aspect='equal', autoscale_on=True,
xlim=xlim, ylim=ylim)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# Initialize variables for holding data
particles, = ax.plot([], [], 'bo', ms=6)
# Methods to initialize and update animation
def init():
global particles, state
particles.set_data(state['X'][:, 0], state['X'][:, 1])
#PE = morse.compute_energy(state['X'])
#KE = mdprop.hook.kinetic_energy.compute(state)['kinetic_energy']
#TE = KE + PE
return particles,
def animate(i):
global particles, integ, state
state = integ.step(dt, state)
state['simulation_time'] = state['simulation_time'] + dt
particles.set_data(state['X'][:, 0], state['X'][:, 1])
#PE = morse.compute_energy(state['X'])
#KE = mdprop.hook.kinetic_energy.compute(state)['kinetic_energy']
#TE = KE + PE
#print(TE)
return particles,
anim = animation.FuncAnimation(fig, animate,
frames=10, interval=1, blit=False)
plt.tight_layout()
plt.show()
```
#### File: mdprop/mdprop/io.py
```python
import numpy as np
import pickle as pkl
import json
from . import units
def read_xyz(filename, unit_conversion=units.ANGSTROM_TO_AU):
"""
Read xyz file of atomic coordinates.
Args:
filename: xyz file to read coordinates from.
"""
X = []
symbols = []
with open(filename, 'r') as fin:
lines = fin.readlines()
natom = int(lines[0])
for l in lines[2:2+natom]:
sp = l.split()
symbols.append(sp[0])
X.append([float(s) for s in sp[1:]])
#X.append((float(sp[1]), float(sp[2]), float(sp[3])))
X = np.array(X) * unit_conversion
return X, np.array(symbols)
def read_traj_xyz(filename, unit_conversion=units.ANGSTROM_TO_AU, spaced=False):
"""
Read xyz, vel trajectory file.
Args:
filename: xyz file to read coordinates from.
unit_conversion: multiplicative factor to change units of file
spaced: True if there is a space between frames, otherwise false
Returns:
X, symbols: list of frames of coordinates, and list of atomic symbols
"""
X = []
currX = []
symbols = []
natom = 0
frame = -1
space = int(spaced)
# Extract data about all frames
with open(filename, 'r') as fin:
natom = int(fin.readline())
fin.readline() # skip second line
for i in range(natom):
symbols.append(fin.readline().split()[0])
# Extract coordinates
with open(filename, 'r') as fin:
for i, line in enumerate(fin):
mod = i%(2+natom+space)
if mod==0:
if currX:
X.append(np.array(currX) * unit_conversion)
currX = []
frame += 1
# We don't care about second lines of frame
elif mod==1:
pass
# Read in lines
elif mod>=2 and mod<2+natom:
sp = line.split()
currX.extend(list(map(float, sp[1:])))
# Get final frame
if currX:
X.append(np.array(currX) * unit_conversion)
return X, symbols
def read_xyz_bagel_input(filename, unit_conversion=1.0):
"""
Read coordinates from BAGEL input json file.
Args:
filename: BAGEL input file to read coordinates from.
unit_conversion: conversion factor of units, default 1.0 (a.u.)
"""
with open(filename, 'r') as fin:
dct = json.load(fin)
S = [d['atom'] for d in dct['bagel'][0]['geometry']]
X = np.array([d['xyz'] for d in dct['bagel'][0]['geometry']])
return X, S
def read_xyz_bagel_output(filename, unit_conversion=1.0):
"""
Read coordinates from BAGEL output file, returns all frames found
Args:
filename: BAGEL output file to read coordinates from.
unit_conversion: conversion factor of units, default 1.0 (a.u.)
Returns:
Xs: list of (natom, 3) np.arrays of coordinates
S: list of atomic symbols
"""
Xs = []
S = []
ibegin = -2
nframes = 0
read = False
with open(filename, 'r') as fin:
for i, line in enumerate(fin):
if "*** Geometry ***" in line:
ibegin = i
read = True
nframes += 1
Xcurr = []
elif i == ibegin + 1:
continue
elif read and "atom" in line:
sp = line.split()
if nframes == 1:
S.append(sp[3][1:-2])
Xcurr.append([sp[7][:-1], sp[8][:-1], sp[9][:-1]])
elif read:
Xs.append(np.array(Xcurr, dtype=np.float64))
read = False
return Xs, S
def save_xyz_filehandle(X, symbols, handle, text="", format_str="% .11E"):
"""
Write coordinates and symbols to given file handle.
Args:
X: coordinates (can also append velocities or whatever)
symbols: atomic symbols
handle: file handle to write to
text: Helper text in second line
format_str: formatting for coordinates to write
"""
Xsh = np.shape(X)
handle.write("%d\n" % Xsh[0])
handle.write(text)
handle.write("\n")
for i in range(Xsh[0]):
handle.write(symbols[i])
for j in range(Xsh[1]):
handle.write("\t")
handle.write(format_str % X[i, j])
handle.write("\n")
def save_xyz(X, symbols, filename, unit_conversion=1.0/units.ANGSTROM_TO_AU, text="", format_str="% .11E"):
"""
Write coordinates and symbols to given filename.
Args:
X: coordinates (can also append velocities or whatever)
symbols: atomic symbols
filename: file name to write to
        unit_conversion: unit conversion of coordinates (defaults to converting a.u. to Angstrom)
text: Helper text in second line
format_str: formatting for coordinates to write
"""
with open(filename, 'w') as fout:
Xc = np.array(X) * unit_conversion
save_xyz_filehandle(Xc, symbols, fout, text, format_str)
def save_xyz_bagel(X, symbols, filename, unit_conversion=1.0, format_str="% .11E"):
"""
Write coordinates and symbols to given filename in BAGEL's json syntax in atomic units
Args:
X: coordinates (can also append velocities or whatever)
symbols: atomic symbols
filename: file name to write to
        unit_conversion: unit conversion of coordinates (default 1.0, i.e. atomic units)
format_str: formatting for coordinates to write
"""
# Construct dict
Xc = np.array(X) * unit_conversion
with open(filename, 'w') as fout:
fout.write('\t{ "geometry" : [\n')
for i, s in enumerate(symbols):
fout.write(('\t\t{ "atom": "%s" , "xyz": [ ' + format_str + ', ' + format_str + ', ' + format_str + ' ] },\n') % (s, Xc[i, 0], Xc[i, 1], Xc[i, 2]))
fout.write('\t] },')
def save_traj_xyz(Xs, symbols, filename, texts=None, unit_conversion=1.0/units.ANGSTROM_TO_AU, format_str="% .11E"):
"""
Write coordinates and symbols to given filename for trajectory.
Args:
Xs: list of X coordinates to save
symbols: atomic symbols
filename: file name to write to
texts: lines of text to print between atom number and symbols/coordinates
        unit_conversion: unit conversion of coordinates (defaults to converting a.u. to Angstrom)
format_str: formatting for coordinates to write
"""
if texts is None:
texts = ["Frame %d"%i for i in range(len(Xs))]
with open(filename, 'w') as fout:
for i, X in enumerate(Xs):
Xc = X * unit_conversion
save_xyz_filehandle(Xc, symbols, fout, texts[i], format_str)
def save_traj_hist(history, filename, texts=None, unit_conversion=1.0/units.ANGSTROM_TO_AU, format_str="% .11E"):
"""
Write coordinates and symbols to given filename for trajectory.
Args:
history: list of state dicts
filename: file name to write to
texts: lines of text to print between atom number and symbols/coordinates
        unit_conversion: unit conversion of coordinates (defaults to converting a.u. to Angstrom)
format_str: formatting for coordinates to write
"""
if history[0].get('symbols', None) is None:
raise ValueError("State key 'symbols' required to output xyz.")
symbols = history[0]['symbols']
if texts is None:
texts = ["Frame %d, time %f" % (i, frame.get('simulation_time', 0.0)) for i, frame in enumerate(history)]
Xs = [frame['X'] for frame in history]
save_traj_xyz(Xs, symbols, filename, texts, unit_conversion, format_str)
def save_traj_hist_pkl(history, filename):
with open(filename, 'wb') as fout:
pkl.dump(history, fout, protocol=2)
def load_traj_hist_pkl(filename):
with open(filename, 'rb') as fin:
history = pkl.load(fin)
return history
```
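A minimal usage sketch for the I/O helpers above. The file names are placeholders and the snippet only assumes the `mdprop` package is importable; it illustrates the read/write round trip rather than being part of the library.
```python
from mdprop import io

# Read coordinates (converted from Angstrom to a.u. by default) and symbols.
X, symbols = io.read_xyz("water.xyz")

# ... manipulate X here ...

# Write the geometry back out; save_xyz converts a.u. to Angstrom by default.
io.save_xyz(X, symbols, "water_shifted.xyz", text="shifted geometry")
```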
#### File: mdprop/mdprop/potential.py
```python
import numpy as np
from . import utils
"""
potential.py contains several harmonic boundary potentials, such as soft
spherical boundaries (SoftSphere) or soft hyperplane boundaries (SoftHalfSpace).
It also contains simple interparticle potentials such as InteratomicSpring and
Morse. Lastly it contains model potentials for optimization or dynamics such as
the Rosenbrock function and the HenonHeiles model.
All potentials have a compute_energy and compute_gradient function, which takes in a geometry X.
Many model potentials also have a compute_energy_per_particle method which returns an (n, ) array containing each particle's energy.
Model potentials taking in a magnitude parameter can generally use a vector of magnitudes for cases when perhaps mass-dependent magnitudes are desired.
All methods expect an X argument which has the shape (n, k) where n is the
number of particles, and k is the dimension in space each particle lives,
generally 1 to 3.
"""
class Potential(object):
"""
Base class for simple potentials and the forces they exert on dynamic
simulations. Each potential object has defined compute_energy(X) and
compute_gradient(X); compute_force(X) is automatically defined in this base
class. Certain per-particle potentials also have
compute_energy_per_particle(X) to return componentwise potential energies.
Cached versions of compute_quantity() are simply named 'quantity()'
"""
def __init__(self):
if self.compute_energy is not Potential.compute_energy:
self.energy = utils.np_cache(cache_size=4, arg_ind=0)(self.compute_energy)
if self.compute_energy_per_particle is not Potential.compute_energy_per_particle:
self.energy_per_particle = utils.np_cache(cache_size=4, arg_ind=0)(self.compute_energy_per_particle)
if self.compute_gradient is not Potential.compute_gradient:
self.gradient = utils.np_cache(cache_size=4, arg_ind=0)(self.compute_gradient)
self.hessian = utils.np_cache(cache_size=4, arg_ind=0)(self.compute_hessian)
def compute_energy(self, *args, **kwargs):
"""
Compute the energy corresponding to the potential
Args:
X ({nparticle, ndim} ndarray): coordinates
*args
**kwargs
Returns:
(float): energy corresponding to configuration
"""
raise NotImplementedError
def compute_energy_per_particle(self, *args, **kwargs):
"""
Compute the energy per particle corresponding to the potential
Args:
X ({nparticle, ndim} ndarray): coordinates
*args
**kwargs
Returns:
({nparticle,} ndarray): energies
"""
raise NotImplementedError
def compute_gradient(self, *args, **kwargs):
"""
Compute the gradient of the potential
Args:
X ({nparticle, ndim} ndarray): coordinates
*args
**kwargs
Returns:
(float, {nparticle, ndim} ndarray): energy, gradient tuple
"""
raise NotImplementedError
def compute_force(self, *args, **kwargs):
"""
Compute the force of the potential
Args:
X ({nparticle, ndim} ndarray): coordinates
*args
**kwargs
Returns:
(float, {nparticle, ndim} ndarray): energy, force tuple
"""
pe, g = self.compute_gradient(*args, **kwargs)
return pe, -g
def compute_hessian(self, *args, **kwargs):
"""
Compute the hessian of the potential, defaults to numerical hessian but can be overwritten
Args:
X ({nparticle, ndim} ndarray): coordinates
*args
**kwargs
Returns:
{nparticle*ndim, nparticle*ndim} ndarray: hessian matrix
"""
X = args[0]
if len(args) > 1:
H = utils.numerical_gradient(X, self.gradient, eps=1.E-5, output_ind=1, *(args[1:]), **kwargs)
else:
H = utils.numerical_gradient(X, self.gradient, eps=1.E-5, output_ind=1, **kwargs)
H_r = np.reshape(H, (np.size(X), np.size(X)))
H_sym = 0.5 * (H_r + np.transpose(H_r))
return H_sym
def compute_hessian_vector_product(self, *args, **kwargs):
"""
Compute the hessian vector product, equivalent to a symmetrized directional derivative of the gradient
Args:
X ({nparticle, ndim} ndarray): coordinates
V ({nparticle, ndim} ndarray): direction in which to evaluate change of gradient
*args
**kwargs
Returns:
{nparticle, ndim} ndarray: hessian vector product
"""
X = args[0]
V = args[1]
eps = 1E-5
        if len(args) > 2:
            # self.gradient returns an (energy, gradient) tuple; use the gradient part only
            Hv = 0.5 / eps * (self.gradient(X + eps * V, *(args[2:]), **kwargs)[1] - self.gradient(X - eps * V, *(args[2:]), **kwargs)[1])
        else:
            Hv = 0.5 / eps * (self.gradient(X + eps * V, **kwargs)[1] - self.gradient(X - eps * V, **kwargs)[1])
return Hv
def force(self, *args, **kwargs):
"""
Compute the force of the potential, includes caching
Args:
X ({nparticle, ndim} ndarray): coordinates
*args
**kwargs
Returns:
(float, {nparticle, ndim} ndarray): energy, force tuple
"""
pe, g = self.gradient(*args, **kwargs)
return pe, -g
def add(self, other):
"""
Construct a PotentialList object from two potentials
Args:
pot1 (Potential): first potential
pot2 (Potential): second potential
Returns:
(PotentialList): combination of two potentials with unit coefficients
"""
if isinstance(self, PotentialList):
potentials = [p for p in self.potentials]
else:
potentials = [self]
if isinstance(other, PotentialList):
potentials += [p for p in other.potentials]
else:
potentials += [other]
return PotentialList(potentials)
@property
def name(self):
return self.__class__.__name__.lower()
class PotentialWrapper(Potential):
"""
Simple wrapper for energy/gradient/force functions so that they play nice with Update class
"""
def __init__(self, energy=None, energy_per_particle=None, gradient=None, force=None):
"""
Args:
energy (function): returns energy of given configuration
energy_per_particle (function): returns ({nparticle,} ndarray) of particle energies for given configuration
gradient (function): returns (energy, gradient) tuple corresponding to given configuration
            force (function): returns (energy, force) tuple corresponding to given configuration
"""
if energy is not None:
self.compute_energy = energy
if energy_per_particle is not None:
self.compute_energy_per_particle = energy_per_particle
if gradient is not None:
self.compute_gradient = gradient
if force is not None:
self.compute_force = force
super(PotentialWrapper, self).__init__()
class PotentialList(Potential):
"""
Simple class to combine potentials into a single potential, for ease of use with a single Update
Args:
potentials (list of Potential objects):
energy_coeffs ({npotentials,} ndarray): coefficients with which to add potential energies together (default ones)
grad_coeffs ({npotentials,} ndarray): coefficients with which to add potential gradients together (default ones)
Note:
The difference in coefficients is useful when you only want to hold onto the energy corresponding to a certain potential but want to use the gradient for multiple.
"""
def __init__(self, potentials, energy_coeffs=None, grad_coeffs=None):
self.potentials = potentials
        self.energy_coeffs = energy_coeffs if energy_coeffs is not None else np.ones((len(potentials),))
        self.grad_coeffs = grad_coeffs if grad_coeffs is not None else np.ones((len(potentials),))
super(PotentialList, self).__init__()
def __getitem__(self, index):
return self.potentials[index]
def compute_energy(self, X, **state):
E = 0.0
for coeff, potential in zip(self.energy_coeffs, self.potentials):
E += coeff * potential.compute_energy(X, **state)
return E
def compute_gradient(self, X, **state):
E = 0.0
G = np.zeros_like(X)
for e_coeff, g_coeff, potential in zip(self.energy_coeffs, self.grad_coeffs, self.potentials):
Ecurr, Gcurr = potential.compute_gradient(X, **state)
E += e_coeff * Ecurr
G += g_coeff * Gcurr
return E, G
class SoftHalfSpace(Potential):
"""
Exerts linearly growing forces along the negative of the normal if the
particle position minus the offset has a positive projection along the
normal.
Args:
normal ({ndim,} ndarray): vector normal to the plane defining the halfspace
magnitude (float or {nparticle,} ndarray): Magnitude of applied force
offset ({ndim,} ndarray): vector of offset from origin to any point in hyperplane defining the halfspace
"""
def __init__(self,
normal,
magnitude=1.0,
offset=0.0,
):
self.normal = np.reshape(normal / np.linalg.norm(normal), (1, -1))
self.offset = np.reshape(offset, (1, -1))
self.magnitude = magnitude
super(SoftHalfSpace, self).__init__()
def compute_energy_per_particle(self, X, **state):
X_offset = X - self.offset
X_dot_normal = np.reshape(np.inner(X_offset, self.normal), (-1, 1))
violation = np.maximum(0.0, X_dot_normal)
pe = 0.5 * self.magnitude * violation**2
return pe
def compute_energy(self, X, **state):
pe = self.compute_energy_per_particle(X, **state)
return np.sum(pe)
def compute_gradient(self, X, **state):
X_offset = X - self.offset
X_dot_normal = np.reshape(np.inner(X_offset, self.normal), (-1, 1))
violation = np.maximum(0.0, X_dot_normal)
pe = 0.5 * self.magnitude * violation**2
grad = self.normal * self.magnitude * violation
pe = np.sum(pe)
return pe, grad
class SoftCube(Potential):
"""
Exerts linearly growing forces along each coordinate that is beyond the side length of the cube centered at the origin
Args:
bound (float): Side length of cube to use divided by 2; if abs(x[0, 0]) is greater than bound, then the force is exerted
magnitude (float or {nparticle,} ndarray): Magnitude of applied force
offset ({ndim,} ndarray): coordinates of center of cube
"""
def __init__(
self,
bound,
magnitude=1.0,
offset=0.0,
):
self.bound = bound
self.offset = np.reshape(offset, (1, -1))
self.magnitude = magnitude
super(SoftCube, self).__init__()
def compute_energy_per_particle(self, X, **state):
diff = np.abs(X - self.offset) - self.bound
pe = np.where(diff > 0.0, 0.5 * self.magnitude * diff**2, 0.0)
pe = np.sum(pe, axis=1)
return pe
def compute_energy(self, X, **state):
pe = self.compute_energy_per_particle(X, **state)
return np.sum(pe)
def compute_gradient(self, X, **state):
X_offset = X - self.offset
diff = np.abs(X_offset) - self.bound
pe = np.where(diff > 0.0, 0.5 * self.magnitude * diff**2, 0.0)
grad = np.where(np.abs(X_offset) > self.bound, np.sign(X_offset) * self.magnitude * diff, 0.0)
pe = np.sum(pe)
return pe, grad
class SoftSphere(Potential):
r"""
Exerts linearly growing force along vector of particles displacement from sphere's center
    .. math:: V(x_i) = 0.5 k \max(0, ||x_i - x_0|| - r)^2
Args:
radius (float): radius of sphere to use; if norm(x[0, :]) is greater than radius, then the force is exerted
magnitude (float or {nparticle,} ndarray): Magnitude of applied force
offset ({ndim,} ndarray): origin of spherical potential
"""
def __init__(
self,
radius,
magnitude=1.0,
offset=0.0,
):
self.radius = radius
self.offset = np.reshape(offset, (1, -1))
self.magnitude = magnitude
super(SoftSphere, self).__init__()
def compute_energy_per_particle(self, X, **state):
X_offset = X - self.offset
dists = np.linalg.norm(X_offset, axis=1).reshape(-1, 1)
pe = 0.5 * self.magnitude * np.maximum(0.0, dists - self.radius)**2
return pe
def compute_energy(self, X, **state):
pe = self.compute_energy_per_particle(X, **state)
return np.sum(pe)
def compute_gradient(self, X, **state):
X_offset = X - self.offset
dists = np.linalg.norm(X_offset, axis=1).reshape(-1, 1)
violation = np.maximum(0.0, dists - self.radius)
pe = 0.5 * self.magnitude * violation**2
grad = X_offset / dists * self.magnitude * violation
pe = np.sum(pe)
return pe, grad
class InteratomicSpring(Potential):
"""
    Soft equivalent of the PositionDriver Update; places a harmonic well around
    the selected atoms' bond length so that they move toward or away from one
    another, while still allowing them to vibrate.
Args:
atom_ind1 (int): atom index of first atom to pull toward one another
atom_ind2 (int): atom index of second atom to pull toward one another
dist_stop (float): distance at which to stop pulling the atoms together
magnitude (float): Magnitude of applied force
interpolate (bool): Linearly move the wells based on time_frac if Update is TimeDependent
"""
def __init__(self,
atom_ind1,
atom_ind2,
dist_stop,
magnitude,
interpolate=False,
):
self.atom_ind1 = atom_ind1
self.atom_ind2 = atom_ind2
self.dist_start = None
self.dist_stop = dist_stop
self.magnitude = magnitude
self.interpolate = interpolate
self.time_frac = 1.0
super(InteratomicSpring, self).__init__()
def compute_energy(self, X, **state):
if self.interpolate:
# Restart movement cycle
if state['time_frac'] <= self.time_frac:
# Compute initial distance
self.dist_start = np.linalg.norm(X[self.atom_ind1, :] - X[self.atom_ind2, :])
self.time_frac = state['time_frac']
des_dist = (1.0 - self.time_frac) * self.dist_start + self.time_frac * self.dist_stop
else:
des_dist = self.dist_stop
vec = X[self.atom_ind2, :] - X[self.atom_ind1, :]
curr_dist = np.linalg.norm(vec)
diff = curr_dist - des_dist
pe = 0.5 * self.magnitude * diff**2
return pe
def compute_gradient(self, X, **state):
if self.interpolate:
# Restart movement cycle
if state['time_frac'] <= self.time_frac:
# Compute initial distance
self.dist_start = np.linalg.norm(X[self.atom_ind1, :] - X[self.atom_ind2, :])
self.time_frac = state['time_frac']
des_dist = (1.0 - self.time_frac) * self.dist_start + self.time_frac * self.dist_stop
else:
des_dist = self.dist_stop
vec = X[self.atom_ind2, :] - X[self.atom_ind1, :]
curr_dist = np.linalg.norm(vec)
vec /= curr_dist
diff = curr_dist - des_dist
pe = 0.5 * self.magnitude * diff**2
grad = np.zeros_like(X)
grad[self.atom_ind1, :] = -self.magnitude * diff * vec
grad[self.atom_ind2, :] = self.magnitude * diff * vec
return pe, grad
class InteratomicLinear(Potential):
"""
    Linear potential V(r) = k*r between two atoms. Useful for a force-modified PES when given a negative magnitude.
Args:
atom_ind1 (int): atom index of first atom to pull toward one another
atom_ind2 (int): atom index of second atom to pull toward one another
magnitude (float): Magnitude of applied force
"""
def __init__(self,
atom_ind1,
atom_ind2,
magnitude,
):
self.atom_ind1 = atom_ind1
self.atom_ind2 = atom_ind2
self.magnitude = magnitude
super(InteratomicLinear, self).__init__()
def compute_energy(self, X, **state):
vec = X[self.atom_ind2, :] - X[self.atom_ind1, :]
curr_dist = np.linalg.norm(vec)
pe = self.magnitude * curr_dist
return pe
def compute_gradient(self, X, **state):
vec = X[self.atom_ind2, :] - X[self.atom_ind1, :]
curr_dist = np.linalg.norm(vec)
pe = self.magnitude * curr_dist
grad = np.zeros_like(X)
grad[self.atom_ind1, :] = -self.magnitude * vec / curr_dist
grad[self.atom_ind2, :] = self.magnitude * vec / curr_dist
return pe, grad
class ClassicalCoulomb(Potential):
"""
Pairwise additive potential of q1q2/r, charges will not be updated from initial values
Args:
q ({nparticle,} ndarray): charges of each particle
magnitude (float or {nparticle, nparticle} ndarray): magnitudes to scale the energy/gradient contributions
"""
def __init__(self, q, magnitude):
self.q = np.reshape(q, (-1, 1))
self.q2 = self.q * np.transpose(self.q)
self.magnitude = magnitude
super(ClassicalCoulomb, self).__init__()
def compute_energy_per_particle(self, X, **state):
dists = utils.pairwise_dist(X)
dists = np.where(dists <= 1E-12, np.inf, dists)
pairwise_energy = self.magnitude * self.q2 / dists
return 0.5 * np.sum(pairwise_energy, axis=1)
def compute_energy(self, X, **state):
return np.sum(self.compute_energy_per_particle(X, **state))
def compute_gradient(self, X, **state):
#return (self.compute_energy(X), utils.numerical_gradient(X, self.compute_energy))
nparticle = X.shape[0]
xyz_view = X[:, :, None]
xyz_tran = np.transpose(xyz_view, (2, 1, 0))
dist_vec = xyz_view - xyz_tran
dists = np.linalg.norm(dist_vec, axis=1)
dists = np.where(dists <= 1E-12, np.inf, dists)
inv_dists = 1.0/dists
pairwise_energy = self.magnitude * self.q2 * inv_dists
pe_particle = 0.5 * np.sum(pairwise_energy, axis=1)
pe_tot = np.sum(pe_particle)
grad_mag = np.reshape(pairwise_energy * inv_dists * inv_dists, (nparticle, 1, nparticle))
grad = -np.sum(dist_vec * grad_mag, axis=2)
return pe_tot, grad
class Harmonic(Potential):
r"""
Harmonic force given as Taylor expansion of energy.
.. math:: E(X) = E(X0) + (X-X0)^T \nabla E(X0) + 1/2 (X-X0)^T \nabla^2 E(X0) (X-X0)
Args:
X0 ({nparticle, ndim} ndarray): coordinates at which expansion is made
E0 (float): energy at X0
grad ({nparticle, ndim} ndarray): gradient at X0
hessian ({nparticle*ndim, nparticle*ndim} ndarray): hessian at X0
"""
    def __init__(self, X0, E0=0.0, grad=None, hessian=None):
        self.X0 = X0
        self.E0 = E0
        if grad is None:
            self.grad = np.zeros_like(X0)
        else:
            self.grad = grad
        # Store the expansion hessian under a name that is not clobbered by the
        # cached hessian() callable installed in Potential.__init__
        if hessian is None:
            self.hess = np.zeros_like(np.outer(X0, X0))
        else:
            self.hess = hessian
        super(Harmonic, self).__init__()
    def compute_energy(self, X, **state):
        dX = np.reshape(X - self.X0, (-1, 1))
        E = self.E0 + np.sum(self.grad.reshape((-1, 1)) * dX) + 0.5 * float(np.dot(np.transpose(dX), np.dot(self.hess, dX)))
        return E
    def compute_gradient(self, X, **state):
        dX = np.reshape(X - self.X0, (-1, 1))
        hp = np.dot(self.hess, dX)
        E = self.E0 + np.sum(self.grad.reshape((-1, 1)) * dX) + 0.5 * np.sum(dX * hp)
        G = self.grad + np.reshape(hp, np.shape(X))
        return E, G
class Morse(Potential):
r"""
Morse inter particle potential function.
Can be given a single parameters to treat all pairs, or matrix of parameters
for unique pairwise interactions.
.. math:: E_{ij} = D_{ij} * ((1 - \exp(-a_{ij} * (r_{ij} - r_{ij}^{eq})))^2 - 1)
Args:
D (float or {nparticle, nparticle} ndarray): dissociation energy parameter
a (float or {nparticle, nparticle} ndarray): exponential constant / width of well
r_equil (float or {nparticle, nparticle} ndarray): equilibrium distance
"""
def __init__(self, D, a, r_equil):
self.D = D
self.a = a
self.r_equil = r_equil
super(Morse, self).__init__()
def compute_energy_per_particle(self, X, **state):
dists = utils.pairwise_dist(X)
pe_contrib = self.D * ((1.0 - np.exp(-self.a * (dists - self.r_equil)))**2 - 1.0)
np.fill_diagonal(pe_contrib, 0.0)
pe_particle = 0.5 * np.sum(pe_contrib, axis=1)
return pe_particle
def compute_energy(self, X, **state):
pe_particle = self.compute_energy_per_particle(X, **state)
pe_tot = np.sum(pe_particle)
return pe_tot
def compute_gradient(self, X, **state):
nparticle = X.shape[0]
xyz_view = X[:, :, None]
xyz_tran = np.transpose(xyz_view, (2, 1, 0))
dist_vec = xyz_view - xyz_tran
dists = np.linalg.norm(dist_vec, axis=1)
with np.errstate(divide='ignore'):
inv_dist = np.reshape(np.where(dists > 0.0, 1.0/dists, 0.0), (nparticle, 1, nparticle))
exp_term = np.exp( - self.a * (dists - self.r_equil))
pe_contrib = self.D * ((1.0 - exp_term)**2 - 1.0)
np.fill_diagonal(pe_contrib, 0.0)
pe_particle = 0.5 * np.sum(pe_contrib, axis=1)
pe_tot = np.sum(pe_particle)
grad_mag = np.reshape(self.D * self.a * ( exp_term - exp_term**2 ), (nparticle, 1, nparticle))
grad = 2.0 * np.sum(dist_vec * inv_dist * grad_mag, axis=2)
return pe_tot, grad
class Kepler(Potential):
r"""
Planetary model with inverse distance potential from the origin.
    .. math:: E = -k / ||r||
Args:
magnitude (float): magnitude of applied force
"""
def __init__(self, magnitude):
self.magnitude = magnitude
super(Kepler, self).__init__()
def compute_energy_per_particle(self, X, **state):
return -self.magnitude / np.linalg.norm(X, axis=-1)
def compute_energy(self, X, **state):
return np.sum(self.compute_energy_per_particle(X, **state))
def compute_gradient(self, X, **state):
V = -self.magnitude / np.linalg.norm(X, axis=-1)
G = self.magnitude * X / np.linalg.norm(X, axis=-1, keepdims=True)**3
return V, G
@staticmethod
def init_cond(eccentricity):
q = np.array([[1.0 - eccentricity, 0.0]])
p = np.array([[0.0, np.sqrt((1.0 + eccentricity) / (1.0 - eccentricity))]])
return q, p
class Rosenbrock(Potential):
r"""
Classic model for non-convex optimization functions.
.. math:: f(x,y) = (a-x)^2 + b(y - x^2)^2
References:
https://en.wikipedia.org/wiki/Rosenbrock_function
Args:
a (float): a in equation
b (float): b in equation
"""
def __init__(
self,
a=1.0,
b=100.0,
):
self.a = a
self.b = b
super(Rosenbrock, self).__init__()
def compute_energy_per_particle(self, X, **state):
if X.shape[1] != 2:
raise ValueError("Shape for Rosenbrock function must be N x 2")
pe = (self.a - X[:, 0])**2 + self.b*(X[:, 1] - X[:, 0]**2)**2
return pe
def compute_energy(self, X, **state):
return np.sum(self.compute_energy_per_particle(X, **state))
def compute_gradient(self, X, **state):
if X.shape[1] != 2:
raise ValueError("Shape for Rosenbrock function must be N x 2")
grad = np.empty_like(X)
ymx2 = X[:, 1] - X[:, 0]**2
pe = (self.a - X[:, 0])**2 + self.b*ymx2**2
grad[:, 0] = 2.0*(X[:, 0] - self.a) - 4.0*self.b*X[:, 0]*ymx2
grad[:, 1] = 2.0*self.b*ymx2
return pe, grad
class HeinonHeiles(Potential):
r"""
Chaotic system model
.. math:: V(x, y) = 1/2 (x^2 + y^2) + \alpha (x^2 y - y^3 / 3)
Args:
alpha (float): as in equation
"""
def __init__(self, alpha=1.0):
self.alpha = alpha
super(HeinonHeiles, self).__init__()
    def compute_energy_per_particle(self, X, **state):
        if X.shape[1] != 2:
            raise ValueError("Shape for HenonHeiles function must be N x 2")
        X02 = X[:, 0]**2
        pe = 0.5 * (X02 + X[:, 1]**2) + self.alpha * (X02 * X[:, 1] - X[:, 1]**3 / 3.0)
        return pe
    def compute_energy(self, X, **state):
        return np.sum(self.compute_energy_per_particle(X, **state))
    def compute_gradient(self, X, **state):
        if X.shape[1] != 2:
            raise ValueError("Shape for HenonHeiles function must be N x 2")
        X02 = X[:, 0]**2
        pe = np.sum(0.5 * (X02 + X[:, 1]**2) + self.alpha * (X02 * X[:, 1] - X[:, 1]**3 / 3.0))
        G = np.zeros_like(X)
        G[:, 0] = X[:, 0] + 2.0 * self.alpha * X[:, 0] * X[:, 1]
        G[:, 1] = X[:, 1] + self.alpha * (X02 - X[:, 1]**2)
        return pe, G
class MullerBrown(Potential):
r"""
Model potential often used for transition state search testing.
.. math:: E = \sum_i A_i \exp(a_i * (x - x_i^0)^2 + b_i(x - x_i^0)(y - y_0) + c_i(y - y_i^0)^2)
Args:
A ({1, nterms} ndarray):
a ({1, nterms} ndarray):
b ({1, nterms} ndarray):
c ({1, nterms} ndarray):
x0 ({1, nterms} ndarray):
y0 ({1, nterms} ndarray):
"""
def __init__(self,
A=None,
a=None,
b=None,
c=None,
x0=None,
y0=None,
):
        self.A = A if A is not None else np.array([[-200.0, -100.0, -170.0, 15.0]])
        self.a = a if a is not None else np.array([[ -1.0, -1.0, -6.5, 0.7]])
        self.b = b if b is not None else np.array([[ 0.0, 0.0, 11.0, 0.6]])
        self.c = c if c is not None else np.array([[ -10.0, -10.0, -6.5, 0.7]])
        self.x0 = x0 if x0 is not None else np.array([[ 1.0, 0.0, -0.5, -1.0]])
        self.y0 = y0 if y0 is not None else np.array([[ 0.0, 0.5, 1.5, 1.0]])
super(MullerBrown, self).__init__()
def compute_energy_per_particle(self, X, **state):
x = X[:, 0:1] # Maintain (N, 1) shape
y = X[:, 1:2]
V = np.sum(self.A * np.exp(
self.a * (x - self.x0)**2
+ self.b * (x - self.x0) * (y - self.y0)
+ self.c * (y - self.y0)**2
), axis=1)
return V
def compute_energy(self, X, **state):
return np.sum(self.compute_energy_per_particle(X, **state))
def compute_gradient(self, X, **state):
x = X[:, 0:1] # Maintain (N, 1) shape
y = X[:, 1:2]
Vcomp = self.A * np.exp(
self.a * (x - self.x0)**2
+ self.b * (x - self.x0) * (y - self.y0)
+ self.c * (y - self.y0)**2
)
V = np.sum(Vcomp)
G = np.zeros_like(X)
G[:, 0] = np.sum(Vcomp * (2.0 * self.a * (x - self.x0) + self.b * (y - self.y0)), axis=1)
        G[:, 1] = np.sum(Vcomp * (self.b * (x - self.x0) + 2.0 * self.c * (y - self.y0)), axis=1)
return V, G
class Prinz(Potential):
r"""
1D model potential for Markov state modeling, should only be used in [-1, 1] for numerical stability
    .. math:: V(x) = A (x^b + \sum_i c_i \exp(d_i (x - e_i)^2))
Args:
A (float):
b (float):
c ({1, nterms} ndarray):
d ({1, nterms} ndarray):
e ({1, nterms} ndarray):
"""
def __init__(self, A=None, b=None, c=None, d=None, e=None):
        self.A = A if A is not None else 4.0
        self.b = b if b is not None else 8.0
        self.c = c if c is not None else np.array([[0.8, 0.2, 0.5]])
        self.d = d if d is not None else np.array([[-80, -80, -40]])
        self.e = e if e is not None else np.array([[0.0, -0.5, 0.5]])
super(Prinz, self).__init__()
def compute_energy_per_particle(self, X, **state):
V = self.A * (X[:, 0] ** self.b + np.sum(self.c * np.exp(self.d * (X - self.e)**2), axis=-1))
return V
def compute_energy(self, X, **state):
return np.sum(self.compute_energy_per_particle(X, **state))
def compute_gradient(self, X, **state):
V = self.compute_energy(X, **state)
G = self.A * (self.b * X[:, 0] ** (self.b - 1.0) + np.sum(self.c * np.exp(self.d * (X - self.e) ** 2) * self.d * 2.0 * (X - self.e), axis=-1))
G = np.reshape(G, np.shape(X))
return V, G
```
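The potentials above compose through `Potential.add`, which returns a `PotentialList`. A short sketch follows, with arbitrary parameter values chosen only for illustration:
```python
import numpy as np
import mdprop

# Morse fluid plus a soft spherical boundary, combined into one potential.
morse = mdprop.potential.Morse(1.0, 0.1, 0.5)
bound = mdprop.potential.SoftSphere(radius=2.0, magnitude=5.0)
combined = morse.add(bound)          # PotentialList with unit coefficients

X = np.random.normal(0.0, 0.5, (8, 3))
E = combined.compute_energy(X)       # summed energy of both terms
E, F = combined.compute_force(X)     # energy and (8, 3) forces
print(E, F.shape)
```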
#### File: mdprop/mdprop/update.py
```python
import numpy as np
from scipy.linalg import expm, cholesky
import warnings
from . import init, units, utils
class Update(object):
"""
Abstract base class describing single updates to position or velocity (or
other members of the state dict), a list of these is used to construct
an integrator; each update is similar to a single term in a Liouvillian
The __init__ method of each Update object should construct self.params dict
and self.requirements set that specifies the object.
Each update object contains a params dict which governs how to conduct updates,
and should not change. Most __init__ functions are written to take in natural
units, so that they can easily be applied to any system desired.
"""
h5_keys = []
h5_shapes = []
h5_types = []
def __init__(self):
raise NotImplementedError
def __str__(self):
return self.params.get('name', self.__class__.__name__)
def __repr__(self):
st = self.__class__.__name__ \
+ "\n\nParams:\n" \
+ str(self.params) \
+ "\n\nRequirements:\n" \
+ str(self.requirements)
return st
def update(self, step_length, state):
"""
update functions are called in sequence by the ExplicitIntegrator
Args:
step_length: length of step taken, generally given by integrator coeff*dt
state: input dict containing keys/vals needed by update
Returns:
state_update: dict of updates to the current state
"""
raise NotImplementedError
def get_h5_data(self, **kwargs):
h5_shapes_trans = []
for shape in self.h5_shapes:
curr_shape = []
for element in shape:
if isinstance(element, str):
curr_shape.append(kwargs[element])
else:
curr_shape.append(element)
h5_shapes_trans.append(curr_shape)
return self.h5_keys, h5_shapes_trans, self.h5_types
@staticmethod
def get_list_h5_data(hooks, **kwargs):
"""
Given a list of updates, returns H5 tuple containing all uniquely named data
Dynamic shapes such as 'natom' can be specified with **kwargs
"""
h5_keys = []
h5_shapes = []
h5_types = []
h5_key_set = set([])
for h in hooks:
keys, shapes, types = h.get_h5_data(**kwargs)
for k, s, t in zip(keys, shapes, types):
if k not in h5_key_set:
h5_keys.append(k)
h5_shapes.append(s)
h5_types.append(t)
h5_key_set = h5_key_set.union([k])
return h5_keys, h5_shapes, h5_types
class PositionUpdate(Update):
"""
Update position X based on velocity V
Params:
recenter (bool): True to remove COM / COM translation and rotation prior to position update
masses ({nparticle,} ndarray): masses, only required if recenter is True
coord_key (str): key to positions in state
vel_key (str): key to velocities in state
name (str): name of update
"""
h5_keys = ['X']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self, recenter=False, masses=None, coord_key='X', vel_key='V', name="position"):
self.params = {
'recenter' : recenter,
'coord_key' : coord_key,
'vel_key' : vel_key,
'name' : name,
}
self.requirements = set([coord_key, vel_key])
if recenter:
if masses is None:
raise ValueError("Must give masses to enforce recentering in PositionUpdate")
else:
self.params['masses'] = np.reshape(masses, (-1, 1))
self.dX = None
self.X = None
self.V = None
def update(self, step_length, state):
if self.params['recenter']:
self.X, self.V = init.initialize_centered(state[self.params['coord_key']], state[self.params['vel_key']], self.params['masses'])
else:
self.X = state[self.params['coord_key']]
self.V = state[self.params['vel_key']]
self.dX = step_length * self.V
self.X = self.X + self.dX
self.state_update = {
self.params['coord_key']: self.X,
}
if self.params['recenter']:
self.state_update[self.params['vel_key']] = self.V
return self.state_update
class VelocityUpdate(Update):
"""
Update velocities V based on potential.force given
Params:
potential (Potential object): potential.force must take in state['X'], outputing (potential_energy, force_vector)
masses ({nparticle,} ndarray): masses for each particle
coord_key (str): key to positions in state
vel_key (str): key to velocities in state
name (str): name of update, used for naming the energy contribution (default 'potential')
"""
h5_keys = ['V']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self, potential, masses, coord_key='X', vel_key='V', name="potential"):
self.params = {
'potential': potential,
'masses': np.reshape(masses, (-1, 1)),
'coord_key' : coord_key,
'vel_key' : vel_key,
'name': name,
}
self.requirements = set([coord_key, vel_key])
self.E = None
self.F = None
self.dV = None
self.V = None
self.state_update = {}
def update(self, step_length, state):
self.E, self.F = self.params['potential'].force(state[self.params['coord_key']])
self.dV = step_length * self.F / self.params['masses']
self.V = state[self.params['vel_key']] + self.dV
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.state_update = {
self.params['vel_key'] : self.V,
self.params['name'] + '_energy': self.E,
self.params['name'] + '_gradient': -self.F,
'kinetic_energy': KE,
}
return self.state_update
class GeneralVelocityUpdate(Update):
"""
Update velocities V based on force function given.
This object is subtly different from VelocityUpdate in that the force
function can use any object in the state dict, but the forces still
propagate the velocities the same way.
Params:
potential (Potential object): potential.force must take in state['X'], outputing (potential_energy, force_vector)
masses ({nparticle,} ndarray): masses for each particle
recalculate (bool): True to always recalculate force
coord_key (str): key to positions in state
vel_key (str): key to velocities in state
name (str): name of update, used for naming the energy contribution (default 'potential')
"""
h5_keys = ['V']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self,
potential,
masses,
recalculate=False,
vel_key='V',
name="potential",
):
self.params = {
'potential': potential,
'masses': np.reshape(masses, (-1, 1)),
'recalculate': recalculate,
'vel_key' : vel_key,
'name': name,
}
self.requirements = set([vel_key])
self.E = None
self.F = None
self.dV = None
self.V = None
self.state_update = {}
def update(self, step_length, state):
if self.params['recalculate']:
self.E, self.F = self.params['potential'].compute_force(**state)
else:
self.E, self.F = self.params['potential'].force(**state)
self.dV = step_length * self.F / self.params['masses']
self.V = state[self.params['vel_key']] + self.dV
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.state_update = {
self.params['vel_key']: self.V,
self.params['name'] + '_energy': self.E,
self.params['name'] + '_gradient': -self.F,
'kinetic_energy': KE,
}
return self.state_update
class IsokineticVelocityUpdate(Update):
"""
Update velocities while enforcing an isokinetic distribution.
Params:
potential (Potential object): potential.force must take in state['X'], outputing (potential_energy, force_vector)
masses ({nparticle,} ndarray): masses for each particle
kT (float): kinetic energy to constrain to
nhc (bool): True to apply joint isokinetic constraint to velocities and first NHC dofs
name (str): name of update, used for naming the energy contribution (default 'potential')
References:
The Journal of Chemical Physics 118, 2510 (2003); doi: 10.1063/1.1534582
https://www.tandfonline.com/doi/abs/10.1080/00268976.2013.844369
"""
h5_keys = ['V']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self, potential, masses, kT, nhc=False, name="potential"):
self.params = {
'potential': potential,
'masses': np.reshape(masses, (-1, 1)),
'kT': kT,
'name': name,
}
self.requirements = set(['X', 'V'])
self.nhc = nhc
if nhc:
self.requirements.add('aux_velocity_NH')
self.E = None
self.F = None
self.V = None
self.K = None
self.lmbd = None
self.state_update = {}
def update(self, step_length, state):
self.E, self.F = self.params['potential'].force(state['X'])
if self.nhc:
self.L = np.shape(state['aux_velocity_NH'])[1]
self.lmbd = self.lmbd or self.L*self.params['kT']
self.a = self.F * state['V'] / self.lmbd
self.b = self.F**2 / self.params['masses'] / self.lmbd
else:
self.K = self.K or (np.size(state['V']) - 1) * 0.5 * self.params['kT']
self.a = 0.5 / self.K * np.sum(state['V'] * self.F)
self.b = 0.5 / self.K * np.sum(self.F**2 / self.params['masses'])
sqb = np.sqrt(self.b)
arg = step_length * sqb
with np.errstate(divide='ignore', invalid='ignore'): # Hide all the divide by zero warnings
self.s = np.where(
arg > 0.00001,
self.a / self.b * (np.cosh(arg) - 1.0) + 1.0 / sqb * np.sinh(arg),
((((self.b*self.a/24.0)*step_length + self.b/6.0)*step_length + 0.5*self.a)*step_length + 1.0)*step_length
)
self.sdot = np.where(
arg > 0.00001,
self.a / sqb * np.sinh(arg) + np.cosh(arg),
(((self.b*self.a/6.0)*step_length + 0.5*self.b)*step_length + self.a)*step_length + 1.0
)
self.V = (state['V'] + self.s * self.F / self.params['masses']) / self.sdot
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.state_update = {
'V': self.V,
self.params['name'] + '_energy': self.E,
self.params['name'] + '_gradient': -self.F,
'kinetic_energy': KE,
}
if self.nhc:
self.aux_v = np.copy(state['aux_velocity_NH'])
self.aux_v[0] = self.aux_v[0] / self.sdot
self.state_update['aux_velocity_NH'] = self.aux_v
return self.state_update
class TimeDependent(Update):
"""
Update state based on update given, if the condition
simulation_time % time_modulus >= time_start && simulation_time % time_modulus < time_stop
Unlike other updates, this update wraps an existing update and makes it time dependent
Params:
update (Update): Update to make time dependent
time_start (float): scalar to add to remainder (see above) (default 0.0)
time_stop (float): scalar to add to remainder (see above) (default np.inf)
time_modulus (float): modulus parameter of simulation time to determine if to apply Update (see above) (default None)
        scale_step (bool): True to scale the step length by
            (1.0 - s(time_frac)) * scale_start + s(time_frac) * scale_stop,
            which allows updates to be turned on or off adiabatically, e.g. for adiabatic switching
scale_start (float): see scale_step (default 0.0)
scale_stop (float): see scale_step (default 1.0)
switching_func (function): switching function with range and domain [0, 1], see scale_step
null_return (dict): returned in the case that the update is not currently turned on (default {})
name_prefix (str): Renames update with prefix (default 'time_dependent_')
"""
def __init__(self,
update,
time_start=0.0,
time_stop=np.inf,
time_modulus=None,
scale_step=False,
scale_start=0.0,
scale_stop=1.0,
switching_func=utils.smootherstep,
null_return={},
name_prefix="timedependent_",
):
self.params = update.params.copy()
self.params.update({
'update': update,
'time_start': time_start,
'time_stop': time_stop,
'time_modulus': time_modulus,
'scale_step': scale_step,
'scale_start': scale_start,
'scale_stop': scale_stop,
'switching_func': switching_func,
'null_return': null_return,
'name': name_prefix + update.params['name'],
})
self.requirements = set(list(update.requirements) + ['simulation_time'])
self.h5_keys = update.h5_keys
self.h5_shapes = update.h5_shapes
self.h5_types = update.h5_types
self.curr_mod = None
self.curr_frac = None
self.curr_scale = None
self.state_update = {}
def update(self, step_length, state):
if self.params['time_modulus'] is not None:
self.curr_mod = state['simulation_time'] % self.params['time_modulus']
else:
self.curr_mod = state['simulation_time']
self.curr_frac = (self.curr_mod - self.params['time_start']) / (self.params['time_stop'] - self.params['time_start'])
self.curr_frac = np.clip(self.curr_frac, 0.0, 1.0)
state['time_frac'] = self.curr_frac
if self.params['scale_step']:
            self.curr_scale = (1.0 - self.params['switching_func'](self.curr_frac)) * self.params['scale_start'] + self.params['switching_func'](self.curr_frac) * self.params['scale_stop']
else:
self.curr_scale = 1.0
cond1 = self.curr_mod >= self.params['time_start']
cond2 = self.curr_mod < self.params['time_stop']
if cond1 and cond2:
self.state_update = self.params['update'].update(self.curr_scale * step_length, state)
elif self.params['scale_step'] and np.abs(self.curr_scale) > 1E-8 and self.params['time_modulus'] is None:
self.state_update = self.params['update'].update(self.curr_scale * step_length, state)
else:
self.state_update = self.params['null_return']
return self.state_update
class Langevin(Update):
"""
Update velocities using Bussi-Parrinello Langevin integrator
Params:
masses ({nparticle,} ndarray): masses for each particle
kT (float): temperature in energy units
damptime (float): damping time
rescale (bool): True to project the new momentum vector along the old
name (str): name of update (default 'langevin')
References:
doi:10.1103/PhysRevE.75.056707
doi:10.1063/1.5029833
"""
h5_keys = ['V']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self,
masses,
kT,
damptime,
rescale=False,
vel_key='V',
name='langevin',
):
self.params = {
'masses': np.reshape(masses, (-1, 1)),
'kT': kT,
'damptime': damptime,
'gamma': 1.0 / damptime,
'rescale': rescale,
'vel_key' : vel_key,
'name' : name,
}
self.requirements = set(['V'])
self.step_length = None
self.c1 = None
self.c2 = None
self.dV = None
self.V = None
self.state_update = {}
def update(self, step_length, state):
if self.step_length != step_length:
self.c1 = np.exp(-self.params['gamma'] * abs(step_length))
self.c2 = np.sqrt((1.0 - self.c1**2) * self.params['kT'] / self.params['masses'])
self.step_length = step_length
self.dV = (self.c1 - 1.0) * state[self.params['vel_key']] + self.c2 * np.random.standard_normal(state[self.params['vel_key']].shape)
self.V = state[self.params['vel_key']] + self.dV
if self.params['rescale']:
self.V = np.linalg.norm(self.V, axis=1, keepdims=True) / np.linalg.norm(state[self.params['vel_key']], axis=1, keepdims=True) * state[self.params['vel_key']]
self.state_update = {
self.params['vel_key'] : self.V,
}
return self.state_update
class AdaptiveLangevin(Update):
"""
Update velocities using adaptive Langevin integrator
Params:
masses ({nparticle,} ndarray): masses for each particle
kT (float): temperature in energy units
aux_mass (float): mass to use in for auxiliary degree of freedom corresponding to thermostat frequency
sigma (float): variance of additional noise (default is sqrt(2kT gamma_0))
name (str): name of update (default 'langevin')
References:
https://epubs.siam.org/doi/pdf/10.1137/15M102318X
https://aip.scitation.org/doi/10.1063/1.3626941
"""
h5_keys = ['V']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self,
masses,
kT,
aux_mass,
sigma=None,
name='adaptive_langevin',
):
self.params = {
'masses': np.reshape(masses, (-1, 1)),
'kT': kT,
'aux_mass': aux_mass,
'name' : name,
}
self.requirements = set(['V', 'gamma'])
self.step_length = None
self.sigma = sigma
self.gamma = None
self.c1 = None
self.c2 = None
self.V = None
self.state_update = {}
@staticmethod
def initialize(kT, ndof, tau):
r"""
Compute 'optimal damping' parameters given characteristic timescale
.. math::
\gamma &= 2 / \tau \\
Q &= N_d k_B T \tau^2 / 2
Args:
kT (float): temperature in energy units
ndof (int): total number of degrees of freedom
tau (float): characteristic time scale
Returns:
(float, float): tuple of initial gamma and auxiliary mass to use
"""
gamma = 2.0 / tau
Q = 0.5 * ndof * kT * tau**2
return gamma, Q
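    # Illustrative usage (variable names are placeholders, not part of the module):
    #     gamma0, Q = AdaptiveLangevin.initialize(kT, ndof=3 * natom, tau=tau)
    #     state['gamma'] = gamma0
    #     thermostat = AdaptiveLangevin(masses, kT, aux_mass=Q)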
def update(self, step_length, state):
if self.sigma is None:
self.sigma = np.sqrt(2.0 * self.params['kT'] * state['gamma'])
KE = state.get('kinetic_energy', utils.compute_kinetic_energy(state['V'], self.params['masses']))
self.gamma = state['gamma'] + 0.5 * step_length / self.params['aux_mass'] * (2.0 * KE - np.size(state['V']) * self.params['kT'])
self.c1 = np.exp(-self.gamma * abs(step_length))
self.c2 = np.sqrt((1.0 - self.c1**2) * 0.5 / self.gamma / self.params['masses'])
self.V = self.c1 * state['V'] + self.sigma * self.c2 * np.random.standard_normal(state['V'].shape)
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.gamma += 0.5 * step_length / self.params['aux_mass'] * (2.0 * KE - np.size(state['V']) * self.params['kT'])
self.state_update = {
'V': self.V,
'gamma': self.gamma,
'kinetic_energy': KE,
}
return self.state_update
class ColoredNoise(Update):
"""
Update velocities using colored noise
Params:
masses ({nparticle,} ndarray): masses for each particle
kT (float): temperature in energy units
gamma ({naux+1, naux+1} ndarray): friction coefficient matrix in inverse units of time
regularization (float): Small number to add to diagonal of gamma to ensure successful cholesky decomposition
gamma_inf (float): noise at high frequency, used to build gamma if it's not given
gamma_0 (float): noise at omega_til, used to build gamma if it's not given
omega_til (float): displacement of exponential distributions from 0, used to build gamma if it's not given
name (str): name of update (default 'colored_noise')
References:
doi:10.1063/1.3518369
"""
h5_keys = ['V', 'aux_momentum_CN']
h5_shapes = [('natom', 3), ('naux', 3)]
h5_types = ['f', 'f']
def __init__(self,
masses,
kT=300.0 * units.K_TO_AU,
gamma=None,
gamma_inf=83.33/units.PS_TO_AU, # Using GLE 12fs parameters from ref
gamma_0=0.01/units.PS_TO_AU,
omega_til=300.0/units.PS_TO_AU,
regularization=1E-8,
dim=3,
name='colored_noise',
):
# Build gamma as in reference
if gamma is None:
var = np.sqrt(omega_til * (gamma_inf - gamma_0))
tmp = 3.0**(0.25)
gamma = np.array([
[gamma_inf, tmp*var, 1.0/tmp * var],
[tmp*var, tmp**2 * omega_til, omega_til],
[-1.0/tmp * var, -omega_til, 0.0]
])
gamma = gamma + np.eye(gamma.shape[0]) * regularization
self.params = {
# Broadcast masses to match dimension of velocities
'masses': (np.reshape(masses, (-1, 1)) * np.ones((dim,))).reshape((1, -1)), # (N x 1) -> (1 x 3N)
            'kT': kT,  # kT is already given in energy units (see docstring); no further conversion
'gamma': gamma,
'name' : name,
}
self.requirements = set(['V', 'aux_momentum_CN'])
self.step_length = None
self.C1 = None
self.C2 = None
self.dV = None
self.V = None
self.state_update = {}
def update(self, step_length, state):
if self.step_length != step_length:
self.C1 = expm(-self.params['gamma'] * abs(step_length))
self.C1_update = self.C1 - np.eye(self.C1.shape[0]) # Subtract identity to compute \Delta p
self.C2 = cholesky(np.eye(self.C1.shape[0]) - np.dot(np.transpose(self.C1), self.C1))
self.step_length = step_length
# Unroll everything to compute the update as a matrix multiplication
V_unroll = state['V'].reshape(1, -1) # (N x 3) -> (1 x 3N)
P_unroll = V_unroll * self.params['masses'] # Elementwise multiplication
# construct matrix that is (#aux mom per DOF + 1) x (DOF)
P_tot = np.vstack([P_unroll, state['aux_momentum_CN']]) # (M+1 x 3N)
friction_contrib = np.dot(self.C1_update, P_tot) # (M+1 x 3N)
noise = np.dot(self.C2, np.random.standard_normal(P_tot.shape))
noise_contrib = noise * np.sqrt(self.params['masses'] * self.params['kT']) # The masses are broadcasted here
update = friction_contrib + noise_contrib
self.dV = (update[0,:] / self.params['masses']).reshape(-1, state['V'].shape[1])
self.V = state['V'] + self.dV
self.dAux = update[1:,:]
self.Aux = state['aux_momentum_CN'] + self.dAux
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.state_update = {
'V': self.V,
'aux_momentum_CN': self.Aux,
'kinetic_energy': KE,
}
return self.state_update
class NoseHooverNVT(Update):
"""
Update velocities using massive Nose-Hoover chains
DOI: 10.1080/00268979600100761
Params:
masses ({nparticle,} ndarray): masses for each particle
kT (float): temperature in energy
Qs ({chain_length, natom} ndarray): auxiliary masses
nc (int): number of integration substeps
name (str): name of update (default 'nosehooverchain')
"""
h5_keys = ['V', 'aux_position_NH', 'aux_velocity_NH']
h5_shapes = [('natom', 3), ('naux', 'natom'), ('naux', 'natom')]
h5_types = ['f', 'f', 'f']
requirements = set(['V', 'masses', 'aux_position_NH', 'aux_velocity_NH'])
def __init__(self,
masses,
kT,
Qs,
nc=5,
name='nosehooverchain',
):
self.params = {
'masses': np.reshape(masses, (-1, 1)),
'kT': kT,
'Qs': Qs,
'nc': nc,
'name': name,
}
self.aux_q = None
self.aux_v = None
self.aux_a = None
self.V = None
w1 = 1.0 / (2.0 - 2.0**(1.0/3.0))
w3 = w1
w2 = 1.0 - w1 - w3
self.ws = np.array([w1, w2, w3])
self.M = len(Qs)
self.state_update = {}
@classmethod
def build(cls, masses, kT, tau=0.5 * units.PS_TO_AU, chain_length=5, nc=5, dim=3, mass_weight=True):
"""
Construct the update for molecular system with auxiliary masses based on rules given in reference
References:
doi: 10.1021/j100151a052
Args:
masses ({nparticle,} ndarray): masses of original dofs
kT (float): temperature in energy units
tau (float): 'natural timescale' to set the masses of the NHCs using Q = kT tau^2 (default 500 fs in au)
chain_length (int): length of NHC per atom (default 5)
nc (int): number of integration substeps for NHCs (default 5)
dim (int): dimension of original system (default 3)
mass_weight (bool): True to multiply Qs by ratio of particle mass / hydrogen mass (default True)
Returns:
NHC_update: a constructed NHC thermostat update
"""
if mass_weight:
mass_r = np.reshape(masses, (1, -1)) / utils.symbol_to_mass(['H'])[0]
else:
mass_r = np.ones((1, np.size(masses)))
Qs = np.ones((chain_length, np.size(mass_r))) * kT * mass_r * tau **2
Qs[0] *= dim
return cls(masses, kT, Qs, nc)
@staticmethod
def initialize(kT, Qs):
"""
Create initial positions and velocities of auxiliary degrees of freedom,
positions are set to zero and velocities are boltzmann distributed
Args:
kT (float): Temperature in energy units
Qs ({chain_length, natom} ndarray): masses of NHCs
Returns:
({chain_length, natom} ndarray, {chain_length, natom} ndarray): tuple of initial auxiliary positions and auxiliary velocities sampled from Boltzmann distribution
"""
aux_q = np.zeros_like(Qs)
factor = np.sqrt(kT/Qs)
aux_v = np.random.normal(scale=factor)
return aux_q, aux_v
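    # Illustrative usage (variable names are placeholders, not part of the module):
    #     nhc = NoseHooverNVT.build(masses, kT, tau=0.5 * units.PS_TO_AU)
    #     aux_q, aux_v = NoseHooverNVT.initialize(kT, nhc.params['Qs'])
    #     state['aux_position_NH'], state['aux_velocity_NH'] = aux_q, aux_v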
def compute_nose_kinetic_energy(self, velocities, masses):
"""
Calculate kinetic energy corresponding to NHC velocities
Args:
velocities ({chain_length, natom} ndarray): NHC velocities
masses ({chain_length, natom} ndarray): NHC masses
Returns:
(float): NHC kinetic energy
"""
return 0.5 * np.sum(masses * velocities**2)
def compute_nose_potential_energy(self, coordinates, gkt, gnkt):
"""
Calculate potential energy corresponding to NHC coordinates
Args:
coordinates ({chain_length, natom} ndarray): NHC coordinates
gkt (float): temperature in energy units
gnkt (float): dofs per chain * temperature in energy
Returns:
(float): NHC potential energy
"""
return np.sum(coordinates[0] * gnkt) + np.sum(coordinates[1:] * gkt)
def update(self, step_length, state):
self.aux_q = np.copy(state['aux_position_NH'])
self.aux_v = np.copy(state['aux_velocity_NH'])
# Atomwise KE*2
akin = np.sum(state['V']**2 * self.params['masses'], axis=1)
scale = np.ones_like(akin)
self.aux_a = np.zeros_like(self.aux_q)
self.gnkt = np.shape(state['V'])[-1] * self.params['kT']
self.gkt = self.params['kT']
self.aux_a[0] = (akin - self.gnkt) / self.params['Qs'][0]
self.aux_a[1:] = (self.params['Qs'][:-1] * self.aux_v[:-1]**2 - self.gkt) / self.params['Qs'][1:]
for k in range(self.params['nc']): # loop of integrations substeps
for w in self.ws: # loop of steps in Yoshida Suzuki integrator
# This is sort of hacky due to translation from TeraChem, which
# was itself translated from DOI: 10.1080/00268979600100761
# appendix A
wdts2 = w * step_length / self.params['nc']
wdts4 = wdts2 * 0.5
wdts8 = wdts4 * 0.5
self.aux_v[self.M-1] += self.aux_a[self.M-1] * wdts4
# Intra chain coupling M to 0
for Mi in range(self.M-1):
aa = np.exp(-wdts8 * self.aux_v[self.M-(Mi+1)])
self.aux_v[self.M-1-(Mi+1)] = self.aux_v[self.M-1-(Mi+1)] * aa**2 + wdts4 * aa * self.aux_a[self.M-1-(Mi+1)]
# Update kinetic energy
aa = np.exp(-wdts2 * self.aux_v[0])
scale *= aa
self.aux_a[0] = (akin * scale**2 - self.gnkt) / self.params['Qs'][0]
# Update positions
self.aux_q += wdts2 * self.aux_v
# Intra chain coupling 0 to M
for Mi in range(self.M-1):
aa = np.exp(-wdts8 * self.aux_v[Mi+1])
self.aux_v[Mi] = self.aux_v[Mi] * aa**2 + wdts4 * aa * self.aux_a[Mi]
self.aux_a[Mi+1] = (self.params['Qs'][Mi] * self.aux_v[Mi]**2 - self.gkt) / self.params['Qs'][Mi+1]
self.aux_v[self.M-1] += self.aux_a[self.M-1] * wdts4
# All this work to rescale velocities
self.V = state['V'] * np.reshape(scale, (-1, 1))
self.energy = self.compute_nose_kinetic_energy(self.aux_v, self.params['Qs'])
self.energy += self.compute_nose_potential_energy(self.aux_q, self.gkt, self.gnkt)
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.state_update = {
'V' : self.V,
'aux_position_NH': self.aux_q,
'aux_velocity_NH': self.aux_v,
'NHC_energy': self.energy,
'kinetic_energy': KE,
}
return self.state_update
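# --- Added usage sketch (illustrative, not part of the original module) ---
# In a Trotter-factorized NVT loop this thermostat is presumably applied symmetrically
# around the velocity/position updates, with the auxiliary variables seeded once via
# `initialize`. The surrounding integrator/state API sketched here is an assumption
# based on the docstrings and the state keys read in `update` above.
#
#   nhc = NoseHooverNVT.build(masses, kT, tau=0.5 * units.PS_TO_AU)
#   state['aux_position_NH'], state['aux_velocity_NH'] = nhc.initialize(kT, nhc.params['Qs'])
#   for _ in range(nsteps):
#       state.update(nhc.update(0.5 * dt, state))   # thermostat half step
#       # ... velocity / position / velocity updates ...
#       state.update(nhc.update(0.5 * dt, state))   # closing thermostat half step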
class IsokineticNoseHoover(NoseHooverNVT):
"""
    Update velocities using massive Nose-Hoover chains coupled to a joint isokinetic constraint
Params:
masses ({nparticle,} ndarray): masses for each original particles
kT (float): temperature in energy
Qs ({2, L, nparticle, dim} ndarray): auxiliary masses
nc (int): number of integration substeps
name (str): name of update (default 'nosehooverchain')
References:
https://www.tandfonline.com/doi/pdf/10.1080/00268976.2013.844369
https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.93.150201
https://aip.scitation.org/doi/pdf/10.1063/1.1534582?class=pdf
"""
@classmethod
def build(cls, masses, kT, L=4, tau=0.5 * units.PS_TO_AU, nc=5, dim=3, mass_weight=False):
"""
Construct the update for molecular system with auxiliary masses based on rules given in
doi: 10.1021/j100151a052
Args:
masses ({nparticle,} ndarray): masses of original dofs
kT (float): temperature in energy units
L (int): number of auxiliary dofs per original dof (default 4)
            tau (float): 'natural timescale' used to set the NHC masses via Q = kT tau^2 (default 0.5 ps in au)
nc (int): number of integration substeps for NHCs (default 5)
dim (int): dimension of original system (default 3)
mass_weight (bool): True to multiply Qs by ratio of particle mass / hydrogen mass (default False)
Returns:
NHC_update: a constructed Isokinetic NHC thermostat update
"""
if mass_weight:
mass_r = np.reshape(masses, (1, 1, -1, 1)) / utils.symbol_to_mass(['H'])[0]
else:
mass_r = np.ones((1, 1, np.size(masses), 1))
Qs = np.ones((2, L, np.size(mass_r), dim)) * kT * mass_r * tau **2
return cls(masses, kT, Qs, nc)
def update(self, step_length, state):
self.aux_v = np.copy(state['aux_velocity_NH'])
self.V = np.copy(state['V'])
self.L = float(np.shape(self.params['Qs'])[1])
self.lmbd = self.L * self.params['kT']
for k in range(self.params['nc']): # loop of integrations substeps
for w in self.ws: # loop of steps in Yoshida Suzuki integrator
# step_length generally already the total \Delta t / 2, making
# sub_step = w_i * \Delta t / 2 / nc
sub_step = w * step_length / self.params['nc']
half_sub_step = 0.5 * sub_step
# Take half substep for vk2
G = (self.params['Qs'][0] * self.aux_v[0]**2 - self.params['kT']) / self.params['Qs'][1]
self.aux_v[1] += half_sub_step * G
# Take substep for v, vk1
aa = np.exp(-sub_step * self.aux_v[1])
tt = self.V**2 * self.params['masses'] + self.L / (self.L + 1.0) * np.sum(self.params['Qs'][0]*(self.aux_v[0]**2)*(aa**2), axis=0)
srtop = np.sqrt(self.lmbd/tt)
self.V = self.V * srtop
self.aux_v[0] = self.aux_v[0] * srtop * aa
# Take half substep for vk2
G = (self.params['Qs'][0] * self.aux_v[0]**2 - self.params['kT']) / self.params['Qs'][1]
self.aux_v[1] += half_sub_step * G
KE = utils.compute_kinetic_energy(self.V, self.params['masses'])
self.state_update = {
'V' : self.V,
'aux_position_NH': self.aux_q,
'aux_velocity_NH': self.aux_v,
'kinetic_energy': KE,
}
return self.state_update
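# Added note (illustrative): the per-dof isokinetic constraint maintained above is
#     m * v**2 + (L / (L + 1)) * sum_k Q1_k * v1_k**2 == L * kT,
# which is exactly the quantity `tt` computed inside the loop; since
# srtop = sqrt(L * kT / tt), rescaling v and the aux_v[0] (vk1) velocities by srtop
# restores the constraint after the exp(-sub_step * vk2) damping (see the references
# in the class docstring).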
class NoseHooverLangevin(Update):
"""
Update the last auxiliary velocity in each NHC with Langevin thermostatting
Params:
kT (float): temperature in energy
Qs ({2, L, nparticle, dim} ndarray): auxiliary masses
damptime (float): rate of damping for Ornstein-Uhlenbeck/Langevin process applied to last NHC dofs (default 10 fs in au)
name (str): name of update (default 'nosehooverlangevin')
References:
https://www.tandfonline.com/doi/pdf/10.1080/00268976.2013.844369
https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.93.150201
https://aip.scitation.org/doi/pdf/10.1063/1.1534582?class=pdf
"""
def __init__(self,
kT,
Qs,
damptime,
name='nosehooverlangevin',
):
self.params = {
'kT': kT,
'Qs': Qs,
'damptime': damptime,
'gamma': 1.0 / damptime,
'name': name,
}
self.requirements = set(['aux_velocity_NH'])
self.step_length = None
self.c1 = None
self.c2 = None
self.sigma = np.sqrt(2.0 * self.params['gamma'] * self.params['kT'] / self.params['Qs'][-1])
self.state_update = {}
def update(self, step_length, state):
if self.step_length != step_length:
self.c1 = np.exp(-self.params['gamma'] * abs(step_length))
self.c2 = np.sqrt((1.0 - np.exp(-2.0 * self.params['gamma'] * abs(step_length))) * 0.5 / self.params['gamma'])
self.step_length = step_length
self.aux_v = np.copy(state['aux_velocity_NH'])
self.aux_v[-1] = self.c1 * self.aux_v[-1] + self.sigma * self.c2 * np.random.standard_normal(np.shape(self.aux_v[-1]))
self.state_update = {
'aux_velocity_NH': self.aux_v,
}
return self.state_update
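# Added note (illustrative): with c1 = exp(-gamma*dt) and
# c2 = sqrt((1 - exp(-2*gamma*dt)) / (2*gamma)), the product sigma * c2 equals
# sqrt((kT / Q) * (1 - c1**2)), so the update of aux_v[-1] above is the exact
# Ornstein-Uhlenbeck step  v <- c1 * v + sqrt((kT/Q) * (1 - c1**2)) * N(0, 1),
# whose stationary variance is kT / Q, i.e. the last chain element is thermostatted
# at the target temperature for any step size.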
#class NoseHooverSphericalNPT(Update):
# """
# Update velocities using massive Nose-Hoover chains
# DOI: 10.1080/00268979600100761
#
# state required:
# V: velocities
# masses: masses of each degree of freedom
# (a vector should also work with broadcasting for atoms)
# aux_position_NH: numpy array of auxiliary positions
# aux_velocity_NH: numpy array of auxiliary velocities
#
# Params:
# kT: temperature in energy
# Qs: numpy array of masses with shape (chain_length, natom)
# nc: number of integration substeps
# """
# h5_keys = ['V', 'aux_position_NH', 'aux_velocity_NH']
# h5_shapes = [('natom', 3), ('naux', 'natom'), ('naux', 'natom')]
# h5_types = ['f', 'f', 'f']
# requirements = set(['V', 'masses', 'aux_position_NH', 'aux_velocity_NH'])
#
# def __init__(self,
# kT,
# Pext,
# Qs,
# nc=5,
# name='nosehooverchain',
# ):
# self.params = {
# 'kT': kT,
# 'Qs': Qs,
# 'nc': nc,
# 'name': name,
# }
# self.aux_q = None
# self.aux_v = None
# self.aux_a = None
# self.V = None
# w1 = 0.41449077179437571194
# w3 = -0.65796308717750284778
# self.ws = np.array([w1, w1, w3, w1, w1])
# self.M = len(Qs)
# self.state_update = {}
#
# @classmethod
# def build(cls, kT, masses, tau=0.5 * units.PS_TO_AU, chain_length=5, nc=5, dim=3, mass_weight=True):
# """
# Construct the update for molecular system with auxiliary masses based on rules given in
# doi: 10.1021/j100151a052
#
# Args:
# kT: Temperature in energy units
# masses: np.array of atomic masses
# tau: relaxation time scale
# chain_length: length of Nose-Hoover chain
# nc: number of Yoshida-Suzuki integration substeps used to integrate NHC degrees of freedom
# dim: number of degrees of freedom per particle
# mass_weight: if True, will scale masses of NHCs by mass_i / mass_H
# where mass_i is the mass of atom i and mass_H is a proton mass
#
# Returns:
# NHC_update: a constructed NHC thermostat update
# """
# if mass_weight:
# mass_r = np.reshape(masses, (1, -1)) / utils.symbol_to_mass(['H'])[0]
# else:
# mass_r = np.ones((1, np.size(masses)+1))
# Qs = np.ones((chain_length, np.size(mass_r))) * kT * mass_r * tau **2
# Qs[0, :] *= dim
# Qs[0, -1] *= dim
# return cls(kT, Qs, nc)
#
# def initialize(self, kT=None, Qs=None):
# """
# Create initial positions and velocities of auxiliary degrees of freedom,
# positions are set to zero and velocities are boltzmann distributed
#
# Args:
# kT: Temperature in energy units
# Qs: np.array (chain_length, natom) of masses of NHCs
#
# Returns:
# aux_q, aux_v: auxiliary variables for position and velocity
# """
# if kT is None:
# kT = self.params['kT']
# if Qs is None:
# Qs = self.params['Qs']
#
# aux_q = np.zeros_like(Qs)
# factor = np.sqrt(kT/Qs)
# aux_v = np.random.normal(scale=factor)
# return aux_q, aux_v
#
# def compute_nose_kinetic_energy(self, velocities, masses):
# return 0.5 * np.sum(velocities ** 2 * masses)
#
# def compute_nose_potential_energy(self, coordinates, gkt, gnkt):
# return np.sum(coordinates[0] * gnkt) + np.sum(coordinates[1:] * gkt)
#
# def update(self, step_length, state):
# self.aux_q = np.copy(state['aux_position_NH'])
# self.aux_v = np.copy(state['aux_velocity_NH'])
# # Atomwise KE (note the factor of two)
# akin = np.sum(state['V']**2, axis=1) * np.reshape(state['masses'], (-1, ))
# vkin = vmass * vlogv**2
# kin = np.concatenate([akin, vkin])
# scale = np.ones_like(kin)
# self.aux_a = np.zeros_like(self.aux_q)
# self.gnkt = np.shape(state['V'])[-1] * self.params['kT']
# self.gkt = self.params['kT']
# self.aux_a[0] = (kin - self.gnkt) / self.params['Qs'][0]
# self.aux_a[1:] = (self.params['Qs'][:-1] * self.aux_v[:-1]**2 - self.gkt) / self.params['Qs'][1:]
# self.aux_a_V = 3.0 * (self.Pint - self.params['pressure']) / vmass #TODO
#
# for k in range(self.params['nc']): # loop of integrations substeps
# for w in self.ws: # loop of steps in Yoshida Suzuki integrator
# # This is sort of hacky due to translation from TeraChem, which
# # was itself translated from DOI: 10.1080/00268979600100761
# # appendix A
# wdts2 = w * step_length / self.params['nc']
# wdts4 = wdts2 * 0.5
# wdts8 = wdts4 * 0.5
#
# self.aux_v[self.M-1] += self.aux_a[self.M-1] * wdts4
# # Intra chain coupling M to 0
# for Mi in range(self.M-1):
# aa = np.exp(-wdts8 * self.aux_v[self.M-(Mi+1)])
# self.aux_v[self.M-1-(Mi+1)] = self.aux_v[self.M-1-(Mi+1)] * aa**2 + wdts4 * aa * self.aux_a[self.M-1-(Mi+1)]
#
# # Update kinetic energy
# aa = np.exp(-wdts2 * self.aux_v[0])
# scale *= aa
# self.aux_a[0] = (akin * scale**2 - self.gnkt) / self.params['Qs'][0]
#
# # Update positions
# self.aux_q += wdts2 * self.aux_v
#
# # Intra chain coupling 0 to M
# for Mi in range(self.M-1):
# aa = np.exp(-wdts8 * self.aux_v[Mi+1])
# self.aux_v[Mi] = self.aux_v[Mi] * aa**2 + wdts4 * aa * self.aux_a[Mi]
# self.aux_a[Mi+1] = (self.params['Qs'][Mi] * self.aux_v[Mi]**2 - self.gkt) / self.params['Qs'][Mi+1]
#
# self.aux_v[self.M-1] += self.aux_a[self.M-1] * wdts4
#
# # All this work to rescale velocities
# self.V = state['V'] * np.reshape(scale, (-1, 1))
# self.energy = self.compute_nose_kinetic_energy(self.aux_v, self.params['Qs'])
# self.energy += self.compute_nose_potential_energy(self.aux_q, self.gkt, self.gnkt)
# self.state_update = {
# 'V' : self.V,
# 'aux_position_NH': self.aux_q,
# 'aux_velocity_NH': self.aux_v,
# 'NHC_energy': self.energy,
# }
# return self.state_update
#class NoseHooverNPTPositionUpdate(PositionUpdate):
# coeffs = np.array([1.0/6.0, 1.0/120.0, 1.0/5040.0, 1.0/362880.0])
#
# def update(self, step_length, state):
# vlogv =
# aa = np.exp(0.5 * step_length * vlogv)
# aa2 = aa * aa
# arg2 = (0.5 * vlogv * step_length) ** 2
# poly = (((self.coeffs[3] * arg2 + self.coeffs[2]) * arg2 + self.coeffs[1]) * arg2 + coeffs[0]) * arg2 + 1.0
# bb = aa * poly * step_length
# self.X = state['X'] * aa2 + state['V'] * bb
# self.aux_q = state['aux_position_NH'] + vlogv * step_length
# self.state_update = {
# 'X' : self.X,
# }
# return self.state_update
class DistanceAnchor(Update):
"""
    Move two atoms along mass-weighted coordinates to a given separation distance.
    Without being wrapped by a TimeDependent update, the positions are held constant
    at dist_stop. With it, they are interpolated from the initial separation to the
    final one, at a rate set by moving linearly from the interatomic distance at
    time_start to dist_stop at time_stop.
Velocities of the selected atoms are also set to zero.
This update should be placed immediately before or after the position update.
Params:
mass1 (float): mass of first atom
mass2 (float): mass of second atom
atom_ind1 (int): first atom index to pull toward one another
atom_ind2 (int): second atom index to pull toward one another
dist_stop (float): distance at which to stop pulling the atoms together
interpolate (bool): True to linearly move the wells based on time_frac if Update is TimeDependent
"""
h5_keys = ['X']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self,
mass1,
mass2,
atom_ind1,
atom_ind2,
dist_stop,
interpolate=False,
name='distance_anchor',
):
self.params = {
'mass1': mass1,
'mass2': mass2,
'atom_ind1': atom_ind1,
'atom_ind2': atom_ind2,
'dist_stop': dist_stop,
'interpolate': interpolate,
'name' : name,
}
self.requirements = set(['X', 'V'])
self.time_frac = 1.0 # Use the time_frac to determine when to restart
self.X1_start = None
self.X2_start = None
self.X1_move = None
self.X2_move = None
self.X = None
self.V = None
self.state_update = {}
def reset(self, state):
# Compute vector between atoms and initial distance
self.X1_start = state['X'][self.params['atom_ind1'], :]
self.X2_start = state['X'][self.params['atom_ind2'], :]
vec_start = self.X2_start - self.X1_start
dist_start = np.linalg.norm(vec_start)
# Compute mass weighted distances that each atom should move
dist1 = (dist_start - self.params['dist_stop']) * self.params['mass2'] / (self.params['mass1'] + self.params['mass2']) / dist_start
dist2 = (dist_start - self.params['dist_stop']) * self.params['mass1'] / (self.params['mass1'] + self.params['mass2']) / dist_start
# Compute vector that atoms will travel along
self.X1_move = vec_start * dist1
self.X2_move = -vec_start * dist2
def update(self, step_length, state):
self.X = np.copy(state['X'])
self.V = np.copy(state['V'])
if self.params['interpolate']:
# Restart movement cycle
if state['time_frac'] <= self.time_frac:
self.reset(state)
self.time_frac = state['time_frac']
else:
self.reset(state)
self.time_frac = 1.0
# Linearly interpolate along vector as time goes by
self.X[self.params['atom_ind1'], :] = self.X1_start + self.time_frac * self.X1_move
self.X[self.params['atom_ind2'], :] = self.X2_start + self.time_frac * self.X2_move
# Remove velocities
self.V[self.params['atom_ind1'], :] = 0.0
self.V[self.params['atom_ind2'], :] = 0.0
self.state_update = {
'X' : self.X,
'V' : self.V,
}
return self.state_update
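# Added worked example (illustrative): with mass1=1.0, mass2=3.0, an initial
# separation of 4.0 and dist_stop=2.0, reset() gives |X1_move| = 2.0 * 3/4 = 1.5 and
# |X2_move| = 2.0 * 1/4 = 0.5, so the lighter atom travels three times as far, the
# pair's center of mass stays fixed, and the separation shrinks from 4.0 to 2.0.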
class Recenter(Update):
"""
    Move the center of mass to the origin and remove the center-of-mass
    translational/rotational velocity.
Useful in combination with forces that do not preserve such quantities, such
as stochastic thermostats.
Should probably be placed prior to a position update.
Params:
masses ({nparticle,} ndarray): masses of particles
"""
h5_keys = ['X']
h5_shapes = [('natom', 3)]
h5_types = ['f']
def __init__(self,
masses,
name='recenter',
):
self.params = {
'masses': masses,
'name' : name,
}
self.requirements = set(['X', 'V'])
self.X = None
self.V = None
self.state_update = {}
def update(self, step_length, state):
self.X, self.V = init.initialize_centered(state['X'], state['V'], self.params['masses'])
self.state_update = {
'X': self.X,
'V': self.V,
}
return self.state_update
class MetropolisHastings(Update):
"""
    This update proceeds in two steps: the first call simply records the position
    and momentum of the state; the second applies the Metropolis acceptance test
    and, if the new state is rejected, resets the state to the previous one with
    flipped momentum.
The order of integration should generally be (according to Free Energy Computations):
[Thermostat, MetropolisHastings, Velocity, Position, Velocity, MetropolisHastings, Thermostat]
Params:
masses ({nparticle,} ndarray): masses for particles
kT (float): temperature in energy
potential_key (str): state key that corresponds to desired potential energy to check
"""
h5_keys = []
h5_shapes = []
h5_types = []
def __init__(self,
masses,
kT,
potential_key='potential_energy',
name='hmc',
):
self.requirements = set(['X', 'V', potential_key])
self.params = {
'masses': masses,
'kT': kT,
'potential_key': potential_key,
}
self.potential_key = potential_key
self.counter = 0
self.X_init = None
self.V_init = None
self.PE_init = None
self.KE_init = None
self.PE_final = None
self.KE_final = None
self.state_update = {}
def update(self, step_length, state):
self.counter += 1
if self.counter % 2 == 1: # First call in integration loop, just tabulate current state
self.X_init = state['X']
self.V_init = state['V']
self.PE_init = state[self.potential_key]
self.KE_init = utils.compute_kinetic_energy(state['V'], self.params['masses'])
self.state_update = {}
else: # Second call in integration loop
self.PE_final = state[self.potential_key]
self.KE_final = utils.compute_kinetic_energy(state['V'], self.params['masses'])
diff = self.PE_final + self.KE_final - (self.PE_init + self.KE_init)
            if np.random.uniform() < min(1.0, np.exp(-diff / self.params['kT'])):
self.state_update = {} # Keep current trajectory
else:
self.state_update = { # Revert to before, flip momentum
'X': self.X_init,
'V': -self.V_init,
}
return self.state_update
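# Added note (illustrative): the test above is the standard Metropolis criterion on
# the total energy H = PE + KE, i.e. the move is kept with probability
#     min(1, exp(-(H_final - H_init) / kT)).
# A proposal that raises H by 1 kT is accepted about 36.8% of the time, while any
# proposal that lowers H is always accepted; flipping the momentum on rejection keeps
# the combined hybrid Monte Carlo scheme reversible.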
class BXDE(Update):
"""
    This update proceeds in two steps: the first call simply records the position,
    momentum, and gradient of the state; the second checks whether the new state has
    crossed an energy barrier and, if so, reflects the velocities away from the
    barrier.
Different from the paper, the user may give a delta_PE which defines a
maximum energy to reflect from. This makes it easy to window the energy
within the adaptive scheme.
The order of integration should generally be:
[Thermostat, BXDE, Velocity, Position, Velocity, BXDE, Thermostat]
Params:
masses ({nparticle,} ndarray): masses for particles
PE_min (float): minimum potential energy allowed by barrier (default -np.inf)
dPE (float): Max energy allowed given by PE_min + dPE (default np.inf)
potential_name (str): used to get potential_energy and potential_gradient state values
adaptive (bool): True to dynamically change PE_min according to reference (default True)
nstep_sample (int): number of steps to sample for adaptive barriers (default 100)
name (str): update name (default 'bxde')
References:
doi: 10.1021/acs.jctc.8b00515
"""
h5_keys = []
h5_shapes = []
h5_types = []
def __init__(self,
masses,
PE_min=-np.inf,
dPE=np.inf,
potential_name='potential',
adaptive=True,
nstep_sample=100,
name='bxde',
):
self.requirements = set(['X', 'V', potential_name + '_energy', potential_name + '_gradient'])
self.params = {
'masses': np.reshape(masses, (-1, 1)),
'potential_key': potential_name + '_energy',
'gradient_key': potential_name + '_gradient',
'adaptive': adaptive,
'nstep_sample': nstep_sample,
}
self.PE_min = PE_min
self.dPE = dPE
self.potential_name = potential_name
self.adaptive = adaptive
self.nstep_sample = nstep_sample
self.counter = 0
self.curr_PE_max = None
self.X_init = None
self.V_init = None
self.V = None
self.PE_final = None
self.lmbda = None
self.state_update = {}
def update(self, step_length, state):
self.counter += 1
self.state_update = {}
if self.counter % 2 == 1: # First call in integration loop, just tabulate current state
self.X_init = state['X']
self.V_init = state['V']
self.PE_init = state[self.params['potential_key']]
self.G_init = state[self.params['gradient_key']]
else: # Second call in integration loop
self.PE_final = state[self.params['potential_key']]
if self.adaptive:
                if self.curr_PE_max is not None and self.PE_final > self.curr_PE_max:
if self.counter//2 > self.nstep_sample:
self.PE_min = self.curr_PE_max
self.counter = 0
else:
# Don't let PE_max go over PE_min + dPE
PE_cutoff = self.PE_min + self.dPE
if self.PE_min > -np.inf:
self.curr_PE_max = min(PE_cutoff, self.PE_final)
else:
self.curr_PE_max = self.PE_final
self.state_update['BXDE_PE_curr_max'] = self.curr_PE_max
self.state_update['BXDE_PE_min'] = self.PE_min
if (self.PE_final < self.PE_min):
gke = utils.compute_kinetic_energy_momentum(self.G_init, self.params['masses'])
self.lmbda = np.sum(self.G_init * self.V_init) / gke
self.V = self.V_init + self.lmbda * self.G_init / self.params['masses']
# Revert to before, reflect velocities about PE boundary
self.state_update[self.params['potential_key']] = self.PE_init
self.state_update['X'] = self.X_init
self.state_update['V'] = self.V
elif (self.PE_final > (self.PE_min + self.dPE) and self.PE_min > -np.inf):
gke = utils.compute_kinetic_energy_momentum(self.G_init, self.params['masses'])
self.lmbda = - np.sum(self.G_init * self.V_init) / gke
self.V = self.V_init + self.lmbda * self.G_init / self.params['masses']
# Revert to before, reflect velocities about PE boundary
self.state_update[self.params['potential_key']] = self.PE_init
self.state_update['X'] = self.X_init
self.state_update['V'] = self.V
return self.state_update
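# Added note (illustrative): on a boundary crossing the update restores the previous
# coordinates and inverts the component of the velocity along the stored potential
# gradient (a velocity reflection), steering the trajectory back inside the
# [PE_min, PE_min + dPE] energy box instead of rethermalizing it; see the BXDE
# reference in the class docstring for the derivation of the reflection coefficient.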
class FIRE(Update):
"""
    Fast inertial relaxation engine (FIRE) step.
    Can be used to add a minimization step to the dynamics; the recommended use is
    to append it to an existing MD ExplicitIntegrator.
Params:
deltat_max (float): maximum time step
N_min (int): see ref
f_inc (float): see ref
f_dec (float): see ref
alpha_start (float): see ref
f_alpha (float): see ref
grad_key (str): key to pull gradient from state (default 'potential_gradient')
References:
doi 10.1103/PhysRevLett.97.170201
"""
h5_keys = []
h5_shapes = []
h5_types = []
def __init__(self,
deltat_max,
N_min=5,
f_inc=1.1,
f_dec=0.5,
alpha_start=0.1,
f_alpha=0.99,
grad_key='potential_gradient',
):
self.deltat_max = deltat_max
self.N_min = N_min
self.f_inc = f_inc
self.f_dec = f_dec
self.alpha_start = alpha_start
self.f_alpha = f_alpha
self.grad_key = grad_key
self.P = None
self.Ppos_nstep = 0
self.alpha = alpha_start
self.state_update = {}
def update(self, step_length, state):
self.P = - np.sum(state[self.grad_key] * state['V'])
if self.P > 0.0:
self.state_update = {
                # mix toward the force direction (force = -gradient)
                'V': (1.0 - self.alpha) * state['V'] - self.alpha * state[self.grad_key] / np.linalg.norm(state[self.grad_key]) * np.abs(state['V'])
}
self.Ppos_nstep += 1
if self.Ppos_nstep > self.N_min:
                self.state_update['dt'] = min(state['dt'] * self.f_inc, self.deltat_max)
                self.alpha = self.alpha * self.f_alpha
else:
self.state_update = {
'V' : np.zeros_like(state['V']),
'dt' : state['dt'] * self.f_dec,
}
self.alpha = self.alpha_start
self.Ppos_nstep = 0
return self.state_update
```
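The thermostat classes above lean on two facts that are easy to check in isolation: the five Suzuki-Yoshida composition weights sum to one, and the Nose-Hoover-Langevin coefficients `c1`/`c2` reproduce the stationary variance `kT/Q` of an Ornstein-Uhlenbeck process. The following self-contained NumPy sketch (added for illustration; it does not import the module above, and the parameter values, seed, and tolerances are arbitrary choices) verifies both.
```python
import numpy as np

# Five-point Suzuki-Yoshida composition weights used by the NHC integrators above.
w1 = 0.41449077179437571194
w3 = -0.65796308717750284778
ws = np.array([w1, w1, w3, w1, w1])
assert abs(ws.sum() - 1.0) < 1e-12  # the weights form a valid splitting of the step

# Ornstein-Uhlenbeck step with the same c1/c2/sigma definitions as NoseHooverLangevin.
kT, Q, gamma, dt = 1.0, 2.0, 0.5, 0.1
c1 = np.exp(-gamma * dt)
c2 = np.sqrt((1.0 - np.exp(-2.0 * gamma * dt)) * 0.5 / gamma)
sigma = np.sqrt(2.0 * gamma * kT / Q)

rng = np.random.default_rng(0)
v = np.zeros(200_000)
for _ in range(200):
    v = c1 * v + sigma * c2 * rng.standard_normal(v.shape)

# The stationary variance of the thermostatted dof should approach kT / Q = 0.5.
print(np.var(v), kT / Q)
assert abs(np.var(v) - kT / Q) < 1e-2
```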
|
{
"source": "jefpadfi/pdfkivygui",
"score": 3
}
|
#### File: pdfkivygui/examples/demo.py
```python
from kivy.app import App
from pdfkivygui.pdfkivygui import Graph, BarGraph
from kivy.uix.label import Label
import pandas as pd
test_time = ["8:00", "9:00", "10:30", "11:00", "12:00", "1:00", "2:00", "3:00", "4:00"]
test_dollars = [6.57, 105.92, 5.00, 9.55, 10.25, 11.55, 4.15, 2.36, .30]
pandas_data = {"y": test_dollars, "x": test_time}
df = pd.DataFrame(pandas_data)
class Example(App):
def build(self):
graph_test = Graph()
graph_test.draw(df)
# bar_test = BarGraph()
# bar_test.x_tick_labels = test_time
graph_test.draw(df)
return graph_test
if __name__ == "__main__":
Example().run()
```
|
{
"source": "jefpadfi/pytm",
"score": 3
}
|
#### File: pytm/pytm/client.py
```python
import requests
from requests.compat import urljoin
from api.events import Events
from api.venues import Venues
import json
class PYTMClient(object):
def __init__(self, api_key):
self.api_key = api_key
self._events = None
self._venues = None
@property
def tm_events(self):
if not self._events:
self._events = Events(self.api_key)
return self._events
@property
def tm_venues(self):
if not self._venues:
self._venues = Venues(self.api_key)
return self._venues
if __name__ == '__main__':
a = PYTMClient('')
b = a.tm_events.get_event_list()
for x in b['_embedded']['events']:
print(x['name'])
v_info = a.tm_venues.get_venue(id='KovZpZAFaJeA')
for x in v_info['_embedded']['venues']:
print(x['name'])
```
|
{
"source": "jefp/amazon-connect-power-dialer",
"score": 2
}
|
#### File: amazon-connect-power-dialer/PowerDialer-getAvailAgents/lambda_function.py
```python
import json
import boto3
import os
def lambda_handler(event, context):
print(event)
CONNECT_INSTANCE_ID=event['params']['connectid']
CONNECT_QUEUE_ID=event['params']['queue']
connect_client = boto3.client('connect')
response = connect_client.get_current_metric_data(
InstanceId=CONNECT_INSTANCE_ID,
Filters={
'Queues': [
CONNECT_QUEUE_ID,
],
'Channels': [
'VOICE',
]
},
CurrentMetrics=[
{
'Name': 'AGENTS_AVAILABLE',
'Unit': 'COUNT'
},
],
)
print("Available Agents Metriics :" + str(response['MetricResults']))
if(response['MetricResults']):availAgents = int(response['MetricResults'][0]['Collections'][0]['Value'])
else: availAgents =0
return {"availAgents":availAgents}
```
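A test event for this handler can be inferred from the keys it reads (`event['params']['connectid']` and `event['params']['queue']`). The sketch below is illustrative only: the IDs are placeholders, and actually invoking the handler requires AWS credentials and a live Amazon Connect instance.
```python
# Hypothetical invocation sketch for PowerDialer-getAvailAgents; IDs are placeholders.
event = {
    "params": {
        "connectid": "11111111-2222-3333-4444-555555555555",  # Connect instance ID (placeholder)
        "queue": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",       # Connect queue ID (placeholder)
    }
}
# from lambda_function import lambda_handler
# print(lambda_handler(event, None))  # expected shape: {"availAgents": <int>}
```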
#### File: amazon-connect-power-dialer/PowerDialer-SaveResults/lambda_function.py
```python
import json
import boto3
import os
from powerdialer import get_config
from boto3.dynamodb.conditions import Key
def lambda_handler(event, context):
s3_client = boto3.client('s3')
dynamodb = boto3.client('dynamodb')
print(event)
configTable = event["config"]["dialerconfigtable"]
dialerList = get_config('table-dialerlist', configTable)
bucket = get_config('iobucket', configTable)
response = dynamodb.scan(
TableName=dialerList,
Select='ALL_ATTRIBUTES')
data = response['Items']
while 'LastEvaluatedKey' in response:
response = dynamodb.scan(
TableName=dialerList,
Select='ALL_ATTRIBUTES',
ExclusiveStartKey=response['LastEvaluatedKey'])
data.extend(response['Items'])
datajson = json.dumps(data, ensure_ascii=False)
response = s3_client.put_object(Body=datajson,
Bucket=bucket,
Key='results/dialingResults.json',
ACL="bucket-owner-full-control")
return response
```
|
{
"source": "jefp/aws-cdr-mgmt-frontend-web",
"score": 2
}
|
#### File: aws-cdr-mgmt-frontend-web/functions/process-request.py
```python
import json
import boto3
import os
import uuid
import datetime as dt
from dateutil.tz import gettz
import time
import decimal
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['JOBS_TABLE'])
details_table = dynamodb.Table(os.environ['JOBS_DETAILS_TABLE'])
restoration_table = dynamodb.Table(os.environ['RESTORATION_TABLE'])
s3_client = boto3.client('s3')
def next_day(last):
return last+dt.timedelta(days=1)
def send_to_pack(id,userId):
sqs_client = boto3.client('sqs')
sqs_payload = json.dumps({
"id": id
})
sqs_client.send_message(QueueUrl=os.environ['SQS_PACK_URL'], MessageBody=sqs_payload)
table = dynamodb.Table(os.environ['JOBS_TABLE'])
table.update_item(
Key={'id': id, 'userId': userId},
UpdateExpression="set jobStatus = :val, lastUpdated = :val2",
ExpressionAttributeValues={
':val': 'sendingToPack',
':val2': str(dt.datetime.now())
}
)
def audit(file,id):
item = {
'jobId': id,
'file': file['Key'],
'fileStatus': file['StorageClass'],
'createdAt': str(dt.datetime.now())
}
if 'glacierStatus' in file:
item['glacierStatus']=file['glacierStatus']
details_table.put_item(Item=item)
def restore(file,id,userId):
fl = {}
fl['total']=1
fl['glacier']=0
if (file['StorageClass'] == 'DEEP_ARCHIVE' ) or (file['StorageClass'] == 'GLACIER' ):
response = s3_client.head_object(
Bucket=os.environ['CDR_BUCKET'],
Key=file['Key']
)
if 'Restore' in response:
file['glacierStatus']=response['Restore']
if response['Restore'].find('ongoing-request="false"') != -1:
copy_source = {
'Bucket': os.environ['CDR_BUCKET'],
'Key': file['Key']
}
s3_client.copy_object(
Bucket=os.environ['RESULT_BUCKET'],
CopySource=copy_source,
TaggingDirective='REPLACE',
Tagging="Type=Temp",
Key='private/'+id+'/'+file['Key']
)
else:
item = {
'reqId': file['Key'],
'reqId2': id,
'createdAt': str(dt.datetime.now()),
'userId': userId,
'ttl': int(time.time())+86400
}
restoration_table.put_item(Item=item)
fl['glacier']=1
else:
s3_client.restore_object(
Bucket=os.environ['CDR_BUCKET'],
Key=file['Key'],
RestoreRequest={'Days': 7, 'GlacierJobParameters': {'Tier': os.environ['RESTAURATION_TYPE']}}
)
item = {
'reqId': file['Key'],
'reqId2': id,
'createdAt': str(dt.datetime.now()),
'userId': userId,
'ttl': int(time.time())+86400
}
restoration_table.put_item(Item=item)
fl['glacier']=1
else:
copy_source = {
'Bucket': os.environ['CDR_BUCKET'],
'Key': file['Key']
}
s3_client.copy_object(
Bucket=os.environ['RESULT_BUCKET'],
CopySource=copy_source,
TaggingDirective='REPLACE',
Tagging="Type=Temp",
Key='private/'+id+'/'+file['Key']
)
audit(file,id)
return fl
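# Added summary comment: objects not stored in GLACIER / DEEP_ARCHIVE are copied
# straight to the result bucket; archived objects whose head_object response carries
# a 'Restore' header containing ongoing-request="false" have a completed restore and
# are copied as well; otherwise a restore is either already in flight or is requested
# via restore_object, and the key is recorded in the restoration table (24h TTL) so a
# later step can pick it up.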
def recover(d,id,userId):
fl = {}
fl['total']=0
fl['glacier']=0
finished=False
response = s3_client.list_objects_v2(
Bucket=os.environ['CDR_BUCKET'],
Prefix=str(d.year)+'/'+ str(d.month)+'/'+ str(d.day)+'/'
)
if (response['KeyCount']>0):
files = response['Contents']
for file in files:
flt=restore(file,id,userId)
fl['glacier']+=flt['glacier']
fl['total']+=flt['total']
finished = not response['IsTruncated']
if not finished:
next_token= response['NextContinuationToken']
while finished == False:
response = s3_client.list_objects_v2(
Bucket=os.environ['CDR_BUCKET'],
Prefix = str(d.year)+'/'+ str(d.month)+'/'+ str(d.day)+'/',
ContinuationToken = next_token
)
files = response['Contents']
for file in files:
audit(file,id)
flt=restore(file,id,userId)
fl['glacier']+=flt['glacier']
fl['total']+=flt['total']
finished = not response['IsTruncated']
if not finished:
next_token= response['NextContinuationToken']
return fl
def handler(event, context):
response = {}
id=event['id']
userId=event['userId']
response = table.query(
KeyConditionExpression=Key('id').eq(id) & Key('userId').eq(userId)
)
fr=dt.datetime.strptime(str(event['from']), '%Y-%m-%d')
to = dt.datetime.strptime(str(event['to']), '%Y-%m-%d')
current_glacier = 0
try:
current_glacier=int(str(response['Items'][0]['totalInGlacier']))
except:
table.update_item(
Key={'id': event['id'], 'userId': event['userId']},
UpdateExpression="set totalInGlacier = :val, totalFiles = :val2",
ExpressionAttributeValues={
':val': 0,
':val2': 0
})
print("first iteration")
table.update_item(
Key={'id': event['id'], 'userId': event['userId']},
UpdateExpression="set jobStatus = :val, lastUpdated = :val2, iteration = :val3",
ExpressionAttributeValues={
':val': 'processing',
':val2': str(dt.datetime.now()),
':val3': str(response['Items'][0]['from'])
}
)
    # Start 1 hour earlier
it=next_day(fr)
fl={}
fl['total']=0
fl['glacier']=0
try:
# while (it <= to):
flt=recover(it,id,userId)
fl['glacier']+=flt['glacier']
fl['total']+=flt['total']
table.update_item(
Key={'id': event['id'], 'userId': event['userId']},
ReturnValues="UPDATED_NEW",
UpdateExpression="set totalInGlacier = totalInGlacier + :val",
ExpressionAttributeValues={
':val': decimal.Decimal(flt['glacier'])
}
)
table.update_item(
Key={'id': event['id'], 'userId': event['userId']},
ReturnValues="UPDATED_NEW",
UpdateExpression="set totalFiles = totalFiles + :val",
ExpressionAttributeValues={
':val': decimal.Decimal(flt['total'])
}
)
#table.update_item(
# Key={'id': event['id'], 'userId': event['userId']},
# UpdateExpression="set totalFiles = :val",
# ExpressionAttributeValues={
# ':val': fl['total']
# })
if it==to and current_glacier == 0:
send_to_pack(id,userId)
except Exception as e:
table.update_item(
Key={'id': event['id'], 'userId': event['userId']},
UpdateExpression="set jobStatus = :val, lastUpdated = :val2, jobStatusDescription = :val3",
ExpressionAttributeValues={
':val': 'error',
':val2': str(dt.datetime.now()),
':val3': str(e)
})
raise e
response = {
"statusCode": 200,
"id": event['id'],
"from": it.strftime("%Y-%m-%d"),
"to": event['to'],
"filter": event['filter'],
"userId": event['userId'],
"continue": str(event['to']!=it.strftime("%Y-%m-%d"))
}
return response
```
#### File: aws-cdr-mgmt-frontend-web/functions/receiver.py
```python
import json
import boto3
import os
import uuid
import datetime as dt
from dateutil.tz import gettz
dynamodb = boto3.resource('dynamodb')
def handler(event, context):
response = {}
steps = boto3.client('stepfunctions')
try:
table = dynamodb.Table(os.environ['JOBS_TABLE'])
for record in event.get('Records'):
if record.get('eventName') in ('INSERT'):
record = record['dynamodb']
fr = record['NewImage']['from']['S']
to = record['NewImage']['to']['S']
filter = record['NewImage']['filter']['S']
id = record['NewImage']['id']['S']
userId= record['NewImage']['userId']['S']
item = {
'id': id,
'from': fr,
'to': to,
'filter': filter,
'userId': userId
}
response_s = steps.start_execution(
stateMachineArn=os.environ['STEPS_FUNC'],
name= id,
input = json.dumps(item)
)
table.update_item(
Key={'id': id, 'userId': userId},
UpdateExpression="set stepFunctionARN = :val",
ExpressionAttributeValues={
':val': response_s['executionArn']
}
)
response = {
"statusCode": 200
}
except Exception as e:
response = {
"statusCode": 403,
"error": str(e)
}
return response
```
|
{
"source": "jefp/aws-controltower-config-aggregator-notifier",
"score": 2
}
|
#### File: aws-controltower-config-aggregator-notifier/src/notify_config.py
```python
import json
import boto3
import os
from contextlib import closing
from tempfile import gettempdir
import datetime as dt
import re
from dateutil.tz import gettz
from boto3.dynamodb.conditions import Key, Attr
import botocore
dynamodb = boto3.resource('dynamodb')
def get_assume_role_credentials(role_arn):
sts_client = boto3.client('sts')
try:
assume_role_response = sts_client.assume_role(RoleArn=role_arn, RoleSessionName="AuditSESLambdaExecution")
return assume_role_response['Credentials']
except botocore.exceptions.ClientError as ex:
# Scrub error message for any internal account info leaks
print(str(ex))
if 'AccessDenied' in ex.response['Error']['Code']:
ex.response['Error']['Message'] = "AWS Lambda does not have permission to assume the IAM role."
else:
ex.response['Error']['Message'] = "InternalError"
ex.response['Error']['Code'] = "InternalError"
raise ex
def get_client(service, role_arn, assume_role):
"""Return the service boto client. It should be used instead of directly calling the client.
Keyword arguments:
service -- the service name used for calling the boto.client()
event -- the event variable given in the lambda handler
"""
if not assume_role:
return boto3.client(service)
credentials = get_assume_role_credentials(role_arn)
return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
)
def get_all_tags(client, account_id):
list_to_return = []
list = client.list_tags_for_resource(ResourceId=account_id)
while True:
for tags in list['Tags']:
list_to_return.append(tags)
        if 'NextToken' in list:
            next_token = list['NextToken']
            list = client.list_tags_for_resource(ResourceId=account_id, NextToken=next_token)
else:
break
return list_to_return
def get_tags(account_id):
organization_client = get_client('organizations', os.environ['MASTER_ROLE_ARN'],True)
account_tags = get_all_tags(organization_client, account_id)
result = {}
for tag in account_tags:
result[tag['Key']]=tag['Value']
return result
def send_email(rule_config,account_tags,details):
print(rule_config)
print(account_tags)
print(details)
if ( rule_config['NotificationEnabled' ] == False):
print("Email not sent by configuration")
return
mail_config = rule_config
lst = ['PrimaryOwner', 'GroupOwner', 'SecurityOwner','OperationOwner']
for config in lst:
if config in account_tags:
mail_config[config] = account_tags[config]
for config in lst:
        if rule_config[config] is not None and rule_config[config] != "None":
mail_config[config] = rule_config[config]
title = "Evaluación de política de compliance sobre recurso de AWS"
preheader = "{} - {}".format(details['resourceId'],details['newEvaluationResult']['complianceType'])
bg_color = "#990000"
if ('newEvaluationResult' in details and \
'complianceType' in details['newEvaluationResult'] and \
details['newEvaluationResult']['complianceType'] == "COMPLIANT"):
bg_color = "#007f00"
ses_client = get_client('ses', None,False)
template_data = '"awsAccountId":"{}",\
"awsRegion":"{}",\
"resourceType":"{}",\
"resourceId":"{}",\
"configRuleName":"{}",\
"complianceType":"{}",\
"configRuleInvokedTime":"{}",\
"resultRecordedTime":"{}",\
"notificationCreationTime":"{}",\
"MORE_INFO": "{}",\
"COMPANY": "{}",\
"SRC_LOGO": "{}",\
"PREHEADER": "{}",\
"TITLE": "{}",\
"BG_COLOR": "{}"'.format(
details['awsAccountId'],
details['awsRegion'],
details['resourceType'],
details['resourceId'],
details['configRuleName'],
details['newEvaluationResult']['complianceType'],
details['newEvaluationResult']['configRuleInvokedTime'],
details['newEvaluationResult']['resultRecordedTime'],
details['notificationCreationTime'],
os.environ['MORE_INFO'],
os.environ['COMPANY'],
os.environ['SRC_LOGO'],
preheader,
title,
bg_color
)
if re.match(r"[^@]+@[^@]+\.[^@]+", mail_config['PrimaryOwner']):
print("Primary owner {} is valid".format(mail_config['PrimaryOwner']))
else:
print("Email not sent. PrimaryOwner is not valid")
return
if re.match(r"[^@]+@[^@]+\.[^@]+", mail_config['GroupOwner']):
print("Group owner {} is valid".format(mail_config['GroupOwner']))
else:
print("Email not sent. GroupOwner is not valid")
return
response = ses_client.send_templated_email(
Source=os.environ['SES_EMAIL_SENDER'],
Destination={
'ToAddresses': [
mail_config['PrimaryOwner'],
],
'CcAddresses': [
mail_config['GroupOwner'],
]
},
ReplyToAddresses=[
os.environ['SES_EMAIL_REPLY_TO'],
],
Template=os.environ['SES_TEMPLATE_NAME'],
TemplateData='{'+template_data+'}',
ConfigurationSetName=os.environ['SES_CONFIGURATION_SET']
)
return response
def get_config(rule):
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
dresponse = table.query(
KeyConditionExpression=Key('id').eq('DefaultConfig')
)
config = { 'NotificationEnabled': False,
'PrimaryOwner': None,
'GroupOwner': None,
'SecurityOwner': None,
'OperationOwner': None
}
if (len(dresponse['Items']) == 1):
for k in dresponse['Items'][0]:
config[k]=dresponse['Items'][0][k]
#get key id for current rule
rule_response = table.scan()
for rule_id in rule_response['Items']:
if rule.startswith( rule_id['id'] ):
for k in rule_id:
config[k]=rule_id[k]
break
return config
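# Hypothetical DynamoDB item shapes (added for illustration; only the attribute names
# are taken from the lookups above, every value is a placeholder):
#   {"id": "DefaultConfig", "NotificationEnabled": true,
#    "PrimaryOwner": "<EMAIL>", "GroupOwner": "<EMAIL>"}
#   {"id": "some-config-rule-prefix", "NotificationEnabled": true,
#    "PrimaryOwner": "<EMAIL>"}
# Any Config rule whose name starts with a row's `id` inherits that row's values on
# top of DefaultConfig.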
def lambda_handler(event, context):
for i in range(len(event['Records'])):
message = event['Records'][i]['Sns']['Message']
messageJ = json.loads(message)
details = messageJ['detail']
rule_config = get_config(details['configRuleName'])
account_tags = get_tags(details['awsAccountId'])
send_email(rule_config,account_tags,details)
return
```
|
{
"source": "jefperito/mtgz",
"score": 3
}
|
#### File: jefperito/mtgz/main.py
```python
import re
import json
import argparse
from mtgz.colors import ColoredManaSymbol
from mtgz.search_engine import SearchEngine
from mtgz.services import DBUploader
def console():
arguments_parse = argparse.ArgumentParser(description='Magic: The Gathering Search Engine')
arguments_parse.add_argument('--name', metavar = '-n', nargs='*', help='look up for card\'s name')
arguments_parse.add_argument('--type', nargs='*', help='look up for card\'s type')
arguments_parse.add_argument('--text', nargs='*', help='look up for card\'s text')
arguments_parse.add_argument('--cmc', nargs='?', help='look up for card\'s cmc')
arguments_parse.add_argument('--rarity', nargs='?', help='look up for card\'s rarity')
arguments_parse.add_argument('--color', nargs='?', help='look up for card\'s color identity')
arguments_parse.add_argument('--upgrade', action='store_true', help='upgrade database')
return arguments_parse.parse_args()
def print_card(card):
painter = ColoredManaSymbol()
print('{0} {1}'.format(card['name'], (painter.color(card['manaCost']) if 'manaCost' in card else '')))
print('{0} {1}\n'.format(card['type'], '({0}/{1})'.format(card['power'], card['toughness']) if 'power' in card else ''))
if 'text' in card:
text = card['text']
for manacost in re.findall('{\w}|{\w/\w}', text):
text = text.replace(manacost, painter.color(manacost))
print(text)
print('------------------------------------------------')
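# Added note: the regex above colorizes single-character symbols such as {T}, {G} or
# {2} and two-character hybrids such as {U/R}; multi-digit costs like {10} are not
# matched by '{\w}' and are printed unpainted.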
def main():
DBUploader().upgrade()
arguments = console()
# mapped by card name
# TODO 'pickle' me
search_engine = SearchEngine(json.loads(open('AllCards.json').read()))
if arguments.name is not None:
search_engine.find_by('name', ' '.join(arguments.name))
if arguments.type is not None:
search_engine.find_by('type', ' '.join(arguments.type))
if arguments.text is not None:
search_engine.find_by('text', ' '.join(arguments.text))
if arguments.cmc is not None:
search_engine.find_by('cmc', arguments.cmc)
if arguments.color is not None:
search_engine.find_by('color', arguments.color)
if arguments.rarity is not None:
search_engine.find_by('rarity', arguments.rarity)
filtered_cards = search_engine.filter()
for card in filtered_cards: print_card(card)
print('\n{0} cards found'.format(len(filtered_cards)))
if __name__ == '__main__':
main()
```
#### File: mtgz/tests/search_engine_test.py
```python
import unittest
from mtgz.search_engine import SearchEngine
class TestSearchEngine(unittest.TestCase):
def setUp(self):
SearchEngine.find_by_arguments = {}
def test_find_by_with_one_parameters(self):
search_engine = SearchEngine()
search_engine.find_by('name', 'Black Lotus')
self.assertEqual({'name': 'Black Lotus'}, search_engine.find_by_arguments)
def test_find_by_with_many_parameters(self):
search_engine = SearchEngine()
search_engine.find_by('name', 'Black Lotus')
search_engine.find_by('type', 'Artifact')
search_engine.find_by('manaCost', '0')
self.assertEqual({'manaCost': '0', 'type': 'Artifact', 'name': 'Black Lotus'}, search_engine.find_by_arguments)
def test_filter_cards_by_name(self):
search_engine = SearchEngine(
{'Black Lotus': {
'name': 'Black Lotus'
},
'Ancestral Vision': {
'name': 'Ancestral Vision'
}
})
self.assertEqual([{'name': 'Black Lotus'}], search_engine.find_by('name', 'Black Lotus').filter())
def test_filter_cards_by_part_of_name(self):
search_engine = SearchEngine(
{'Black Lotus': {
'name': 'Black Lotus'
},
'Black Vise': {
'name': 'Black Vise'
},
'Thraximundar': {
'name': 'Thraximundar'
},
'Bringer of the Black Dawn': {
'name': 'Bringer of the Black Dawn'
}
})
filtered_cards = search_engine.find_by('name', 'Black').filter()
self.assertEqual(3, len(filtered_cards))
for card in filtered_cards:
self.assertTrue(card['name'] in ['Bringer of the Black Dawn', 'Black Vise', 'Black Lotus'])
def test_filter_card_no_cares_with_case_sensitive(self):
search_engine = SearchEngine(
{'Black Lotus': {
'name': '<NAME>',
'type': 'Artifact'
},
'Thraximundar': {
'name': 'Thraximundar',
'type': 'Legendary Creature - Zombie Assassin',
'power': 7,
'toughness': 7
}
})
filtered_cards = search_engine.find_by('type', 'artifact').filter()
self.assertEqual(1, len(filtered_cards))
self.assertEqual('Black Lotus', filtered_cards[0]['name'])
def test_filter_card_by_multiple_parameters(self):
search_engine = SearchEngine(
{'Black Lotus': {
'name': '<NAME>',
'type': 'Artifact',
'text': '{T}, Sacrifice Black Lotus: Add three mana of any one color to your mana pool.'
},
'Black Vise': {
'name': '<NAME>',
'type': 'Artifact',
'text': 'As Black Vise enters the battlefield, choose an opponent.'
},
'Black Knight': {
'name': '<NAME>',
'type': 'Creature - Human Knight',
'text': 'First strike\nProtection from white'
}
})
filtered_cards = search_engine.find_by('name', 'black').find_by('type', 'artifact').filter()
self.assertEqual(2, len(filtered_cards))
for card in filtered_cards:
self.assertTrue(card['name'] in ['Black Vise', 'Black Lotus'])
def test_filter_card_by_text(self):
search_engine = SearchEngine(
{'Black Lotus': {
'name': '<NAME>',
'type': 'Artifact',
'text': '{T}, Sacrifice Black Lotus: Add three mana of any one color to your mana pool.'
},
'Black Vise': {
'name': '<NAME>',
'type': 'Artifact',
'text': 'As Black Vise enters the battlefield, choose an opponent.'
},
'Black Knight': {
'name': '<NAME>',
'type': 'Creature - Human Knight',
'text': 'First strike\nProtection from white'
}
})
filtered_cards = search_engine.find_by('text', 'black').filter()
self.assertEqual(2, len(filtered_cards))
for card in filtered_cards:
self.assertTrue(card['name'] in ['Black Vise', 'Black Lotus'])
def test_filter_card_by_cmc(self):
search_engine = SearchEngine(
{'<NAME>': {
'name': '<NAME>',
'type': 'Artifact',
'text': '{T}, Sacrifice Black Lotus: Add three mana of any one color to your mana pool.',
'cmc': '0'
},
'<NAME>': {
'name': '<NAME>',
'type': 'Artifact',
'text': 'As Black Vise enters the battlefield, choose an opponent.',
'cmc': '2'
},
'Black Knight': {
'name': '<NAME>',
'type': 'Creature - Human Knight',
'text': 'First strike\nProtection from white',
'cmc': '2'
}
})
filtered_cards = search_engine.find_by('cmc', '2').filter()
self.assertEqual(2, len(filtered_cards))
for card in filtered_cards:
self.assertTrue(card['name'] in ['Black Vise', 'Black Knight'])
def test_filter_card_by_color_identity(self):
search_engine = SearchEngine(
{'<NAME>us': {
'name': '<NAME>',
'type': 'Artifact',
'text': '{T}, Sacrifice Black Lotus: Add three mana of any one color to your mana pool.',
'cmc': '0'
},
'Black Vise': {
'name': '<NAME>',
'type': 'Artifact',
'text': 'As Black Vise enters the battlefield, choose an opponent.',
'cmc': '2'
},
'Black Knight': {
'name': '<NAME>',
'type': 'Creature - Human Knight',
'text': 'First strike\nProtection from white',
'cmc': '2',
'colorIdentity': ['B']
},
'Thraximundar': {
'name': 'Thraximundar',
'type': 'Legendary Creature - Zombie Assassin',
'text': 'Whenever Thraximundar attacks, defending player sacrifices a creature.Whenever a player sacrifices a creature, you may put a +1/+1 counter on Thraximundar.',
'cmc': '7',
'colorIdentity': ['U', 'B', 'R']
}
})
filtered_cards = search_engine.find_by('color', 'b').filter()
self.assertEqual(2, len(filtered_cards))
for card in filtered_cards:
self.assertTrue(card['name'] in ['Thraximundar', 'Black Knight'])
def test_filter_incolor_card(self):
search_engine = SearchEngine(
{'<NAME>us': {
'name': '<NAME>',
'type': 'Artifact',
'text': '{T}, Sacrifice Black Lotus: Add three mana of any one color to your mana pool.',
'cmc': '0'
},
'Black Vise': {
'name': '<NAME>',
'type': 'Artifact',
'text': 'As Black Vise enters the battlefield, choose an opponent.',
'cmc': '2'
},
'Black Knight': {
'name': '<NAME>',
'type': 'Creature - Human Knight',
'text': 'First strike\nProtection from white',
'cmc': '2',
'colorIdentity': ['B']
},
'Thraximundar': {
'name': 'Thraximundar',
'type': 'Legendary Creature - Zombie Assassin',
'text': 'Whenever Thraximundar attacks, defending player sacrifices a creature.Whenever a player sacrifices a creature, you may put a +1/+1 counter on Thraximundar.',
'cmc': '7',
'colorIdentity': ['U', 'B', 'R']
}
})
filtered_cards = search_engine.find_by('color', 'c').filter()
self.assertEqual(2, len(filtered_cards))
for card in filtered_cards:
self.assertTrue(card['name'] in ['Black Lotus', 'Black Vise'])
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jefp/serverless-demo",
"score": 2
}
|
#### File: serverless-demo/functions/process.py
```python
import json
import boto3
import os
from contextlib import closing
from tempfile import gettempdir
import datetime as dt
from dateutil.tz import gettz
dynamodb = boto3.resource('dynamodb')
def ssml(device,severity):
return "<speak>New "+severity+" alarm on device"+ device +"</speak>"
def handler(event, context):
for i in range(len(event['Records'])):
message = event['Records'][i]['Sns']['Message']
messageID = event['Records'][i]['Sns']['MessageId']
polly = boto3.client('polly')
s3 = boto3.client('s3')
print(json.loads(message)['device'])
response = polly.synthesize_speech(
OutputFormat='mp3',
Text = ssml(json.loads(message)['device'],json.loads(message)['severity']),
VoiceId = 'Joanna',
TextType='ssml'
)
if "AudioStream" in response:
with closing(response["AudioStream"]) as stream:
output = os.path.join(gettempdir(), messageID )
try:
with open(output, "wb") as file:
file.write(stream.read())
except IOError as error:
print(error)
sys.exit(-1)
table = dynamodb.Table(os.environ['DYNAMO_TABLE'])
table.update_item(
Key={'MessageId': messageID},
UpdateExpression="set VoiceAlarmCreatedAt = :val",
ExpressionAttributeValues={
':val': str(dt.datetime.now())
}
)
s3.upload_file('/tmp/' + messageID, os.environ['S3'], messageID + ".mp3")
return
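# Added example (illustrative) of the payload this handler expects in each SNS
# record's 'Message' field; the values are placeholders:
#   {"device": "sensor-42", "severity": "critical"}
# The synthesized speech is uploaded to the bucket named by the S3 environment
# variable as "<MessageId>.mp3".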
```
|
{
"source": "jefraim/mqtt_sub_handler",
"score": 2
}
|
#### File: mqtt_sub_handler/tests/test_configuration.py
```python
import pytest
from mqtt_sub_handler.configuration import SubscriptionsConfiguration, ApplicationConfiguration
def test_get_but_subscription_file_does_not_exist():
with pytest.raises(FileNotFoundError):
SubscriptionsConfiguration('subscriptions_not_exist.ini')
def test_subscriptions_get_all_subscriptions():
subscriptions = SubscriptionsConfiguration('./tests/subscriptions.ini')
data = subscriptions.get_all_subscriptions()
assert len(data) == 3
assert data[0].Topic == 'topic/test/one'
assert data[0].QOS == '0'
assert data[0].ActionType == 'command'
assert data[0].Action == 'gammu-smsd-inject TEXT 123456 -text "All your base are belong to us one"'
assert len(data[0].ParameterFields) == 0
assert data[1].Topic == 'topic/test/two'
assert data[1].QOS == '1'
assert data[1].ActionType == 'command'
assert data[1].Action == 'gammu-smsd-inject TEXT 123456 -text "All your base are belong to us two"'
assert len(data[1].ParameterFields) == 0
assert data[2].Topic == 'topic/test/three'
assert data[2].QOS == '2'
assert data[2].ActionType == 'command'
assert data[2].Action == 'gammu-smsd-inject TEXT {} -text "{}"'
assert len(data[2].ParameterFields) == 2
assert data[2].ParameterFields[0] == "from"
assert data[2].ParameterFields[1] == "message"
def test_subscriptions_get_all_topics():
subscriptions = SubscriptionsConfiguration('./tests/subscriptions.ini')
data = subscriptions.get_all_topics()
assert len(data) == 3
assert data[0][0] == 'topic/test/one'
assert data[0][1] == '0'
assert data[1][0] == 'topic/test/two'
assert data[1][1] == '1'
assert data[2][0] == 'topic/test/three'
assert data[2][1] == '2'
def test_get_topic_subscriptions():
subscriptions = SubscriptionsConfiguration('./tests/subscriptions.ini')
data = subscriptions.get_topic_subscriptions('topic/test/one')
assert len(data) == 1
assert data[0].Topic == 'topic/test/one'
assert data[0].QOS == '0'
assert data[0].ActionType == 'command'
assert data[0].Action == 'gammu-smsd-inject TEXT 123456 -text "All your base are belong to us one"'
def test_get_topic_subscriptions_with_multiple_entries():
subscriptions = SubscriptionsConfiguration('./tests/subscriptions2.ini')
data = subscriptions.get_topic_subscriptions('topic/test/one')
assert len(data) == 2
assert data[0].Topic == 'topic/test/one'
assert data[0].QOS == '0'
assert data[0].ActionType == 'command'
assert data[0].Action == 'gammu-smsd-inject TEXT 123456 -text "All your base are belong to us one"'
assert data[1].Topic == 'topic/test/one'
assert data[1].QOS == '1'
assert data[1].ActionType == 'command'
assert data[1].Action == 'gammu-smsd-inject TEXT 123456 -text "All your base are belong to us two"'
def test_app_get():
app = ApplicationConfiguration('tests/config.ini')
assert app.get('mqtt_host') == 'localhost'
# if __name__ == '__main__':
# unittest.main()
```
|
{
"source": "JEfrainHV/Payo_bot",
"score": 2
}
|
#### File: JEfrainHV/Payo_bot/pybot.py
```python
from urllib.request import urlopen
from telegram import Update
from telegram.ext import (Updater, CommandHandler, CallbackContext, ConversationHandler,
                          Filters, MessageHandler, CallbackQueryHandler)
from telegram import (InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup,
                      ReplyKeyboardRemove, ForceReply, BotCommand, bot, Bot)
from datetime import datetime, timedelta, date, time
import json, urllib
import requests
INPUT_TEXT = 0
def start(update, context):
boton1 = InlineKeyboardButton(
text='Rashid',
callback_data='rashid')
boton2 = InlineKeyboardButton(
text='Share Exp',
callback_data='share')
boton3 = InlineKeyboardButton(
text='Imbuements',
callback_data='imbuements')
boton4 = InlineKeyboardButton(
text='1',
url='www.tibia.com')
boton5 = InlineKeyboardButton(
text='2',
url='www.tibia.com')
boton6 = InlineKeyboardButton(
text='3',
url='www.tibia.com')
# update.message.reply_text(f'Bienvenido Humano')
update.message.reply_text(
text='Selecciona una Opcion',
reply_markup=InlineKeyboardMarkup([
[boton1], [boton2],
[boton3], [boton4],
[boton5], [boton6]
])
)
updater = Updater('1763562323:AAG2vkKIJXGMBo6koOWyaqQTbZ2ZCCWaeXk')
def guilds(update, context):
update.message.reply_text('Probando funcionalidad del codigo QR')
def rashid(update: Update, context: CallbackContext) -> None:
#update.message.reply_text('Con esto veremos donde esta Rashid el dia de hoy')
context.bot.send_message(chat_id=update.effective_chat.id, text="Con esto veremos donde esta Rashid el dia de hoy")
hoy = datetime.today()
#print (hoy)
ayer = hoy - timedelta(hours=3)
#print( ayer)
hoy=hoy.strftime("%Y-%m-%d %H:%M:%S.%f")
day = date.weekday(ayer)
dia = day
#print("el dia es: " + str(dia))
if dia == 0:
#print('hoy es Lunes, Rashid está en Svargrond')
#update.message.reply_text('hoy es Lunes, Rashid está en Svargrond')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Lunes, Rashid está en Svargrond")
elif dia == 1:
#print('hoy es Martes, Rashid está en Liberty Bay')
#update.message.reply_text('hoy es Martes, Rashid está en Liberty Bay')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Martes, Rashid está en Liberty Bay")
elif dia == 2:
#print('hoy es Miercoles, Rashid está en Port Hope')
#update.message.reply_text('hoy es Miercoles, Rashid está en Port Hope')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Miercoles, Rashid está en Port Hope")
elif dia == 3:
#print('hoy es Jueves, Rashid está en Ankrahmun')
#update.message.reply_text('hoy es Jueves, Rashid está en Ankrahmun')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Jueves, Rashid está en Ankrahmun")
elif dia == 4:
#print('hoy es Viernes, Rashid está en Darashia')
#update.message.reply_text('hoy es Viernes, Rashid está en Darashia')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Viernes, Rashid está en Darashia")
elif dia == 5:
#print('hoy es Sabado, Rashid está en Edron')
#update.message.reply_text('hoy es Sabado, Rashid está en Edron')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Sabado, Rashid está en Edron")
elif dia == 6:
#print('hoy es Domingo, Rashid está en Carlin')
#update.message.reply_text('hoy es Domingo, Rashid está en Carlin')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Domingo, Rashid está en Carlin")
else:
print('error')
def rashid_callback(update, context):
query = update.callback_query
query.answer()
hoy = datetime.today()
# print (hoy)
ayer = hoy - timedelta(hours=3)
# print( ayer)
# hoy=hoy.strftime("%Y-%m-%d %H:%M:%S.%f")
day = date.weekday(ayer)
dia = day
# print("el dia es: " + str(dia))
if dia == 0:
# print('hoy es Lunes, Rashid está en Svargrond')
# update.message.reply_text('hoy es Lunes, Rashid está en Svargrond')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Lunes, Rashid está en Svargrond")
elif dia == 1:
# print('hoy es Martes, Rashid está en Liberty Bay')
# update.message.reply_text('hoy es Martes, Rashid está en Liberty Bay')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Martes, Rashid está en Liberty Bay")
elif dia == 2:
# print('hoy es Miercoles, Rashid está en Port Hope')
# update.message.reply_text('hoy es Miercoles, Rashid está en Port Hope')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Miercoles, Rashid está en Port Hope")
elif dia == 3:
# print('hoy es Jueves, Rashid está en Ankrahmun')
# update.message.reply_text('hoy es Jueves, Rashid está en Ankrahmun')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Jueves, Rashid está en Ankrahmun")
elif dia == 4:
# print('hoy es Viernes, Rashid está en Darashia')
# update.message.reply_text('hoy es Viernes, Rashid está en Darashia')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Viernes, Rashid está en Darashia")
elif dia == 5:
# print('hoy es Sabado, Rashid está en Edron')
# update.message.reply_text('hoy es Sabado, Rashid está en Edron')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Sabado, Rashid está en Edron")
elif dia == 6:
# print('hoy es Domingo, Rashid está en Carlin')
# update.message.reply_text('hoy es Domingo, Rashid está en Carlin')
context.bot.send_message(chat_id=update.effective_chat.id, text="hoy es Domingo, Rashid está en Carlin")
else:
print('error')
def share(update: Update, context: CallbackContext) -> None:
#update.message.reply_text('Que nivel Eres?')
context.bot.send_message(chat_id=update.effective_chat.id, text="Que Nivel Eres ???")
return INPUT_TEXT
def share_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text(text='Que Nivel Eres ???')
context.bot.send_message(chat_id=update.effective_chat.id, text="Que Nivel Eres ???")
return INPUT_TEXT
def calc_share(update, context):
text = update.message.text
lvl = int(text)
print(lvl)
lvlmax = 0
lvlmin = 0
# update.message.reply_text('Compartes Experiencia con')
context.bot.send_message(chat_id=update.effective_chat.id, text="Compartes Experiencia con :")
lvlmax = (lvl / 2) * 3
lvlmax = round(lvlmax)
lvlmin = (lvl / 3) * 2
lvlmin = round(lvlmin)
context.bot.send_message(chat_id=update.effective_chat.id, text="Nivel Maximo :" + str(lvlmax))
context.bot.send_message(chat_id=update.effective_chat.id, text="Nivel Minimo :" + str(lvlmin))
#update.message.reply_text('Max: ' + str(lvlmax))
#update.message.reply_text('Min: ' + str(lvlmin))
print(lvlmin)
print(lvlmax)
return ConversationHandler.END
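# Added worked example: for a level 100 character the shared-experience window is
# round(100 / 3 * 2) = 67 up to round(100 / 2 * 3) = 150, i.e. levels 67 to 150.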
def strike(update: Update, context: CallbackContext) -> None:
#update.message.reply_text('20 Protective charms\n25 Sabretooth\n5 Vexclaw Talon')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Protective charms, 25 Sabretooth, 5 Vexclaw Talon")
def void(update: Update, context: CallbackContext) -> None:
#update.message.reply_text('25 rope belts\n25 silencer claws\n5 Grimeleech Wings')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 rope belts, 25 silencer claws, 5 Grimeleech Wings")
def vampire(update: Update, context: CallbackContext) -> None:
#update.message.reply_text('25 vampire teeth\n15 bloody pincers\n5 piece of death brains ')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 vampire teeth, 15 bloody pincers, 5 piece of death brains ")
def mort(update, context):
#update.message.reply_text('25 Pile of Grave Earth + \n20 Demonic Skeletal Hands + \n5 Petrified Screams')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Pile of Grave Earth, 20 Demonic Skeletal Hands, 5 Petrified Screams")
def mort_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Pile of Grave Earth + \n20 Demonic Skeletal Hands + \n5 Petrified Screams')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Pile of Grave Earth, 20 Demonic Skeletal Hands, 5 Petrified Screams")
def protec_mort(update, context):
#update.message.reply_text('25 Flask of Embalming Fluid + \n20 Gloom Wolf Furs + \n5 Mystical Hourglasses')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Flask of Embalming Fluid, 20 Gloom Wolf Furs, 5 Mystical Hourglasses")
def protec_mort_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Flask of Embalming Fluid + \n20 Gloom Wolf Furs + \n5 Mystical Hourglasses')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Flask of Embalming Fluid, 20 Gloom Wolf Furs, 5 Mystical Hourglasses")
def energy(update, context):
#update.message.reply_text('25 Rorc Feathers + 5 Peacock Feather Fans + 1 Energy Vein')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Rorc Feathers + 5 Peacock Feather Fans + 1 Energy Vein")
def energy_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Rorc Feathers + 5 Peacock Feather Fans + 1 Energy Vein')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Rorc Feathers + 5 Peacock Feather Fans + 1 Energy Vein")
def protec_energy(update, context):
#update.message.reply_text('20 Wyvern Talismans + 15 Crawler Head Platings + 10 Wyrm Scales')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Wyvern Talismans + 15 Crawler Head Platings + 10 Wyrm Scales")
def protec_energy_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Wyvern Talismans + 15 Crawler Head Platings + 10 Wyrm Scales')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Wyvern Talismans + 15 Crawler Head Platings + 10 Wyrm Scales")
def tera(update, context):
#update.message.reply_text('25 Swamp Grasses + 20 Poisonous Slimes + 2 Slime Hearts')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Swamp Grasses + 20 Poisonous Slimes + 2 Slime Hearts")
def tera_callback(update,context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Swamp Grasses + 20 Poisonous Slimes + 2 Slime Hearts')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Swamp Grasses + 20 Poisonous Slimes + 2 Slime Hearts")
def protec_tera(update, context):
    #update.message.reply_text('25 Piece of Swampling Wood + 20 Snake Skins + 10 Brimstone Fangs')
    context.bot.send_message(chat_id=update.effective_chat.id, text="25 Piece of Swampling Wood + 20 Snake Skins + 10 Brimstone Fangs")
def protec_tera_callback(update,context):
query = update.callback_query
query.answer()
    #query.edit_message_text('25 Piece of Swampling Wood + 20 Snake Skins + 10 Brimstone Fangs')
    context.bot.send_message(chat_id=update.effective_chat.id, text="25 Piece of Swampling Wood + 20 Snake Skins + 10 Brimstone Fangs")
def fire(update, context):
#update.message.reply_text('25 Fiery Hearts + 5 Green Dragon Scales + 5 Demon Horns')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Piece of Swampling Wood + 20 Snake Skins + 10 Brimstone Fangss")
def fire_callback(update,context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Fiery Hearts + 5 Green Dragon Scales + 5 Demon Horns')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Fiery Hearts + 5 Green Dragon Scales + 5 Demon Horns")
def protec_fire(update, context):
#update.message.reply_text('20 Green Dragon Leathers + 10 Blazing Bones + 5 Draken Sulphurs')
context.bot.send_message(chat_id=update.effective_chat.id,text="20 Green Dragon Leathers + 10 Blazing Bones + 5 Draken Sulphurs")
def protec_fire_callback(update,context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Green Dragon Leathers + 10 Blazing Bones + 5 Draken Sulphurs')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Green Dragon Leathers + 10 Blazing Bones + 5 Draken Sulphurs")
def ice(update, context):
#update.message.reply_text('25 Frosty Hearts + 10 Seacrest Hairs + 5 Polar Bear Paws')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Frosty Hearts + 10 Seacrest Hairs + 5 Polar Bear Paws")
def ice_callback(update,context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Frosty Hearts + 10 Seacrest Hairs + 5 Polar Bear Paws')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Frosty Hearts + 10 Seacrest Hairs + 5 Polar Bear Paws")
def protec_ice(update, context):
#update.message.reply_text('25 Winter Wolf Furs + 15 Thick Furs + 10 Deepling Warts')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Winter Wolf Furs + 15 Thick Furs + 10 Deepling Warts")
def protec_ice_callback(update,context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Winter Wolf Furs + 15 Thick Furs + 10 Deepling Warts')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Winter Wolf Furs + 15 Thick Furs + 10 Deepling Warts")
def protec_holy(update, context):
#update.message.reply_text('25 Cultish Robes + 25 Cultish Masks + 20 Hellspawn Tails')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Cultish Robes + 25 Cultish Masks + 20 Hellspawn Tails")
def protect_holly_callback(update,context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Cultish Robes + 25 Cultish Masks + 20 Hellspawn Tails')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Cultish Robes + 25 Cultish Masks + 20 Hellspawn Tails")
def club_skill(update, context):
#update.message.reply_text('20 Cyclops Toe + 15 Ogre Nose Rings + 10 Warmasters Wristguards')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Cyclops Toe + 15 Ogre Nose Rings + 10 Warmasters Wristguards")
def club_skill_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Cyclops Toe + 15 Ogre Nose Rings + 10 Warmasters Wristguards')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Cyclops Toe + 15 Ogre Nose Rings + 10 Warmasters Wristguards")
def axe_skill(update, context):
#update.message.reply_text('20 Orc Tooth + 25 Battle Stones + 20 Moohtant Horns')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Orc Tooth + 25 Battle Stones + 20 Moohtant Horns")
def axe_skill_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Orc Tooth + 25 Battle Stones + 20 Moohtant Horns')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Orc Tooth + 25 Battle Stones + 20 Moohtant Horns")
def sword_skill(update, context):
#update.message.reply_text('25 Lions Mane + 25 Moohtah Shells + 5 War Crystals')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Lions Mane + 25 Moohtah Shells + 5 War Crystals")
def sword_skill_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Lions Mane + 25 Moohtah Shells + 5 War Crystals')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Lions Mane + 25 Moohtah Shells + 5 War Crystals")
def shield_skill(update, context):
#update.message.reply_text('20 Piece of Scarab Shell + 25 Brimstone Shells + 25 Frazzle Skins')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Piece of Scarab Shell + 25 Brimstone Shells + 25 Frazzle Skins")
def shield_skill_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Piece of Scarab Shell + 25 Brimstone Shells + 25 Frazzle Skins')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Piece of Scarab Shell + 25 Brimstone Shells + 25 Frazzle Skins")
def magic_skill(update, context):
#update.message.reply_text('25 Elvish Talismans + 15 Broken Shamanic Staffs + 15 Strand of Medusa Hair')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Elvish Talismans + 15 Broken Shamanic Staffs + 15 Strand of Medusa Hair")
def magic_skill_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Elvish Talismans + 15 Broken Shamanic Staffs + 15 Strand of Medusa Hair')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Elvish Talismans + 15 Broken Shamanic Staffs + 15 Strand of Medusa Hair")
def distance_skill(update, context):
#update.message.reply_text('25 Elven Scouting Glasses + 20 Elven Hoofs + 10 Metal Spikes')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Elven Scouting Glasses + 20 Elven Hoofs + 10 Metal Spikes")
def distance_skill_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('25 Elven Scouting Glasses + 20 Elven Hoofs + 10 Metal Spikes')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 Elven Scouting Glasses + 20 Elven Hoofs + 10 Metal Spikes")
def backpack(update, context):
#update.message.reply_text('20 Fairy Wings + 10 Little Bowl of Myrrhs + 5 Goosebump Leather')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Fairy Wings + 10 Little Bowl of Myrrhs + 5 Goosebump Leather")
def backpack_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Fairy Wings + 10 Little Bowl of Myrrhs + 5 Goosebump Leather')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Fairy Wings + 10 Little Bowl of Myrrhs + 5 Goosebump Leather")
def speed(update, context):
#update.message.reply_text('15 Damselfly Wings + 25 Compasses + 20 Waspoid Wings')
context.bot.send_message(chat_id=update.effective_chat.id, text="15 Damselfly Wings + 25 Compasses + 20 Waspoid Wings")
def speed_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('15 Damselfly Wings + 25 Compasses + 20 Waspoid Wings')
context.bot.send_message(chat_id=update.effective_chat.id, text="15 Damselfly Wings + 25 Compasses + 20 Waspoid Wings")
def antiparalyze(update, context):
#update.message.reply_text('20 Wereboar Hooves + 15 Crystallized Angers + 5 Quills')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Wereboar Hooves + 15 Crystallized Angers + 5 Quills")
def antiparalyze_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text('20 Wereboar Hooves + 15 Crystallized Angers + 5 Quills')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Wereboar Hooves + 15 Crystallized Angers + 5 Quills")
def imbuements(update, context):
query_imb = update.callback_query
query_imb.answer()
boton7 = InlineKeyboardButton(
text='Void',
callback_data='void_callback')
boton8 = InlineKeyboardButton(
text='Life Leech',
callback_data='vampire_callback')
boton9 = InlineKeyboardButton(
text='Critico',
callback_data='strike_callback')
boton10 = InlineKeyboardButton(
text='Proteccion',
callback_data='proteccion_callback')
boton11 = InlineKeyboardButton(
text='Daño',
callback_data='dano_callback')
boton12 = InlineKeyboardButton(
text='Aumento Skills',
callback_data='skill')
boton13 = InlineKeyboardButton(
text='Otros',
callback_data='otros_callback')
query_imb.edit_message_text(
text='Imbuements',
reply_markup=InlineKeyboardMarkup([
[boton7],
[boton8],
[boton9],
[boton10],
[boton11],
[boton12],
[boton13]
])
)
def proteccion_callback (update, context):
query_imb = update.callback_query
query_imb.answer()
boton14 = InlineKeyboardButton(
text='Mort',
callback_data='protec_mort_callback')
boton15 = InlineKeyboardButton(
text='Holy',
callback_data='protec_holy_callback')
boton16 = InlineKeyboardButton(
text='Fire',
callback_data='protec_fire_callback')
boton17 = InlineKeyboardButton(
text='Ice',
callback_data='protec_ice_callback')
boton18 = InlineKeyboardButton(
text='Tera',
callback_data='protec_tera_callback')
boton19 = InlineKeyboardButton(
text='Energy',
callback_data='protec_energy_callback')
query_imb.edit_message_text(
text='Proteccion Elemental',
reply_markup=InlineKeyboardMarkup([
[boton14],
[boton15],
[boton16],
[boton17],
[boton18],
[boton19]
])
)
def dano_callback(update, context):
query_imb = update.callback_query
query_imb.answer()
boton20 = InlineKeyboardButton(
text='Mort',
callback_data='mort_callback')
boton21 = InlineKeyboardButton(
text='Fire',
callback_data='fire_callback')
boton22 = InlineKeyboardButton(
text='Ice',
callback_data='ice_callback')
boton23 = InlineKeyboardButton(
text='Tera',
callback_data='tera_callback')
boton24 = InlineKeyboardButton(
text='Energy',
callback_data='energy_callback')
query_imb.edit_message_text(
text='Daño Elemental',
reply_markup=InlineKeyboardMarkup([
[boton20],
[boton21],
[boton22],
[boton23],
[boton24]
])
)
def skill_callback(update, context):
query_imb = update.callback_query
query_imb.answer()
boton25 = InlineKeyboardButton(
text='Axe',
callback_data='axe_skill_callback')
boton26 = InlineKeyboardButton(
text='Club',
callback_data='club_skill_callback')
boton27 = InlineKeyboardButton(
text='Sword',
callback_data='sword_skill_callback')
boton28 = InlineKeyboardButton(
text='Shielding',
callback_data='shield_skill_callback')
boton29 = InlineKeyboardButton(
text='Magic Level',
callback_data='magic_skill_callback')
boton30 = InlineKeyboardButton(
text='Distance',
callback_data='distance_skill_callback')
query_imb.edit_message_text(
        text='Aumento de Skills',
reply_markup=InlineKeyboardMarkup([
[boton25],
[boton26],
[boton27],
[boton28],
[boton29],
[boton30]
])
)
def otros_callback(update, context):
query_imb = update.callback_query
query_imb.answer()
boton31 = InlineKeyboardButton(
text='BackPack Capacity',
callback_data='backpack_callback')
boton32 = InlineKeyboardButton(
text='Aumento Speed',
callback_data='speed_callback')
boton33 = InlineKeyboardButton(
text='Antiparalyze',
callback_data='antiparalyze_callback')
query_imb.edit_message_text(
text='Otros Imbuements',
reply_markup=InlineKeyboardMarkup([
[boton31],
[boton32],
[boton33]
])
)
def vampire_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text(text='25 vampire teeth\n15 bloody pincers\n5 piece of death brains')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 vampire teeth\n15 bloody pincers\n5 piece of death brains")
def void_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text(text='25 rope belts\n25 silencer claws\n5 Grimeleech Wings')
context.bot.send_message(chat_id=update.effective_chat.id, text="25 rope belts\n25 silencer claws\n5 Grimeleech Wings")
def strike_callback(update, context):
query = update.callback_query
query.answer()
#query.edit_message_text(text='20 Protective charms\n25 Sabretooth\n5 Vexclaw Talon')
context.bot.send_message(chat_id=update.effective_chat.id, text="20 Protective charms\n25 Sabretooth\n5 Vexclaw Talon")
print('estamos dentro')
updater.dispatcher.add_handler(CommandHandler('guilds', guilds))
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('rashid', rashid),
CallbackQueryHandler(pattern='rashid', callback=rashid_callback)
],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('share', share),
CallbackQueryHandler(pattern='share', callback=share_callback)],
states={
INPUT_TEXT: [MessageHandler(Filters.text, calc_share)]
},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('strike', strike),
CallbackQueryHandler(pattern='strike', callback=strike_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('void', void),
CallbackQueryHandler(pattern='void', callback=void_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('vampire', vampire),
CallbackQueryHandler(pattern='vampire', callback=vampire_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(pattern='imbuements', callback=imbuements)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(pattern='proteccion', callback=proteccion_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(pattern='dano', callback=dano_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(pattern='skill', callback=skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CallbackQueryHandler(pattern='otros', callback=otros_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('mort', mort),
CallbackQueryHandler(pattern='mort', callback=mort_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('tera', tera),
CallbackQueryHandler(pattern='tera', callback=tera_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('energy', energy),
CallbackQueryHandler(pattern='energy', callback=energy_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('fire', fire),
CallbackQueryHandler(pattern='fire', callback=fire_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('ice', ice),
CallbackQueryHandler(pattern='ice', callback=ice_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('protec_mort', protec_mort),
CallbackQueryHandler(pattern='protec_mort', callback=protec_mort_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('protec_energy', protec_energy),
CallbackQueryHandler(pattern='protec_energy', callback=protec_energy_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('protec_tera', protec_tera),
CallbackQueryHandler(pattern='protec_tera', callback=protec_tera_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('protec_fire', protec_fire),
CallbackQueryHandler(pattern='protec_fire', callback=protec_fire_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('protec_ice', protec_ice),
CallbackQueryHandler(pattern='protec_ice', callback=protec_ice_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('protec_holy', protec_holy),
CallbackQueryHandler(pattern='protec_holy', callback=protect_holly_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('club_skill', club_skill),
CallbackQueryHandler(pattern='club_skill', callback=club_skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('axe_skill', axe_skill),
CallbackQueryHandler(pattern='axe_skill', callback=axe_skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('sword_skill', sword_skill),
CallbackQueryHandler(pattern='sword_skill', callback=sword_skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('shield_skill', shield_skill),
CallbackQueryHandler(pattern='shield_skill', callback=shield_skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('magic_skill', magic_skill),
CallbackQueryHandler(pattern='magic_skill', callback=magic_skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('distance_skill', distance_skill),
CallbackQueryHandler(pattern='distance_skill', callback=distance_skill_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('backpack', backpack),
CallbackQueryHandler(pattern='backpack', callback=backpack_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('speed', speed),
CallbackQueryHandler(pattern='speed', callback=speed_callback)],
states={},
fallbacks=[])
)
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('antiparalyze', antiparalyze),
CallbackQueryHandler(pattern='antiparalyze', callback=antiparalyze_callback)],
states={},
fallbacks=[])
)
updater.start_polling()
updater.idle()
```
|
{
"source": "jefri-netizen/belajarpython",
"score": 3
}
|
#### File: belajarpython/kelas_2b/etika.py
```python
import csv
class Hitung:
def Isi(self):
self.f = open('kelas_2b/Etika.csv','r')
self.reader = csv.reader(self.f)
for row in self.reader:
print ('Jumlah dari %s + %s adalah %s' %(row[0], row[1], float(row[0]) + float(row[1])))
self.f.close()
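# Illustrative input assumption: each row of kelas_2b/Etika.csv holds two numbers,
# e.g. a row "2,3" prints "Jumlah dari 2 + 3 adalah 5.0".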
```
#### File: belajarpython/kelas_2c/Diann.py
```python
import csv
class Diann(object):
def Nama(self):
with open('kelas_2c/Dian.csv', 'r') as file:
sic = csv.reader(file, delimiter=',')
for row in sic:
print("Atas Nama",row[0], "Bekerja sebagai", row[1], "dan lulusan tahun", row[2])
```
#### File: belajarpython/kelas_2c/helmiazhar.py
```python
import csv
class helmi:
def stok(self):
tes = open('kelas_2c/stok.csv')
        data = csv.reader(tes, delimiter=',')
for row in data:
print (row)
```
|
{
"source": "jefrysastre/dsbot",
"score": 2
}
|
#### File: algoritms/pyod/lmdd_algorithm.py
```python
from pyod.models.lmdd import LMDD
class LmddAlgorithm:
algorithm = LMDD
name = "LMDD"
import_code = "from pyod.models.lmdd import LMDD"
def __init__(self):
pass
@classmethod
def get_arg(cls, **args):
return {}
@classmethod
def get_args_code(cls, **args):
return ''
```
#### File: commands/base/command.py
```python
import random
class Command:
def __init__(self, parent, task_manager):
self.parent = parent
self.task_manager = task_manager
self.text = ""
self.responses = ["Empty"]
self.context_set = ""
self.complete = True
def print(self, level):
_level = ""
for _ in range(0,level):
_level += ' '
print("{0}-- {1}: {2}".format(
_level,
self.tag.capitalize(),
self.text
))
if hasattr(self,'children'):
for child in self.children:
child.print(level=level+2)
def run(self, context):
pass
def verify_params(self, context):
return []
def propagate_text(self, text):
if hasattr(self,'children'):
for child in self.children:
child.propagate_text(text)
def generate_code(self, code_generator, context):
if hasattr(self, 'children'):
for child in self.children:
child.generate_code(code_generator, context)
def report(self, message):
self.parent.report(message)
```
#### File: commands/base/greetings.py
```python
import random
from .command import Command
class GreetingsCommand(Command):
tag = "greeting"
patterns = ["Oi", "bom dia", "olá", "tudo bem?", "como vai?", "boa tarde", "boa noite"]
def __init__(self, parent, task_manager):
super(GreetingsCommand, self).__init__(parent, task_manager)
self.user_config_tag = "speaking"
self.responses = ["Olá!", "Bom te ver de novo!", "Opa, como eu posso ajudar?"]
def forward(self, text, context):
user_expertise = context['user_config']['commands']
user_expertise[self.user_config_tag] += .01
# normalize other commands user preferences
for key, value in user_expertise.items():
user_expertise[key] = value / 1.01
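        # Dividing every weight by 1.01 after the +0.01 bump keeps the preference scores
        # summing to (roughly) the same total while shifting a little mass toward 'speaking'.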
self.complete = True
return self.parent.backward(self.tag, random.choice(self.responses), context)
```
#### File: dsbot/corpus/bag_words.py
```python
import pickle
import nltk
import re
import numpy as np
from string import punctuation
# English
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.rslp import RSLPStemmer
class BagOfWordsCorpus:
def __init__(self, save_path, commands, verbose, force_training=False):
self.verbose = verbose
self.save_path = save_path
# English
# self.stemmer = LancasterStemmer()
# Portuguese
self.stemmer = RSLPStemmer()
self.stopwords = set(nltk.corpus.stopwords.words('portuguese') + list(punctuation))
self.commands = commands
if force_training:
self.load_corpus()
else:
try:
with open(save_path, "rb") as f:
self.words, self.labels, self.training, self.output = pickle.load(f)
except:
self.load_corpus()
def load_corpus(self):
words = []
labels = []
docs_x = []
docs_y = []
# for intent in data["intents"]:
for key, command in self.commands.items():
for pattern in command.patterns:
wrds = nltk.word_tokenize(pattern)
wrds = [word for word in wrds if word not in self.stopwords]
wrds = [self.stemmer.stem(w.lower()) for w in wrds]
words.extend(wrds)
docs_x.append(wrds)
docs_y.append(command.tag)
if command.tag not in labels:
labels.append(command.tag)
words = sorted(list(set(words)))
labels = sorted(labels)
training = []
output = []
out_empty = [0 for _ in range(len(labels))]
for x, wrds in enumerate(docs_x):
bag = []
for w in words:
if w in wrds:
bag.append(1)
else:
bag.append(0)
output_row = out_empty[:]
output_row[labels.index(docs_y[x])] = 1
training.append(bag)
output.append(output_row)
training = np.array(training)
output = np.array(output)
self.words = words
self.labels = labels
self.training = training
self.output = output
with open("data/data.pickle", "wb") as f:
pickle.dump((words, labels, training, output), f)
def encode(self, sentence):
bag = [0 for _ in range(len(self.words))]
wrds = nltk.word_tokenize(sentence)
wrds = [word for word in wrds if word not in self.stopwords]
wrds = [self.stemmer.stem(w.lower()) for w in wrds]
corrected_input = wrds
# corrent user input spelling caso seja entrada digitada
# corrected_input = []
# for userinput_word in s_words:
# # spell checking
# # userinput_word = reduce_lengthening(userinput_word)
# correct_word = spelling.correction(userinput_word)
# corrected_input.append(correct_word)
if self.verbose:
print("Mensagem do usuario corregida para: {0}".format(corrected_input))
for se in wrds:
for i, w in enumerate(self.words):
if w == se:
bag[i] = 1
return np.array(bag)
def reduce_lengthening(self, word):
pattern = re.compile(r"(.)\1{2,}")
return pattern.sub(r"\1\1", word)
    def add(self, sentence, tag):
        try:
            # read the dataset (same 4-tuple layout written by load_corpus)
            with open(self.save_path, "rb") as f:
                self.words, self.labels, self.training, self.output = pickle.load(f)
            x = self.encode(sentence)
            # skip the phrase if it is already in the dataset
            if any(np.array_equal(x, row) for row in self.training):
                return
            y = [0 for _ in range(len(self.labels))]
            y[self.labels.index(tag)] = 1
            self.training = np.vstack([self.training, x])
            self.output = np.vstack([self.output, y])
            # add the current phrase to the dataset
            with open(self.save_path, "wb") as f:
                pickle.dump((self.words, self.labels, self.training, self.output), f)
        except Exception as e:
            print(e)
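# Hypothetical usage sketch (the `commands` dict and the pickle path are assumptions,
# not part of this module): build the bag-of-words corpus from the registered commands
# and encode a user sentence into the 0/1 feature vector used by the intent classifier.
#
# corpus = BagOfWordsCorpus("data/data.pickle", commands, verbose=True)
# x = corpus.encode("bom dia")   # -> np.array of 0/1 aligned with corpus.words
# X, Y = corpus.training, corpus.output  # training matrix and one-hot intent labels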
```
#### File: dsbot/inputs/dialog.py
```python
class Dialog:
def __init__(self, dialog = [], verbose=True):
self.dialog = dialog
self.index = -1
self.verbose = verbose
def input(self):
self.index += 1
try:
return self.dialog[self.index]
except Exception as e:
return input()
def __call__(self, *args, **kwargs):
return self.input()
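# Illustrative usage: replay scripted turns first, then fall back to input() once the
# script runs out (the sentences below are only examples).
#
# dialog = Dialog(dialog=["oi", "quero analisar meus dados"])
# dialog()  # -> "oi"
# dialog()  # -> "quero analisar meus dados"
# dialog()  # -> prompts the user on stdin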
```
#### File: dsbot/utils/SpellingCheck.py
```python
import re
from collections import Counter
class SpellingCheck():
def __init__(self, language='english'):
if language == 'english':
from .files.SpellingDataEN import data
_text = data
if language == 'portuguese':
from .files.SpellingDataPT import data
_text = data
self.dictionary = Counter(re.findall(r'\w+', _text.lower()))
def probability(self, word, n=-1):
if n == -1:
n = sum(self.dictionary.values())
return self.dictionary[word] / n
def correction(self, word):
"Most probable spelling correction for word."
return max(self.candidates(word), key=self.probability)
def candidates(self, word):
"Generate possible spelling corrections for word."
return (
self.known([word]) or
self.known(self.edits1(word)) or
self.known(self.edits2(word)) or
[word]
)
def known(self, words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in self.dictionary)
def edits1(self, word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz,.?!:-1234567890'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(self, word):
"All edits that are two edits away from `word`."
return (e2 for e1 in self.edits1(word) for e2 in self.edits1(e1))
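# Usage sketch (correction quality depends on the bundled SpellingDataEN/SpellingDataPT
# word sources, which are assumed to be large plain-text corpora):
#
# checker = SpellingCheck(language='english')
# checker.correction('speling')      # -> most probable known word within one or two edits
# checker.known({'hello', 'xyzzy'})  # -> subset of words present in the frequency dictionary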
```
#### File: jefrysastre/dsbot/setup.py
```python
from setuptools import setup, find_packages
# from setuptools.command.install import install as _install
# class Install(_install):
# def run(self):
# _install.do_egg_install(self)
# import nltk
# import ssl
#
# try:
# _create_unverified_https_context = ssl._create_unverified_context
# except AttributeError:
# pass
# else:
# ssl._create_default_https_context = _create_unverified_https_context
#
# nltk.download("stopwords")
setup(
name='dsbot',
version='0.0.9',
author='Jefry',
author_email='<EMAIL>',
packages=find_packages(),
url='http://pypi.python.org/pypi/dsbot/',
license='LICENSE.txt',
description='Chatbot framework ...',
# cmdclass={'install': Install},
install_requires=[
'matplotlib==3.3.1',
'nltk==3.5',
'numpy==1.18.5',
'TPOT==0.11.5',
'pyod==0.8.2',
'tqdm==4.48.2',
'h5py==2.10.0',
'tensorflow==2.3.0',
'scipy==1.4.1',
'scikit-learn==0.23.2',
'requests==2.24.0',
'regex==2020.7.14',
'pandas==1.1.1',
'Keras==2.4.3',
'sentence-transformers==0.3.8'
],
# setup_requires=['nltk']
)
```
|
{
"source": "JefterV/Cursoemvideo.py",
"score": 3
}
|
#### File: Cursoemvideo.py/Exercícios/ex100.py
```python
from random import randint
numeros = []
def sortear(self):
cont = 0
while cont <= 4:
numeros.append(randint(1, 100))
cont += 1
print(f'Os numeros sorteados são: {numeros}')
def pares(lista):
conta = 0
for valor in lista:
if valor % 2 == 0:
conta +=valor
print(f'A soma dos numeros pares é = {conta}')
numeros = []
sortear(numeros)
pares(numeros)
```
#### File: Cursoemvideo.py/Exercícios/ex101.py
```python
from datetime import datetime
def voto(ano):
atual = datetime.now().year
idade = atual - ano
print(f'Com {idade} anos. ',end='')
if idade <= 15:
print('NÃO VOTA')
elif 16 <= idade <= 17:
print('VOTO OPCIONAL')
elif idade >= 18 and idade <=64:
print('VOTO OBRIGATORIO')
elif idade >= 65:
print('VOTO OPCIONAL')
voto(int(input('Você nasceu em que ano? ')))
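# Worked example relative to the current year: a birth year 16-17 years back prints
# 'VOTO OPCIONAL', 18-64 years back prints 'VOTO OBRIGATORIO', 65 or more prints
# 'VOTO OPCIONAL', and 15 or fewer prints 'NÃO VOTA'.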
```
#### File: Cursoemvideo.py/Exercícios/ex106.py
```python
import BBC
def cabeçalho():
print(BBC.fazul,'-'*40)
print(f'{"SISTEMA DE AJUDA": ^40}')
print('-'*40)
print(BBC.limpa, end='')
def acesso(msg):
print(BBC.fmagenta,'#' * 40)
print(f' ACESSANDO O MANUAL DE {msg}')
print('#'*40)
print(BBC.limpa, end='')
def ajuda(msg):
print(BBC.fazulc)
help(msg)
print(BBC.limpa, end='')
while True:
cabeçalho()
sobre = str(input('Digite a biblioteca que deseja conhecer: '))
acesso(sobre)
ajuda(sobre)
```
|
{
"source": "JefterV/True-Comercializadora",
"score": 3
}
|
#### File: JefterV/True-Comercializadora/main.py
```python
import requests
from zipfile import ZipFile
import csv
import datetime
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from time import sleep
class main():
def __init__(self) -> None:
url = "https://datawarehouse-true.s3-sa-east-1.amazonaws.com/teste-true/teste_true_term.zip"
dirName = "./Dados/downloads/teste_true_term.zip"
dirArqs = "./Dados/arquivos"
log = "./Dados/logs/log.log "
dataHora = datetime.datetime.now()
log = open(log, "a+")
downloadArq = self.downloadZip(url, dirName)
if not downloadArq or type(downloadArq) != bool:
print("Error: Requisição falhou.")
log.writelines(f"\n[{dataHora}]: -> Requição falhou:{downloadArq}")
log.close()
return
extraindoZip = self.extrairZip(dirName, None)
if not isinstance(extraindoZip, list):
print("Error: Falha na extração dos arquivos.")
log.writelines(f"\n[{dataHora}]: -> Falha na extração dos arquivos:{extraindoZip}")
log.close()
return
        csvArq = self.lerArquivos("encad-termicas.csv")
if not csvArq or type(csvArq) != dict:
print("Error: Falha na leitura do arquivo excel")
log.writelines(f"\n[{dataHora}]: -> Falha na leitura do arquivo excel:{csvArq}")
log.close()
return
alterarDadosvar = self.alterarDados("TERM.DAT", "TERM_TRUE.DAT", csvArq)
if not alterarDadosvar or type(alterarDadosvar) != bool:
print("Error: Falha na alteração de dados")
log.writelines(f"\n[{dataHora}]: -> Falha na alteração de dados:{alterarDadosvar}")
log.close()
return
self.compactarArquivos(dirArqs)
# Baixar arquivo
def downloadZip(self, url, save_path) -> bool:
"""
Essa função recebe:
* url = str (URL DA REQUISIÇÃO)
* save_path = str (DIRETORIO ONDE IRÁ SALVAR O ARQUIVO)
RETURN True or False
True = Requisição bem sucedida
False = Requição falhou
"""
chunk_size = 128
respostaStatus = 403
try:
resposta = requests.get(url, stream=True)
with open(save_path, 'wb') as fd:
for chunk in resposta.iter_content(chunk_size=chunk_size):
fd.write(chunk)
except Exception as e:
return e
else:
respostaStatus = resposta.status_code
if respostaStatus == 200:
return True
else:
return resposta
# Extrair dados do ZIP
def extrairZip(self, dir, dirDestino) -> bool or list:
try:
extrairZip = ZipFile(dir, 'r')
extrairZip.extractall(path=dirDestino)
extrairZip.close()
except Exception as e:
return e
else:
return extrairZip.namelist()
# Ler Excel
def lerArquivos(self, dir) -> bool or dict:
try:
lendoArq = csv.reader(open(dir), delimiter = ',')
dados = {}
index = 0
for linha in lendoArq:
if index == 0:
index += 1
continue
dados[linha[0]] = linha[2:]
index += 1
except Exception as e:
return e
else:
return dados
# Alterar dados do arquivo TERM.DAT
def alterarDados(self,dirDat, dirNew, dictCSV: dict) -> bool:
dados = {}
dadosLinha = []
lenColuna = []
try:
lendoArquivoDat = open(dirDat, 'r')
newArq = open(dirNew, 'w+')
except Exception as e:
return e
else:
index = 0
# Lendo arquivo e alterando valores
for linha in lendoArquivoDat:
dadosLinha.clear()
tratandoLinha = linha.split(" ")
# Primeira linha
if index == 0:
index += 1
newArq.writelines(linha)
continue
# Segunda linha linha
elif index == 1:
newArq.writelines(linha)
for valor in tratandoLinha:
valor = valor.strip()
if len(valor) > 0:
lenColuna.append(len(valor))
index += 1
continue
ID = linha[1:4]
linha = linha.replace("\n", "")
indexIDName = lenColuna[0]+lenColuna[1]+4
tratandoLinha = linha[indexIDName:].split(" ")
for valor in tratandoLinha:
if len(valor) > 0:
dadosLinha.append(valor.strip())
dados[ID] = dadosLinha
# ESCREVENDO NO NOVO ARQUIVO
if ID in dictCSV.keys():
count = 0
for valor in dictCSV[ID]:
if dados[ID][count] != valor:
dados[ID][count] = valor.strip()
while (True):
if len(dados[ID][count]) < lenColuna[count+2]:
dados[ID][count] = " "+str(dados[ID][count])
else: break
count += 1
linha2 = f"{linha[:indexIDName]}{dados[ID][0]} {dados[ID][1]} {dados[ID][2]} {dados[ID][3]} {dados[ID][4]} {dados[ID][5]} {dados[ID][6]} {dados[ID][7]} {dados[ID][7]} {dados[ID][9]} {dados[ID][10]} {dados[ID][11]} {dados[ID][12]} {dados[ID][13]} {dados[ID][14]} {dados[ID][15]} {dados[ID][16]}\n"
newArq.writelines(linha2)
else:
newArq.writelines(linha+"\n")
lendoArquivoDat.close()
newArq.close()
return True
# Compactando arquivos
def compactarArquivos(self, dirName):
import zipfile
sleep(2)
zipArqs = ZipFile(dirName+'/teste_true_term.zip', 'w', zipfile.ZIP_DEFLATED)
zipArqs.write("TERM.DAT")
zipArqs.write("TERM_TRUE.DAT")
zipArqs.write("encad-termicas.csv")
zipArqs.close()
if __name__ == "__main__":
    main()
```
|
{
"source": "JEFuller/dataclasses-configobj",
"score": 3
}
|
#### File: dataclasses-configobj/tests/test_core.py
```python
import unittest
from dataclasses import dataclass
from typing import List, Optional, Type, TypeVar
import configobj
import validate
from dataclasses_configobj import core
class CoreTestCase(unittest.TestCase):
def test_config(self):
spec = list(map(str.strip, """\
[foo]
bar = string
pip = integer\
""".split('\n')))
infile = list(map(str.strip, """\
[foo]
bar = one
pip = 1\
""".split('\n')))
root = configobj.ConfigObj(infile=infile, configspec=spec)
vtor = validate.Validator()
res = root.validate(vtor, preserve_errors=True)
self.assertEqual(res, True)
foo = root['foo']
self.assertIsNotNone(foo)
self.assertEqual(foo['bar'], 'one')
self.assertEqual(foo['pip'], 1)
def test_to_spec_1(self):
@dataclass
class Foo:
bar: str
pip: int
@dataclass
class Config:
foo: Foo
expectedSpec = list(map(str.strip, """\
[foo]
bar = string
pip = integer\
""".split('\n')))
root = configobj.ConfigObj()
foo = configobj.Section(root, 1, root)
root['foo'] = foo
foo.__setitem__('bar', 'string')
foo.__setitem__('pip', 'integer')
self.assertEqual(expectedSpec, root.write())
spec = core.to_spec(Config)
self.assertEqual(expectedSpec, spec.write())
def test_to_spec_2(self):
@dataclass
class Foo:
a: str
@dataclass
class Bar:
b: int
@dataclass
class Config:
pip: str
foo: Foo
bar: Bar
baz: str
expectedSpec = list(map(str.strip, """\
pip = string
baz = string
[foo]
a = string
[bar]
b = integer\
""".split('\n')))
root = configobj.ConfigObj()
root['pip'] = 'string'
root['baz'] = 'string'
foo = configobj.Section(root, 1, root)
root['foo'] = foo
foo.__setitem__('a', 'string')
bar = configobj.Section(root, 1, root)
root['bar'] = bar
bar.__setitem__('b', 'integer')
self.assertEqual(expectedSpec, root.write())
spec = core.to_spec(Config)
self.assertEqual(expectedSpec, spec.write())
def test_to_spec_3(self):
@dataclass
class Single:
other: str
@dataclass
class OneOfMany:
_name: str
val: str
@dataclass
class Config:
single: Single
_many: List[OneOfMany]
expectedSpec = list(map(str.strip, """\
[single]
other = string
[__many__]
val = string\
""".split('\n')))
spec = core.to_spec(Config)
self.assertEqual(expectedSpec, spec.write())
def test_to_spec_4(self):
@dataclass
class OneOfMany:
_name: str
val: str
@dataclass
class Wrapper:
_many: List[OneOfMany]
@dataclass
class Config:
wrapper: Wrapper
expectedSpec = list(map(str.strip, """\
[wrapper]
[[__many__]]
val = string\
""".split('\n')))
spec = core.to_spec(Config)
self.assertEqual(expectedSpec, spec.write())
def test_type(self):
T = TypeVar('T')
def doit(klass: Type[T]) -> T:
vars = {'other': 'test'}
return klass(**vars)
@dataclass
class Parent:
other: str
self.assertEqual(doit(Parent).other, 'test')
def test_lift_1(self):
@dataclass
class Single:
other: str
@dataclass
class OneOfMany:
_name: str
val: str
@dataclass
class Config:
single: Single
_many: List[OneOfMany]
infile = list(map(str.strip, """\
[single]
other = hello
[one]
val = apple
[two]
val = banana\
""".split('\n')))
expectedConfig = Config(
single=Single(other = 'hello'),
_many=[
OneOfMany(_name = 'one', val = 'apple'),
OneOfMany(_name = 'two', val = 'banana')
]
)
spec = core.to_spec(Config)
root = configobj.ConfigObj(infile=infile, configspec=spec)
config = core.lift(Config, root)
self.assertEqual(expectedConfig, config)
def test_lift_2(self):
@dataclass
class OneOfMany:
_name: str
val: str
@dataclass
class Wrapper:
_many: List[OneOfMany]
@dataclass
class Config:
wrapper: Wrapper
infile = list(map(str.strip, """\
[wrapper]
[[one]]
val = apple
[[two]]
val = banana\
""".split('\n')))
expectedConfig = Config(
wrapper=Wrapper(
_many=[
OneOfMany(_name = 'one', val = 'apple'),
OneOfMany(_name = 'two', val = 'banana')
]
)
)
spec = core.to_spec(Config)
root = configobj.ConfigObj(infile=infile, configspec=spec)
config = core.lift(Config, root)
self.assertEqual(expectedConfig, config)
def test_lift_3(self):
@dataclass
class Foo:
bar: str
pip: int
@dataclass
class OneOfMany:
_name: str
val: str
@dataclass
class Wrapper:
test: str
foo: Foo
_many: List[OneOfMany]
@dataclass
class Config:
wrapper: Wrapper
infile = list(map(str.strip, """\
[wrapper]
test = yes
[[foo]]
bar = testing
pip = 123
[[one]]
val = apple
[[two]]
val = banana\
""".split('\n')))
expectedConfig = Config(
wrapper=Wrapper(
test='yes',
foo=Foo('testing', 123),
_many=[
OneOfMany(_name = 'one', val = 'apple'),
OneOfMany(_name = 'two', val = 'banana')
]
)
)
spec = core.to_spec(Config)
root = configobj.ConfigObj(infile=infile, configspec=spec)
vtor = validate.Validator()
root.validate(vtor)
config = core.lift(Config, root)
self.assertEqual(expectedConfig, config)
def test_optional_root(self):
@dataclass
class Config:
required: str
optional: Optional[str] = None
expectedSpec = list(map(str.strip, """\
required = string
optional = string(default=None)\
""".split('\n')))
spec = core.to_spec(Config)
self.assertEqual(expectedSpec, spec.write())
here = configobj.ConfigObj(infile= ["required = yes", "optional = here"], configspec=spec)
vtor = validate.Validator()
here.validate(vtor)
self.assertEqual(Config('yes', 'here'), core.lift(Config, here))
empty = configobj.ConfigObj(infile= ["required = yes"], configspec=spec)
vtor = validate.Validator()
empty.validate(vtor)
self.assertEqual(Config('yes', None), core.lift(Config, empty))
def test_default_root(self):
@dataclass
class Config:
required: str
optional: str = 'defaultvalue'
expectedSpec = list(map(str.strip, """\
required = string
optional = string(default='defaultvalue')\
""".split('\n')))
spec = core.to_spec(Config)
self.assertEqual(expectedSpec, spec.write())
here = configobj.ConfigObj(infile= ["required = yes", "optional = here"], configspec=spec)
vtor = validate.Validator()
here.validate(vtor)
self.assertEqual(Config('yes', 'here'), core.lift(Config, here))
empty = configobj.ConfigObj(infile= ["required = yes"], configspec=spec)
vtor = validate.Validator()
empty.validate(vtor)
self.assertEqual(Config('yes', 'defaultvalue'), core.lift(Config, empty))
def test_readme_example(self):
@dataclass
class Single:
other: str
@dataclass
class OneOfMany:
_name: str
val: str
@dataclass
class Config:
single: Single
_many: List[OneOfMany]
optional: Optional[str] = None
withdefault: str = 'test123'
infile = list(map(str.strip, """\
[single]
other = hello
[one]
val = apple
[two]
val = banana\
""".split('\n')))
spec = core.to_spec(Config)
root = configobj.ConfigObj(infile=infile, configspec=spec)
validator = validate.Validator()
root.validate(validator)
expectedConfig = Config(
single=Single(other='hello'),
optional=None,
withdefault='test123',
_many=[
OneOfMany(_name='one', val='apple'),
OneOfMany(_name='two', val='banana')
]
)
config: Config = core.lift(Config, root)
self.assertEqual(expectedConfig, config)
```
|