import streamlit as st
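# Streamlit app that estimates per-kernel execution time for a single transformer decoding step,
# comparing Multi-Head Attention (MHA) with Multi-Query Attention (MQA) using a simple roofline
# model: time = FLOPs / peak compute + bytes moved / memory bandwidth.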

def number_field(label, **kwargs):
    # Render a labelled input: the text label in a narrow left column, the number widget in a wider right column.
    c1, c2 = st.columns([2, 4])
    c1.write(label)
    return c2.number_input('', **kwargs)

def calc_exec_time(comp_flop, mem_bytes):
    # Roofline-style estimate: compute time plus memory-transfer time, in milliseconds.
    return (comp_flop/TFLOPS + mem_bytes/GB_S)*1000

def print_kernel_execution(c1, c2, comp_flop, mem_bytes):
    # Report a kernel's FLOPs, bytes moved, arithmetic intensity, and estimated execution time.
    arith_int = comp_flop/mem_bytes
    exec_time = calc_exec_time(comp_flop, mem_bytes)
    comp_flop = round(comp_flop/1e9, 2)
    mem_bytes = round(mem_bytes/1e6, 2)
    c1.write("GFLOP:")
    c2.write(str(comp_flop))
    c1.write("MB:")
    c2.write(str(mem_bytes))
    c1.write("Arithm. intensity:")
    c2.write(str(arith_int))
    c1.write("Time (ms):")
    c2.write(str(exec_time))
    return exec_time

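# Peak compute and memory bandwidth assumed by the roofline estimate (these figures appear to
# match an NVIDIA A100: ~312 TFLOPS half-precision tensor-core throughput, ~1935 GB/s HBM bandwidth).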
TFLOPS = 312e12
GB_S = 1935e9
st.header("Transformer parameters")
col1, col2 = st.columns([2, 4])
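# Model and decoding hyper-parameters. The kernel estimates below assume 2-byte (fp16/bf16)
# elements and a single new token per sequence, i.e. incremental decoding against a KV cache.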
bs = number_field('Batch size', value=10)
h = number_field('Num heads', value=16)
d = number_field('Dimension', value=768)
n_start = number_field('Start seq', value=1)
n = number_field('End seq', value=1024)
l = number_field('Num layers', value=24)
st.header('Attention layer')
st.subheader('QKV projection')
st.caption("Multi-Head Attention")
mha_flop = 2*bs*1*d*3*d
mha_bytes = 2*bs*1*d + 2*3*d*d + 2*bs*1*3*d
c1, c2 = st.columns([2, 3])
qkv_mha_time = print_kernel_execution(c1, c2, mha_flop, mha_bytes)
st.caption("Multi-Query Attention")
mqa_flop = 2*bs*1*d*(1+2/h)*d
mqa_bytes = 2*bs*1*d + 2*(1+2/h)*d*d + 2*bs*1*(1+2/h)*d
c1, c2 = st.columns([2, 3])
qkv_mqa_time = print_kernel_execution(c1, c2, mqa_flop, mqa_bytes)
st.subheader('QK gemm')
st.write("Note that calculation depends on sequence length (n)")
st.caption("Multi-Head Attention")
mha_flop = 2*bs*h*(d/h)*n
mha_bytes = 2*bs*h*(d/h) + 2*bs*h*n*(d/h) + 2*bs*h*n
c1, c2 = st.columns([2, 3])
att1_mha_time = print_kernel_execution(c1, c2, mha_flop, mha_bytes)
st.caption("Multi-Query Attention")
mqa_flop = 2*bs*h*(d/h)*n
mqa_bytes = 2*bs*h*(d/h) + 2*bs*n*(d/h) + 2*bs*h*n
c1, c2 = st.columns([2, 3])
att1_mqa_time = print_kernel_execution(c1, c2, mqa_flop, mqa_bytes)
st.subheader('Attention-value gemm')
st.write("Calculation depends on sequence length. We show numbers for maximum sequence length n.")
st.caption("Multi-Head Attention")
mha_flop = 2*bs*h*n*(d/h)
mha_bytes = 2*bs*h*n + 2*bs*h*n*(d/h) + 2*bs*h*(d/h)
c1, c2 = st.columns([2, 3])
att_mha_time = print_kernel_execution(c1, c2, mha_flop, mha_bytes)
st.caption("Multi-Query Attention")
mqa_flop = 2*bs*h*n*(d/h)
mqa_bytes = 2*bs*h*n + 2*bs*n*(d/h) + 2*bs*h*(d/h)
c1, c2 = st.columns([2, 3])
att_mqa_time = print_kernel_execution(c1, c2, mqa_flop, mqa_bytes)
st.subheader('Output projection')
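# Output projection: a (bs x d) @ (d x d) GEMM mapping the concatenated head outputs back to the model dimension.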
out_flop = 2*bs*1*d*d
out_bytes = 2*bs*1*d + 2*d*d + 2*bs*1*d
c1, c2 = st.columns([2, 3])
out_time = print_kernel_execution(c1, c2, out_flop, out_bytes)
st.subheader('Element-wise ops')
st.write("We also need to take into the softmax layer and layer norm")
st.caption("Softmax")
softmax_bytes = 2*bs*h*n + 2*bs*h*n
c1, c2 = st.columns([2, 3])
softmax_time = print_kernel_execution(c1, c2, 0, softmax_bytes)
st.caption("Layer norm")
st.header('MLP')
st.subheader('First Linear')
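# First MLP linear: a (bs x d) @ (d x 4d) GEMM with the usual 4x hidden expansion.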
mlp1_flop = 2*bs*1*d*4*d
mlp1_bytes = 2*bs*1*d + 2*d*4*d + 2*bs*1*4*d
c1, c2 = st.columns([2, 3])
mlp1_time = print_kernel_execution(c1, c2, mlp1_flop, mlp1_bytes)
st.subheader('Second Linear')
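# Second MLP linear: a (bs x 4d) @ (4d x d) GEMM; same FLOPs and total bytes as the first.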
mlp2_flop = 2*bs*1*d*4*d
mlp2_bytes = 2*bs*1*d + 2*d*4*d + 2*bs*1*4*d
c1, c2 = st.columns([2, 3])
mlp2_time = print_kernel_execution(c1, c2, mlp2_flop, mlp2_bytes)