import torch.nn as nn

from .multi_head_attention import MultiHeadAttention
from .feed_forward import FeedForward


class TransformerBlock(nn.Module):
    """A pre-norm transformer block: each sublayer is applied as x + sublayer(norm(x))."""

    def __init__(self, config):
        super().__init__()
        self.attn = MultiHeadAttention(config)   # token-mixing sublayer
        self.ff = FeedForward(config)            # position-wise MLP sublayer
        self.ln1 = nn.LayerNorm(config.n_embed)
        self.ln2 = nn.LayerNorm(config.n_embed)

    def forward(self, x):
        # Pre-norm residual connections: normalize, apply the sublayer,
        # then add the result back onto the residual stream.
        x = x + self.attn(self.ln1(x))
        x = x + self.ff(self.ln2(x))
        return x
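
The sibling modules (multi_head_attention.py, feed_forward.py) and the config object are not shown in this file, so the usage sketch below rests on assumptions: the import path model.transformer_block is hypothetical, and the config is supposed to carry n_embed plus whatever the attention sublayer needs (n_head here is a guess). What it demonstrates is the property that makes these blocks stackable: a block maps a (batch, seq_len, n_embed) tensor to a tensor of the same shape.

import torch
from types import SimpleNamespace

from model.transformer_block import TransformerBlock  # hypothetical import path

# Assumed minimal config: n_embed matches the LayerNorm sizes above;
# n_head is a guess at what MultiHeadAttention requires.
config = SimpleNamespace(n_embed=64, n_head=4)
block = TransformerBlock(config)

x = torch.randn(2, 16, config.n_embed)  # (batch, seq_len, n_embed)
y = block(x)
assert y.shape == x.shape  # residual blocks preserve shape, so they can be stacked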