gasmichel committed (verified)
Commit 5d2c1d9 · 1 Parent(s): df8246f

Upload folder using huggingface_hub
.ipynb_checkpoints/config-checkpoint.json ADDED
(Jupyter autosave copy; contents identical to config.json below.)
.ipynb_checkpoints/model-checkpoint.py ADDED
(Jupyter autosave copy; contents identical to model.py below.)
__pycache__/config.cpython-39.pyc ADDED
Binary file (622 Bytes).

__pycache__/model.cpython-39.pyc ADDED
Binary file (3.33 kB).
config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "architectures": [
+     "UARPlay"
+   ],
+   "auto_map": {
+     "AutoConfig": "config.LUARConfig",
+     "AutoModel": "model.UARPlay"
+   },
+   "embedding_size": 512,
+   "model_type": "LUAR",
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.2",
+   "use_memory_efficient_attention": false
+ }
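
The auto_map block ties the Hub auto classes to the custom code in this commit: AutoConfig resolves to LUARConfig in config.py and AutoModel to UARPlay in model.py, so loading the model requires trusting remote code. A minimal loading sketch; the repo id below is a placeholder, substitute the actual repository name:

    from transformers import AutoModel

    repo_id = "gasmichel/UAR-Play"  # placeholder repo id

    # trust_remote_code=True lets transformers import config.LUARConfig and
    # model.UARPlay from the files uploaded in this commit
    model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
    print(model.config.embedding_size)  # 512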
config.py ADDED
@@ -0,0 +1,11 @@
+ from transformers import PretrainedConfig
+
+ class LUARConfig(PretrainedConfig):
+     model_type = "LUAR"
+
+     def __init__(self,
+                  embedding_size: int = 512,
+                  **kwargs,
+     ):
+         self.embedding_size = embedding_size
+         super().__init__(**kwargs)
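
LUARConfig only adds embedding_size on top of the standard PretrainedConfig fields; the remaining keys in config.json (architectures, auto_map, dtype, transformers version) are handled by the usual transformers serialization. A small sketch, assuming the module is importable as config (as the auto_map entry suggests):

    from config import LUARConfig

    cfg = LUARConfig(embedding_size=512)
    cfg.save_pretrained("uar_play_config")  # writes a config.json with model_type "LUAR"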
model.py ADDED
@@ -0,0 +1,99 @@
+ import math
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from einops import rearrange, reduce, repeat
+ from transformers import AutoModel, PreTrainedModel
+
+ # from models.layers import MemoryEfficientAttention, SelfAttention
+
+ from .config import LUARConfig
+
+
+ class UARPlay(PreTrainedModel):
+     """UAR model: an SBERT-style encoder whose comment embeddings are
+     aggregated into a single author (episode) embedding.
+     """
+     config_class = LUARConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.create_transformer()
+         self.linear = nn.Linear(self.hidden_size, config.embedding_size)
+
+     def attn_fn(self, k, q, v):
+         """Scaled dot-product attention over the comments of an episode."""
+         d_k = q.size(-1)
+         scores = torch.matmul(k, q.transpose(-2, -1)) / math.sqrt(d_k)
+         p_attn = F.softmax(scores, dim=-1)
+         return torch.matmul(p_attn, v)
+
+     def create_transformer(self):
+         """Loads the pretrained SBERT backbone."""
+         self.transformer = AutoModel.from_pretrained("sentence-transformers/all-distilroberta-v1")
+         self.hidden_size = self.transformer.config.hidden_size
+         self.num_attention_heads = self.transformer.config.num_attention_heads
+         self.dim_head = self.hidden_size // self.num_attention_heads
+
+     def mean_pooling(self, token_embeddings, attention_mask):
+         """Mean pooling as described in the SBERT paper."""
+         input_mask_expanded = repeat(attention_mask, 'b l -> b l d', d=self.hidden_size).float()
+         sum_embeddings = reduce(token_embeddings * input_mask_expanded, 'b l d -> b d', 'sum')
+         sum_mask = torch.clamp(reduce(input_mask_expanded, 'b l d -> b d', 'sum'), min=1e-9)
+         return sum_embeddings / sum_mask
+
+     def get_episode_embeddings(self, data):
+         """Computes the author (episode) embedding.
+
+         data[0] / data[1]: input_ids / attention_mask of shape
+         (batch_size, episode_length, max_token_length).
+         """
+         # add a num_samples_per_author dimension: (b, 1, e, l)
+         input_ids, attention_mask = data[0].unsqueeze(1), data[1].unsqueeze(1)
+         B, N, E, _ = input_ids.shape
+
+         input_ids = rearrange(input_ids, 'b n e l -> (b n e) l')
+         attention_mask = rearrange(attention_mask, 'b n e l -> (b n e) l')
+
+         outputs = self.transformer(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             return_dict=True,
+             output_hidden_states=True,
+         )
+
+         # at this point, we're embedding individual "comments"
+         comment_embeddings = self.mean_pooling(outputs['last_hidden_state'], attention_mask)
+         # here `l` denotes the hidden dimension, not the sequence length
+         comment_embeddings = rearrange(comment_embeddings, '(b n e) l -> (b n) e l', b=B, n=N, e=E)
+
+         # aggregate individual comment embeddings into episode embeddings
+         episode_embeddings = self.attn_fn(comment_embeddings, comment_embeddings, comment_embeddings)
+         episode_embeddings = reduce(episode_embeddings, 'b e l -> b l', 'max')
+
+         episode_embeddings = self.linear(episode_embeddings)
+         return episode_embeddings, comment_embeddings
+
+     def forward(self, input_ids, attention_mask):
+         """Calculates a fixed-length feature vector for a batch of episode samples."""
+         data = [input_ids, attention_mask]
+         episode_embeddings, _ = self.get_episode_embeddings(data)
+         return episode_embeddings
+
+     def _model_forward(self, batch):
+         """Passes a batch of data through the model.
+
+         This is used in the lightning_trainer.py file.
+         """
+         data, _, _ = batch
+         episode_embeddings, comment_embeddings = self.get_episode_embeddings(data)
+         # labels = torch.flatten(labels)
+         return episode_embeddings, comment_embeddings
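
Given the shapes used in get_episode_embeddings, forward expects input_ids and attention_mask of shape (batch_size, episode_length, max_token_length), i.e. a fixed number of comments per author episode. A usage sketch under those assumptions (the repo id is again a placeholder):

    import torch
    from transformers import AutoModel, AutoTokenizer

    repo_id = "gasmichel/UAR-Play"  # placeholder repo id
    tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-distilroberta-v1")
    model = AutoModel.from_pretrained(repo_id, trust_remote_code=True).eval()

    # one author episode of 4 comments, each padded/truncated to 64 tokens
    comments = ["first comment", "second comment", "third comment", "fourth comment"]
    tok = tokenizer(comments, padding="max_length", truncation=True,
                    max_length=64, return_tensors="pt")

    # reshape to (batch_size=1, episode_length=4, max_token_length=64)
    input_ids = tok["input_ids"].unsqueeze(0)
    attention_mask = tok["attention_mask"].unsqueeze(0)

    with torch.no_grad():
        episode_embedding = model(input_ids, attention_mask)

    print(episode_embedding.shape)  # expected: torch.Size([1, 512])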
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39be85ca6bf1822c7eec6d1459e4ae147973ce8338c3cdb5914ca7a2d999f7d1
+ size 330061440
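
The file above is a Git LFS pointer; the actual float32 weights (about 330 MB, consistent with the DistilRoBERTa backbone plus the 768-to-512 projection) live in LFS storage and are resolved automatically by from_pretrained. To fetch the file directly, a sketch with huggingface_hub (placeholder repo id):

    from huggingface_hub import hf_hub_download

    repo_id = "gasmichel/UAR-Play"  # placeholder repo id
    path = hf_hub_download(repo_id=repo_id, filename="model.safetensors")
    print(path)  # local path to the downloaded weight file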