MingLi committed on
Commit 9f13819 · 1 Parent(s): 28b1090

fork and bug fix from https://github.com/AkaliKong/iLoRA

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +65 -0
  3. SASRecModules_ori.py +95 -0
  4. data/_init_.py +1 -0
  5. data/data_interface.py +190 -0
  6. data/lastfm_data.py +87 -0
  7. data/movielens_data.py +102 -0
  8. data/ref/lastfm/Test_data.df +3 -0
  9. data/ref/lastfm/Val_data.df +3 -0
  10. data/ref/lastfm/id2name.txt +4606 -0
  11. data/ref/lastfm/train_data.df +3 -0
  12. data/ref/movielens/Test_data.df +3 -0
  13. data/ref/movielens/Val_data.df +3 -0
  14. data/ref/movielens/train_data.df +3 -0
  15. data/ref/movielens/u.item +0 -0
  16. data/ref/steam/Test_data.df +3 -0
  17. data/ref/steam/id2name.txt +3581 -0
  18. data/ref/steam/test.ipynb +190 -0
  19. data/ref/steam/train_data.df +3 -0
  20. data/steam_data.py +103 -0
  21. debug/modeling_llama.py +886 -0
  22. debug/utils.py +0 -0
  23. main.py +149 -0
  24. model/__init__.py +0 -0
  25. model/mlp_projector.py +14 -0
  26. model/model_interface.py +492 -0
  27. model/peft/__init__.py +61 -0
  28. model/peft/import_utils.py +28 -0
  29. model/peft/mapping.py +134 -0
  30. model/peft/peft_model.py +1619 -0
  31. model/peft/tuners/__init__.py +27 -0
  32. model/peft/tuners/adalora.py +751 -0
  33. model/peft/tuners/adaption_prompt.py +368 -0
  34. model/peft/tuners/debug_utils.py +64 -0
  35. model/peft/tuners/gating.py +124 -0
  36. model/peft/tuners/lora.py +1033 -0
  37. model/peft/tuners/moelora.py +1164 -0
  38. model/peft/tuners/p_tuning.py +170 -0
  39. model/peft/tuners/prefix_tuning.py +109 -0
  40. model/peft/tuners/prompt_tuning.py +130 -0
  41. model/peft/tuners/test_moelora.py +97 -0
  42. model/peft/utils/__init__.py +42 -0
  43. model/peft/utils/config.py +222 -0
  44. model/peft/utils/hub_utils.py +29 -0
  45. model/peft/utils/other.py +271 -0
  46. model/peft/utils/save_and_load.py +137 -0
  47. model/router/_init_.py +0 -0
  48. model/router/cvr.py +73 -0
  49. model/router/nlpr.py +84 -0
  50. optims.py +43 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.df filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,65 @@
+ # iLoRA
+
+ #### Preparation
+
+ 1. Prepare the environment:
+
+ ```bash
+ git clone
+ cd iLoRA
+ pip install -r requirements.txt
+ ```
+
+ 2. Prepare the pre-trained Hugging Face checkpoint of Llama2-7B (https://huggingface.co/meta-llama/Llama-2-7b-hf).
+ 3. Modify the paths inside the .sh files.
+
+ #### Train iLoRA
+
+ Train iLoRA with a single A100 GPU on the MovieLens dataset:
+
+ ```bash
+ sh train_movielens.sh
+ ```
+
+ Train iLoRA with a single A100 GPU on the Steam dataset:
+
+ ```bash
+ sh train_steam.sh
+ ```
+
+ Train iLoRA with a single A100 GPU on the LastFM dataset:
+
+ ```bash
+ sh train_lastfm.sh
+ ```
+
+ Note: set the `llm_path` argument to the directory containing your Llama2 model.
+
+ ##### Solutions to environment issues reported during reproduction:
+
+ If you encounter an error in your transformers/generation/utils.py file, replace it with the debug/utils.py file we provide.
+
+ If you encounter an error in your transformers/models/llama/modeling_llama.py file, replace it with the debug/modeling_llama.py file we provide.
+
+ Thank you all for your attention to our work! Wishing you success in your research.
+
+ #### Evaluate iLoRA
+
+ Test iLoRA with a single A100 GPU on the MovieLens dataset:
+
+ ```bash
+ sh test_movielens.sh
+ ```
+
+ Test iLoRA with a single A100 GPU on the Steam dataset:
+
+ ```bash
+ sh test_steam.sh
+ ```
+
+ Test iLoRA with a single A100 GPU on the LastFM dataset:
+
+ ```bash
+ sh test_lastfm.sh
+ ```
SASRecModules_ori.py ADDED
@@ -0,0 +1,95 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+
+ class PositionwiseFeedForward(nn.Module):
+     def __init__(self, d_in, d_hid, dropout=0.1):
+         super().__init__()
+         self.w_1 = nn.Conv1d(d_in, d_hid, 1)
+         self.w_2 = nn.Conv1d(d_hid, d_in, 1)
+         self.layer_norm = nn.LayerNorm(d_in)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         residual = x
+         output = x.transpose(1, 2)
+         output = self.w_2(F.relu(self.w_1(output)))
+         output = output.transpose(1, 2)
+         output = self.dropout(output)
+         output = self.layer_norm(output + residual)
+         return output
+
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, hidden_size, num_units, num_heads, dropout_rate):
+         super().__init__()
+         self.hidden_size = hidden_size
+         self.num_heads = num_heads
+         assert hidden_size % num_heads == 0
+
+         self.linear_q = nn.Linear(hidden_size, num_units)
+         self.linear_k = nn.Linear(hidden_size, num_units)
+         self.linear_v = nn.Linear(hidden_size, num_units)
+         self.dropout = nn.Dropout(dropout_rate)
+         self.softmax = nn.Softmax(dim=-1)
+
+     def forward(self, queries, keys):
+         """
+         :param queries: A 3d tensor with shape of [N, T_q, C_q]
+         :param keys: A 3d tensor with shape of [N, T_k, C_k]
+
+         :return: A 3d tensor with shape of (N, T_q, C)
+         """
+         Q = self.linear_q(queries)  # (N, T_q, C)
+         K = self.linear_k(keys)  # (N, T_k, C)
+         V = self.linear_v(keys)  # (N, T_k, C)
+
+         # Split and Concat
+         split_size = self.hidden_size // self.num_heads
+         Q_ = torch.cat(torch.split(Q, split_size, dim=2), dim=0)  # (h*N, T_q, C/h)
+         K_ = torch.cat(torch.split(K, split_size, dim=2), dim=0)  # (h*N, T_k, C/h)
+         V_ = torch.cat(torch.split(V, split_size, dim=2), dim=0)  # (h*N, T_k, C/h)
+
+         # Multiplication
+         matmul_output = torch.bmm(Q_, K_.transpose(1, 2)) / self.hidden_size ** 0.5  # (h*N, T_q, T_k)
+
+         # Key Masking
+         key_mask = torch.sign(torch.abs(keys.sum(dim=-1))).repeat(self.num_heads, 1)  # (h*N, T_k)
+         key_mask_reshaped = key_mask.unsqueeze(1).repeat(1, queries.shape[1], 1)  # (h*N, T_q, T_k)
+         key_paddings = torch.ones_like(matmul_output) * (-2 ** 32 + 1)
+         matmul_output_m1 = torch.where(torch.eq(key_mask_reshaped, 0), key_paddings, matmul_output)  # (h*N, T_q, T_k)
+
+         # Causality - Future Blinding
+         diag_vals = torch.ones_like(matmul_output[0, :, :])  # (T_q, T_k)
+         tril = torch.tril(diag_vals)  # (T_q, T_k)
+         causality_mask = tril.unsqueeze(0).repeat(matmul_output.shape[0], 1, 1)  # (h*N, T_q, T_k)
+         causality_paddings = torch.ones_like(causality_mask) * (-2 ** 32 + 1)
+         matmul_output_m2 = torch.where(torch.eq(causality_mask, 0), causality_paddings, matmul_output_m1)  # (h*N, T_q, T_k)
+
+         # Activation
+         matmul_output_sm = self.softmax(matmul_output_m2)  # (h*N, T_q, T_k)
+
+         # Query Masking
+         query_mask = torch.sign(torch.abs(queries.sum(dim=-1))).repeat(self.num_heads, 1)  # (h*N, T_q)
+         query_mask = query_mask.unsqueeze(-1).repeat(1, 1, keys.shape[1])  # (h*N, T_q, T_k)
+         matmul_output_qm = matmul_output_sm * query_mask
+
+         # Dropout
+         matmul_output_dropout = self.dropout(matmul_output_qm)
+
+         # Weighted Sum
+         output_ws = torch.bmm(matmul_output_dropout, V_)  # (h*N, T_q, C/h)
+
+         # Restore Shape
+         output = torch.cat(torch.split(output_ws, output_ws.shape[0] // self.num_heads, dim=0), dim=2)  # (N, T_q, C)
+
+         # Residual Connection
+         output_res = output + queries
+
+         return output_res
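As a quick sanity check of the two SASRec building blocks above, they can be run on random tensors. The sizes below are illustrative only; the real constraints in the code are that `hidden_size` is divisible by `num_heads` and that `num_units` equals the query dimension so the head split and the residual addition line up.

```python
# Minimal shape check for PositionwiseFeedForward and MultiHeadAttention (illustrative sizes).
import torch
from SASRecModules_ori import MultiHeadAttention, PositionwiseFeedForward

batch, seq_len, hidden = 4, 10, 64           # hidden must be divisible by num_heads
x = torch.randn(batch, seq_len, hidden)      # a padded batch of item embeddings

attn = MultiHeadAttention(hidden_size=hidden, num_units=hidden, num_heads=2, dropout_rate=0.1)
ffn = PositionwiseFeedForward(d_in=hidden, d_hid=4 * hidden, dropout=0.1)

out = ffn(attn(x, x))                        # self-attention followed by the position-wise FFN
print(out.shape)                             # torch.Size([4, 10, 64])
```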
data/_init_.py ADDED
@@ -0,0 +1 @@
+
data/data_interface.py ADDED
@@ -0,0 +1,190 @@
+ import inspect
+ import importlib
+ import pickle as pkl
+ import pytorch_lightning as pl
+ from torch.utils.data import DataLoader
+ from torch.utils.data.sampler import WeightedRandomSampler
+
+ import random
+ import torch
+ import argparse
+ from transformers import LlamaForCausalLM, LlamaTokenizer
+ import os
+
+
+
+ class TrainCollater:
+     def __init__(self,
+                  prompt_list=None,
+                  llm_tokenizer=None,
+                  train=False,
+                  terminator="\n",
+                  max_step=1):
+         self.prompt_list = prompt_list
+         self.llm_tokenizer = llm_tokenizer
+         self.train = train
+         self.terminator = terminator
+         self.max_step = max_step
+         self.cur_step = 1
+
+     def __call__(self, batch):
+         if isinstance(self.prompt_list, list):
+             instruction = random.choice(self.prompt_list)
+             inputs_text = instruction if isinstance(instruction, list) else [instruction] * len(batch)
+         else:
+             instruction = sample["instruction_input"] if "instruction_input" in sample else None
+             inputs_text = instruction if isinstance(instruction, list) else [instruction] * len(batch)
+
+         thresh_hold = self.cur_step / self.max_step
+         p = random.random()
+         if p < thresh_hold or not self.train:
+             for i, sample in enumerate(batch):
+                 input_text = inputs_text[i]
+                 if '[HistoryHere]' in input_text:
+                     insert_prompt = ", ".join([seq_title + ' [HistoryEmb]' for seq_title in sample['seq_name']])
+                     input_text = input_text.replace('[HistoryHere]', insert_prompt)
+                 if '[CansHere]' in input_text:
+                     insert_prompt = ", ".join([can_title + ' [CansEmb]' for can_title in sample['cans_name']])
+                     input_text = input_text.replace('[CansHere]', insert_prompt)
+                 if '[TargetHere]' in input_text:
+                     insert_prompt = ", ".join([sample['correct_answer'] + ' [ItemEmb]'])
+                     input_text = input_text.replace('[TargetHere]', insert_prompt)
+
+                 inputs_text[i] = input_text
+             flag = False
+         else:
+             for i, sample in enumerate(batch):
+                 input_text = inputs_text[i]
+                 if '[HistoryHere]' in input_text:
+                     insert_prompt = ", ".join([seq_title + ' [PH]' for seq_title in sample['seq_name']])
+                     input_text = input_text.replace('[HistoryHere]', insert_prompt)
+                 if '[CansHere]' in input_text:
+                     insert_prompt = ", ".join([can_title + ' [PH]' for can_title in sample['cans_name']])
+                     input_text = input_text.replace('[CansHere]', insert_prompt)
+
+                 inputs_text[i] = input_text
+             flag = True
+         self.cur_step += 1
+
+         targets_text = [sample['correct_answer'] for sample in batch]
+
+         if self.train:
+             targets_text = [target_text + self.terminator for target_text in targets_text]
+             inputs_pair = [[p, t] for p, t in zip(inputs_text, targets_text)]
+
+             batch_tokens = self.llm_tokenizer(
+                 inputs_pair,
+                 return_tensors="pt",
+                 padding="longest",
+                 truncation=False,
+                 add_special_tokens=True,
+                 return_attention_mask=True,
+                 return_token_type_ids=True)
+             new_batch = {"tokens": batch_tokens,
+                          "seq": torch.stack([torch.tensor(sample['seq']) for sample in batch], dim=0),
+                          "cans": torch.stack([torch.tensor(sample['cans']) for sample in batch], dim=0),
+                          "len_seq": torch.stack([torch.tensor(sample['len_seq']) for sample in batch], dim=0),
+                          "len_cans": torch.stack([torch.tensor(sample['len_cans']) for sample in batch], dim=0),
+                          "item_id": torch.stack([torch.tensor(sample['item_id']) for sample in batch], dim=0),
+                          "flag": flag,
+                          }
+
+         else:
+             batch_tokens = self.llm_tokenizer(
+                 inputs_text,
+                 return_tensors="pt",
+                 padding="longest",
+                 truncation=False,
+                 add_special_tokens=True,
+                 return_token_type_ids=True)
+             cans_name = [sample['cans_name'] for sample in batch]
+             new_batch = {"tokens": batch_tokens,
+                          "seq": torch.stack([torch.tensor(sample['seq']) for sample in batch], dim=0),
+                          "cans": torch.stack([torch.tensor(sample['cans']) for sample in batch], dim=0),
+                          "len_seq": torch.stack([torch.tensor(sample['len_seq']) for sample in batch], dim=0),
+                          "len_cans": torch.stack([torch.tensor(sample['len_cans']) for sample in batch], dim=0),
+                          "item_id": torch.stack([torch.tensor(sample['item_id']) for sample in batch], dim=0),
+                          "correct_answer": targets_text,
+                          "cans_name": cans_name,
+                          }
+
+         return new_batch
+
+ class DInterface(pl.LightningDataModule):
+
+     def __init__(self,
+                  llm_tokenizer=None,
+                  num_workers=8,
+                  dataset='',
+                  **kwargs):
+         super().__init__()
+         self.num_workers = num_workers
+         self.llm_tokenizer = llm_tokenizer
+         self.dataset = dataset
+         self.kwargs = kwargs
+         self.batch_size = kwargs['batch_size']
+         self.max_epochs = kwargs['max_epochs']
+         self.load_data_module()
+         self.load_prompt(kwargs['prompt_path'])
+
+         self.trainset = self.instancialize(stage='train')
+         self.valset = self.instancialize(stage='val')
+         self.testset = self.instancialize(stage='test')
+         self.max_steps = self.max_epochs * (len(self.trainset) // self.batch_size) // self.num_workers
+
+     def train_dataloader(self):
+         return DataLoader(self.trainset,
+                           batch_size=self.batch_size,
+                           num_workers=self.num_workers,
+                           shuffle=True,
+                           drop_last=True,
+                           collate_fn=TrainCollater(prompt_list=self.prompt_list, llm_tokenizer=self.llm_tokenizer, train=True, max_step=self.max_steps))
+
+     def val_dataloader(self):
+         return DataLoader(self.valset,
+                           batch_size=self.batch_size,
+                           num_workers=self.num_workers,
+                           shuffle=False,
+                           collate_fn=TrainCollater(prompt_list=self.prompt_list, llm_tokenizer=self.llm_tokenizer, train=False))
+
+     def test_dataloader(self):
+         return DataLoader(self.testset,
+                           batch_size=self.batch_size,
+                           num_workers=self.num_workers,
+                           shuffle=False,
+                           drop_last=True,
+                           collate_fn=TrainCollater(prompt_list=self.prompt_list, llm_tokenizer=self.llm_tokenizer, train=False))
+
+     def load_data_module(self):
+         name = self.dataset
+         camel_name = ''.join([i.capitalize() for i in name.split('_')])
+         try:
+             self.data_module = getattr(importlib.import_module(
+                 '.' + name, package=__package__), camel_name)
+         except:
+             raise ValueError(
+                 f'Invalid Dataset File Name or Invalid Class Name data.{name}.{camel_name}')
+
+     def instancialize(self, **other_args):
+         class_args = inspect.getargspec(self.data_module.__init__).args[1:]
+         inkeys = self.kwargs.keys()
+         args1 = {}
+         for arg in class_args:
+             if arg in inkeys:
+                 args1[arg] = self.kwargs[arg]
+         args1.update(other_args)
+         return self.data_module(**args1)
+
+     def load_prompt(self, prompt_path):
+         if os.path.isfile(prompt_path):
+             with open(prompt_path, 'r') as f:
+                 raw_prompts = f.read().splitlines()
+             self.prompt_list = [p.strip() for p in raw_prompts]
+             print('Load {} training prompts'.format(len(self.prompt_list)))
+             print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
+         else:
+             self.prompt_list = []
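The prompt templates loaded by `load_prompt` are expected to contain `[HistoryHere]`/`[CansHere]` (and optionally `[TargetHere]`) placeholders, which `TrainCollater.__call__` expands into comma-separated item titles followed by the `[HistoryEmb]`/`[CansEmb]` markers. A minimal sketch of that substitution on a toy sample (the template string and item names below are invented for illustration):

```python
# Toy reproduction of the placeholder expansion performed in TrainCollater.__call__.
sample = {
    "seq_name": ["Pulp Fiction", "Heat"],          # hypothetical watching history
    "cans_name": ["Fargo", "Casino", "Se7en"],     # hypothetical candidate set
}
template = ("The user has watched [HistoryHere]. "
            "Pick the next movie from these candidates: [CansHere].")

history = ", ".join(title + " [HistoryEmb]" for title in sample["seq_name"])
cans = ", ".join(title + " [CansEmb]" for title in sample["cans_name"])
prompt = template.replace("[HistoryHere]", history).replace("[CansHere]", cans)

print(prompt)
# The user has watched Pulp Fiction [HistoryEmb], Heat [HistoryEmb]. Pick the next movie
# from these candidates: Fargo [CansEmb], Casino [CansEmb], Se7en [CansEmb].
```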
data/lastfm_data.py ADDED
@@ -0,0 +1,87 @@
+ import torch
+ import os.path as op
+ import numpy as np
+ import pickle as pkl
+ import torch.utils.data as data
+
+ import pandas as pd
+ import random
+
+ class LastfmData(data.Dataset):
+     def __init__(self, data_dir=r'data/ref/lastfm_ctr',
+                  stage=None,
+                  cans_num=10,
+                  sep=", ",
+                  no_augment=True):
+         self.__dict__.update(locals())
+         self.aug = (stage=='train') and not no_augment
+         self.padding_item_id = 4606
+         self.check_files()
+
+     def __len__(self):
+         return len(self.session_data['seq'])
+
+     def __getitem__(self, i):
+         temp = self.session_data.iloc[i]
+         candidates = self.negative_sampling(temp['seq_unpad'], temp['next'])
+         cans_name = [self.item_id2name[can] for can in candidates]
+         sample = {
+             'seq': temp['seq'],
+             'seq_name': temp['seq_title'],
+             'len_seq': temp['len_seq'],
+             'seq_str': self.sep.join(temp['seq_title']),
+             'cans': candidates,
+             'cans_name': cans_name,
+             'cans_str': self.sep.join(cans_name),
+             'len_cans': self.cans_num,
+             'item_id': temp['next'],
+             'item_name': temp['next_item_name'],
+             'correct_answer': temp['next_item_name']
+         }
+         return sample
+
+     def negative_sampling(self, seq_unpad, next_item):
+         canset = [i for i in list(self.item_id2name.keys()) if i not in seq_unpad and i != next_item]
+         candidates = random.sample(canset, 1)
+         return candidates
+
+     def check_files(self):
+         self.item_id2name = self.get_music_id2name()
+         if self.stage == 'train':
+             filename = "train_data.df"
+         elif self.stage == 'val':
+             filename = "Val_data.df"
+         elif self.stage == 'test':
+             filename = "Test_data.df"
+         data_path = op.join(self.data_dir, filename)
+         self.session_data = self.session_data4frame(data_path, self.item_id2name)
+
+     def get_music_id2name(self):
+         music_id2name = dict()
+         item_path = op.join(self.data_dir, 'id2name.txt')
+         with open(item_path, 'r') as f:
+             for l in f.readlines():
+                 ll = l.strip('\n').split('::')
+                 music_id2name[int(ll[0])] = ll[1].strip()
+         return music_id2name
+
+     def session_data4frame(self, datapath, music_id2name):
+         train_data = pd.read_pickle(datapath)
+         train_data = train_data[train_data['len_seq'] >= 3]
+         def remove_padding(xx):
+             x = xx[:]
+             for i in range(10):
+                 try:
+                     x.remove(self.padding_item_id)
+                 except:
+                     break
+             return x
+         train_data['seq_unpad'] = train_data['seq'].apply(remove_padding)
+         def seq_to_title(x):
+             return [music_id2name[x_i] for x_i in x]
+         train_data['seq_title'] = train_data['seq_unpad'].apply(seq_to_title)
+         def next_item_title(x):
+             return music_id2name[x]
+         train_data['next_item_name'] = train_data['next'].apply(next_item_title)
+         return train_data
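For reference, `get_music_id2name` expects `id2name.txt` to hold one `id::name` pair per line, the format visible in the data/ref/lastfm/id2name.txt dump further below. A self-contained sketch of that parsing rule on a few in-memory lines:

```python
# Same parsing rule as LastfmData.get_music_id2name, applied to in-memory lines.
lines = ["0::Morcheeba", "1::Enigma", "2::Café Del Mar"]   # first entries of id2name.txt

music_id2name = {}
for l in lines:
    ll = l.strip("\n").split("::")
    music_id2name[int(ll[0])] = ll[1].strip()

print(music_id2name[2])   # Café Del Mar
```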
data/movielens_data.py ADDED
@@ -0,0 +1,102 @@
+ import torch
+ import os.path as op
+ import numpy as np
+ import pickle as pkl
+ import torch.utils.data as data
+
+ import pandas as pd
+ import random
+
+ class MovielensData(data.Dataset):
+     def __init__(self, data_dir=r'data/ref/movielens',
+                  stage=None,
+                  cans_num=10,
+                  sep=", ",
+                  no_augment=True):
+         self.__dict__.update(locals())
+         self.aug = (stage=='train') and not no_augment
+         self.padding_item_id = 1682
+         self.padding_rating = 0
+         self.check_files()
+
+     def __len__(self):
+         return len(self.session_data['seq'])
+
+     def __getitem__(self, i):
+         temp = self.session_data.iloc[i]
+         candidates = self.negative_sampling(temp['seq_unpad'], temp['next'])
+         cans_name = [self.item_id2name[can] for can in candidates]
+         sample = {
+             'seq': temp['seq'],
+             'seq_name': temp['seq_title'],
+             'len_seq': temp['len_seq'],
+             'seq_str': self.sep.join(temp['seq_title']),
+             'cans': candidates,
+             'cans_name': cans_name,
+             'cans_str': self.sep.join(cans_name),
+             'len_cans': self.cans_num,
+             'item_id': temp['next'],
+             'item_name': temp['next_item_name'],
+             'correct_answer': temp['next_item_name']
+         }
+         return sample
+
+     def negative_sampling(self, seq_unpad, next_item):
+         canset = [i for i in list(self.item_id2name.keys()) if i not in seq_unpad and i != next_item]
+         candidates = random.sample(canset, self.cans_num - 1) + [next_item]
+         random.shuffle(candidates)
+         return candidates
+
+     def check_files(self):
+         self.item_id2name = self.get_movie_id2name()
+         if self.stage == 'train':
+             filename = "train_data.df"
+         elif self.stage == 'val':
+             filename = "Val_data.df"
+         elif self.stage == 'test':
+             filename = "Test_data.df"
+         data_path = op.join(self.data_dir, filename)
+         self.session_data = self.session_data4frame(data_path, self.item_id2name)
+
+     def get_mv_title(self, s):
+         sub_list = [", The", ", A", ", An"]
+         for sub_s in sub_list:
+             if sub_s in s:
+                 return sub_s[2:] + " " + s.replace(sub_s, "")
+         return s
+
+     def get_movie_id2name(self):
+         movie_id2name = dict()
+         item_path = op.join(self.data_dir, 'u.item')
+         with open(item_path, 'r', encoding="ISO-8859-1") as f:
+             for l in f.readlines():
+                 ll = l.strip('\n').split('|')
+                 movie_id2name[int(ll[0]) - 1] = self.get_mv_title(ll[1][:-7])
+         return movie_id2name
+
+     def session_data4frame(self, datapath, movie_id2name):
+         train_data = pd.read_pickle(datapath)
+         train_data = train_data[train_data['len_seq'] >= 3]
+         def remove_padding(xx):
+             x = xx[:]
+             for i in range(10):
+                 try:
+                     x.remove((self.padding_item_id, self.padding_rating))
+                 except:
+                     break
+             return x
+         train_data['seq_unpad'] = train_data['seq'].apply(remove_padding)
+         def seq_to_title(x):
+             return [movie_id2name[x_i[0]] for x_i in x]
+         train_data['seq_title'] = train_data['seq_unpad'].apply(seq_to_title)
+         def next_item_title(x):
+             return movie_id2name[x[0]]
+         train_data['next_item_name'] = train_data['next'].apply(next_item_title)
+         def get_id_from_tumple(x):
+             return x[0]
+         def get_id_from_list(x):
+             return [i[0] for i in x]
+         train_data['next'] = train_data['next'].apply(get_id_from_tumple)
+         train_data['seq'] = train_data['seq'].apply(get_id_from_list)
+         train_data['seq_unpad'] = train_data['seq_unpad'].apply(get_id_from_list)
+         return train_data
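`MovielensData.negative_sampling` builds each candidate list from `cans_num - 1` sampled negatives plus the ground-truth next item, then shuffles so the answer lands at a random position. A standalone sketch with a toy catalogue (the ids and sizes below are illustrative):

```python
import random

# Mirrors MovielensData.negative_sampling on a toy item catalogue.
item_ids = list(range(20))      # hypothetical catalogue of 20 movie ids
seq_unpad = [3, 7, 11]          # items already in the user's history
next_item = 5                   # ground-truth next item
cans_num = 10

canset = [i for i in item_ids if i not in seq_unpad and i != next_item]
candidates = random.sample(canset, cans_num - 1) + [next_item]
random.shuffle(candidates)

print(len(candidates), next_item in candidates)   # 10 True
```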
data/ref/lastfm/Test_data.df ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cac058a2842b2a3ed4d39ed1fa0a742f5034a23c9851a2ffc157b4845d703570
+ size 6673
data/ref/lastfm/Val_data.df ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92a1ddb65ebdeff999f910b65660eb3d8c7cd85e6a19308d80c429481839f8b3
+ size 6690
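These `.df` files are stored as Git LFS pointers (matching the `*.df` rule added to .gitattributes above). Once the actual payload has been fetched (e.g. with `git lfs pull`), they are pickled pandas DataFrames that the dataset classes load via `pd.read_pickle`. A minimal sketch, assuming the `len_seq` column used by `session_data4frame` is present:

```python
import pandas as pd

# Load one of the LFS-tracked .df files after its real content has been fetched.
df = pd.read_pickle("data/ref/lastfm/Test_data.df")
print(df.shape)
print(df[df["len_seq"] >= 3].head())   # the same filter applied in session_data4frame
```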
data/ref/lastfm/id2name.txt ADDED
@@ -0,0 +1,4606 @@
1
+ 0::Morcheeba
2
+ 1::Enigma
3
+ 2::Café Del Mar
4
+ 3::Fleetwood Mac
5
+ 4::China Crisis
6
+ 5::Loscil
7
+ 6::Chicane
8
+ 7::Sigue Sigue Sputnik
9
+ 8::Duran Duran
10
+ 9::Air
11
+ 10::Röyksopp
12
+ 11::Moby
13
+ 12::Depeche Mode
14
+ 13::Groove Armada
15
+ 14::INXS
16
+ 15::Deep Forest
17
+ 16::Porcupine Tree
18
+ 17::De/Vision
19
+ 18::Radiohead
20
+ 19::VAST
21
+ 20::Michael Jackson
22
+ 21::God Is an Astronaut
23
+ 22::Pink Floyd
24
+ 23::Planet Funk
25
+ 24::Scissor Sisters
26
+ 25::Mew
27
+ 26::Stereophonics
28
+ 27::Placebo
29
+ 28::Infected Mushroom
30
+ 29::Delerium
31
+ 30::Roxette
32
+ 31::Paradise Lost
33
+ 32::Jamiroquai
34
+ 33::James Blunt
35
+ 34::Reamonn
36
+ 35::Opeth
37
+ 36::Elton John
38
+ 37::Tears for Fears
39
+ 38::Death Cab for Cutie
40
+ 39::Savage Garden
41
+ 40::Darren Hayes
42
+ 41::Explosions in the Sky
43
+ 42::AC/DC
44
+ 43::Apocalyptica
45
+ 44::Megadeth
46
+ 45::Dream Theater
47
+ 46::Tiësto
48
+ 47::The Darkness
49
+ 48::VNV Nation
50
+ 49::Thirteen Senses
51
+ 50::Dave Gahan
52
+ 51::Apparat
53
+ 52::Def Leppard
54
+ 53::Zero 7
55
+ 54::Dope
56
+ 55::Audioslave
57
+ 56::Charon
58
+ 57::Amorphis
59
+ 58::Pain
60
+ 59::Sentenced
61
+ 60::Blank & Jones
62
+ 61::Mesh
63
+ 62::Keane
64
+ 63::Muse
65
+ 64::Bright Eyes
66
+ 65::Tom Waits
67
+ 66::Gogol Bordello
68
+ 67::CAKE
69
+ 68::Jeff Buckley
70
+ 69::Sparklehorse
71
+ 70::Modest Mouse
72
+ 71::Sunset Rubdown
73
+ 72::Nine Inch Nails
74
+ 73::Clap Your Hands Say Yeah
75
+ 74::Manu Chao
76
+ 75::Belle and Sebastian
77
+ 76::John Lennon
78
+ 77::Primal Scream
79
+ 78::The Velvet Underground
80
+ 79::Bon Jovi
81
+ 80::Johann Sebastian Bach
82
+ 81::Weezer
83
+ 82::Imogen Heap
84
+ 83::Janis Joplin
85
+ 84::Oh No Oh My
86
+ 85::The Cramps
87
+ 86::Panda Bear
88
+ 87::Gavin DeGraw
89
+ 88::Wilco
90
+ 89::Ben Kweller
91
+ 90::Everlast
92
+ 91::Menomena
93
+ 92::Turin Brakes
94
+ 93::DIR EN GREY
95
+ 94::My Chemical Romance
96
+ 95::Travis
97
+ 96::The Killers
98
+ 97::Tina Turner
99
+ 98::Katatonia
100
+ 99::Skillet
101
+ 100::宇多田ヒカル
102
+ 101::浜崎あゆみ
103
+ 102::UVERworld
104
+ 103::Linkin Park
105
+ 104::中島美嘉
106
+ 105::GACKT
107
+ 106::L'Arc~en~Ciel
108
+ 107::雅-MIYAVI-
109
+ 108::The All-American Rejects
110
+ 109::The Rasmus
111
+ 110::HYDE
112
+ 111::Simple Plan
113
+ 112::30 Seconds to Mars
114
+ 113::Three Days Grace
115
+ 114::Seether
116
+ 115::Lostprophets
117
+ 116::HIM
118
+ 117::Korn
119
+ 118::Within Temptation
120
+ 119::Shakira
121
+ 120::Epica
122
+ 121::Nickelback
123
+ 122::Noir Désir
124
+ 123::Tool
125
+ 124::Hawthorne Heights
126
+ 125::Snow Patrol
127
+ 126::Jimmy Eat World
128
+ 127::Ricky Martin
129
+ 128::Ryan Adams
130
+ 129::Shayne Ward
131
+ 130::Sting
132
+ 131::Sex Pistols
133
+ 132::Hoobastank
134
+ 133::They Might Be Giants
135
+ 134::Rie fu
136
+ 135::ギルガメッシュ
137
+ 136::ムック
138
+ 137::Il Divo
139
+ 138::Josh Groban
140
+ 139::倖田來未
141
+ 140::Type O Negative
142
+ 141::Lonestar
143
+ 142::KT Tunstall
144
+ 143::Vanessa Carlton
145
+ 144::Hinder
146
+ 145::Phil Collins
147
+ 146::Melt-Banana
148
+ 147::Goo Goo Dolls
149
+ 148::Krypteria
150
+ 149::Paula DeAnda
151
+ 150::Saliva
152
+ 151::Seether (Feat. Amy Lee)
153
+ 152::Puddle of Mudd
154
+ 153::Staind
155
+ 154::Royal Hunt
156
+ 155::Dream Evil
157
+ 156::Olivia
158
+ 157::Juno Reactor
159
+ 158::Polysics
160
+ 159::O-Zone
161
+ 160::Mark Lanegan
162
+ 161::12012
163
+ 162::Sadie
164
+ 163::Sami Yusuf
165
+ 164::Massari
166
+ 165::Saybia
167
+ 166::Beirut
168
+ 167::Arcade Fire
169
+ 168::Babyshambles
170
+ 169::The Decemberists
171
+ 170::The National
172
+ 171::Sigur Rós
173
+ 172::The Libertines
174
+ 173::Kate Nash
175
+ 174::Wolf Parade
176
+ 175::Animal Collective
177
+ 176::The Fray
178
+ 177::The Shins
179
+ 178::Lady Gaga
180
+ 179::Green Day
181
+ 180::Nirvana
182
+ 181::Mariah Carey
183
+ 182::JoJo
184
+ 183::Backstreet Boys
185
+ 184::Jesse McCartney
186
+ 185::Justin Bieber
187
+ 186::Usher
188
+ 187::Nick Carter
189
+ 188::Metro Station
190
+ 189::Jeremih
191
+ 190::Fall Out Boy
192
+ 191::Kristinia DeBarge
193
+ 192::Paris Hilton
194
+ 193::50 Cent
195
+ 194::New Kids on the Block
196
+ 195::Дима Билан
197
+ 196::London After Midnight
198
+ 197::Psyclon Nine
199
+ 198::The Crüxshadows
200
+ 199::KMFDM
201
+ 200::Mindless Self Indulgence
202
+ 201::Daft Punk
203
+ 202::Goldfrapp
204
+ 203::Madonna
205
+ 204::Dido
206
+ 205::Gorillaz
207
+ 206::The Cure
208
+ 207::Poets of the Fall
209
+ 208::OneRepublic
210
+ 209::System of a Down
211
+ 210::The Beatles
212
+ 211::Thom Yorke
213
+ 212::Massive Attack
214
+ 213::2Pac
215
+ 214::Rihanna
216
+ 215::Britney Spears
217
+ 216::Jennifer Lopez
218
+ 217::Katy Perry
219
+ 218::P!nk
220
+ 219::Black Eyed Peas
221
+ 220::Hilary Duff
222
+ 221::Fergie
223
+ 222::Ashley Tisdale
224
+ 223::Chris Brown
225
+ 224::Kanye West
226
+ 225::Avril Lavigne
227
+ 226::Taylor Swift
228
+ 227::The Pussycat Dolls
229
+ 228::Evanescence
230
+ 229::Akira Yamaoka
231
+ 230::blink-182
232
+ 231::Bat for Lashes
233
+ 232::The Smashing Pumpkins
234
+ 233::3OH!3
235
+ 234::Ke$ha
236
+ 235::Craig David
237
+ 236::Eminem
238
+ 237::Ne-Yo
239
+ 238::The Birthday Massacre
240
+ 239::Limp Bizkit
241
+ 240::Fort Minor
242
+ 241::Funeral for a Friend
243
+ 242::Finch
244
+ 243::Cold
245
+ 244::Papa Roach
246
+ 245::Paramore
247
+ 246::Flyleaf
248
+ 247::Bullet for My Valentine
249
+ 248::Angels & Airwaves
250
+ 249::LaFee
251
+ 250::In Flames
252
+ 251::Anathema
253
+ 252::Saosin
254
+ 253::Tracktor Bowling
255
+ 254::Pitchshifter
256
+ 255::P.O.D.
257
+ 256::U2
258
+ 257::Deftones
259
+ 258::Ill Niño
260
+ 259::Clan of Xymox
261
+ 260::Celldweller
262
+ 261::Static-X
263
+ 262::Fluke
264
+ 263::happysad
265
+ 264::Lacrimosa
266
+ 265::Lindsay Lohan
267
+ 266::Good Charlotte
268
+ 267::The Used
269
+ 268::Maroon 5
270
+ 269::Adam Lambert
271
+ 270::Kid Cudi
272
+ 271::Owl City
273
+ 272::The Pretty Reckless
274
+ 273::Metallica
275
+ 274::#####
276
+ 275::Fear Factory
277
+ 276::Rise Against
278
+ 277::Killswitch Engage
279
+ 278::Jane's Addiction
280
+ 279::Tiamat
281
+ 280::Nightwish
282
+ 281::Sixpence None the Richer
283
+ 282::t.A.T.u.
284
+ 283::Disturbed
285
+ 284::Jem
286
+ 285::Kerli
287
+ 286::Bruno Mars
288
+ 287::Slipknot
289
+ 288::Myslovitz
290
+ 289::Björk
291
+ 290::Rammstein
292
+ 291::A Perfect Circle
293
+ 292::Joe Satriani
294
+ 293::Stone Sour
295
+ 294::Editors
296
+ 295::Billy Talent
297
+ 296::Silverstein
298
+ 297::The Red Jumpsuit Apparatus
299
+ 298::Alesana
300
+ 299::Senses Fail
301
+ 300::Story of the Year
302
+ 301::Tactical Sekt
303
+ 302::Sepultura
304
+ 303::The Offspring
305
+ 304::Akon
306
+ 305::B.o.B
307
+ 306::Black Rebel Motorcycle Club
308
+ 307::Lykke Li
309
+ 308::Jay-Z
310
+ 309::Black Veil Brides
311
+ 310::Tokio Hotel
312
+ 311::Oomph!
313
+ 312::Guano Apes
314
+ 313::Red
315
+ 314::Breaking Benjamin
316
+ 315::Thousand Foot Krutch
317
+ 316::Jay-Z and Linkin Park
318
+ 317::Hollywood Undead
319
+ 318::Cypress Hill
320
+ 319::Lacuna Coil
321
+ 320::September
322
+ 321::Gregorian
323
+ 322::OK Go
324
+ 323::Fightstar
325
+ 324::Godsmack
326
+ 325::Keith Urban
327
+ 326::Rascal Flatts
328
+ 327::Brooks & Dunn
329
+ 328::Scooter
330
+ 329::Thursday
331
+ 330::Bloodhound Gang
332
+ 331::Robert Miles
333
+ 332::Automatic Loveletter
334
+ 333::Underoath
335
+ 334::Lupe Fiasco
336
+ 335::Dead by April
337
+ 336::Adema
338
+ 337::Dead by Sunrise
339
+ 338::Faktion
340
+ 339::We Are the Fallen
341
+ 340::Coma
342
+ 341::Drowning Pool
343
+ 342::Drake
344
+ 343::Sky Ferreira
345
+ 344::The Magic Numbers
346
+ 345::Mary Elizabeth McGlynn
347
+ 346::Brand New
348
+ 347::Will Smith
349
+ 348::Daughtry
350
+ 349::12 Stones
351
+ 350::Ferry Corsten
352
+ 351::Silversun Pickups
353
+ 352::Scary Kids Scaring Kids
354
+ 353::Xandria
355
+ 354::Kill Hannah
356
+ 355::Taking Back Sunday
357
+ 356::Hayden Panettiere
358
+ 357::Линда
359
+ 358::Smile Empty Soul
360
+ 359::Jason Derülo
361
+ 360::O.N.A.
362
+ 361::Chylińska
363
+ 362::Alexandre Desplat
364
+ 363::Emilie Autumn
365
+ 364::Fuel
366
+ 365::Stateless
367
+ 366::Robert Pattinson
368
+ 367::Trading Yesterday
369
+ 368::Armor for Sleep
370
+ 369::Bear McCreary
371
+ 370::I Am Ghost
372
+ 371::Xzibit
373
+ 372::Helios
374
+ 373::Emery
375
+ 374::Alexi Murdoch
376
+ 375::Anya Marina
377
+ 376::Mudvayne
378
+ 377::Chingy
379
+ 378::Plumb
380
+ 379::Taproot
381
+ 380::Jason Walker
382
+ 381::Mark Ronson
383
+ 382::Gavin Rossdale
384
+ 383::Blindside
385
+ 384::Amy Lee
386
+ 385::James Newton Howard
387
+ 386::Band of Skulls
388
+ 387::Sea Wolf
389
+ 388::The Letter Black
390
+ 389::Ja Rule
391
+ 390::Holly Brook
392
+ 391::Hurricane Bells
393
+ 392::Kylie Minogue
394
+ 393::Ashlee Simpson
395
+ 394::Lily Allen
396
+ 395::Heidi Montag
397
+ 396::New Order
398
+ 397::Japan
399
+ 398::The Smiths
400
+ 399::Joy Division
401
+ 400::The Sonics
402
+ 401::Southern Culture on the Skids
403
+ 402::Small Faces
404
+ 403::The Detroit Cobras
405
+ 404::The Pretty Things
406
+ 405::Talking Heads
407
+ 406::The Stranglers
408
+ 407::The Clash
409
+ 408::Roxy Music
410
+ 409::Alphaville
411
+ 410::Blondie
412
+ 411::Frank Sinatra
413
+ 412::The Human League
414
+ 413::Information Society
415
+ 414::Pet Shop Boys
416
+ 415::Ultravox
417
+ 416::Yazoo
418
+ 417::Heaven 17
419
+ 418::Siouxsie and the Banshees
420
+ 419::Misfits
421
+ 420::Dean Martin
422
+ 421::Dick Dale
423
+ 422::Motörhead
424
+ 423::Devo
425
+ 424::Split Enz
426
+ 425::The Cars
427
+ 426::Dead or Alive
428
+ 427::Oingo Boingo
429
+ 428::Eurythmics
430
+ 429::Visage
431
+ 430::Magazine
432
+ 431::Madness
433
+ 432::Ska-P
434
+ 433::The Specials
435
+ 434::Culture Club
436
+ 435::Elvis Costello
437
+ 436::Sublime
438
+ 437::A Flock of Seagulls
439
+ 438::Echo & The Bunnymen
440
+ 439::Cab Calloway
441
+ 440::The B-52's
442
+ 441::Butthole Surfers
443
+ 442::Nekromantix
444
+ 443::The Byrds
445
+ 444::The Mighty Mighty Bosstones
446
+ 445::Peggy Lee
447
+ 446::Corona
448
+ 447::The Jam
449
+ 448::Joe Jackson
450
+ 449::Messer Chups
451
+ 450::The Meteors
452
+ 451::Adam and the Ants
453
+ 452::The Zombies
454
+ 453::Barón Rojo
455
+ 454::Missing Persons
456
+ 455::Squeeze
457
+ 456::Akurat
458
+ 457::The Brian Setzer Orchestra
459
+ 458::Stray Cats
460
+ 459::Goldfinger
461
+ 460::Mecano
462
+ 461::Men at Work
463
+ 462::Squirrel Nut Zippers
464
+ 463::Harry Connick, Jr.
465
+ 464::Dick Dale and His Del-Tones
466
+ 465::The Kings of Nuthin'
467
+ 466::B-Movie
468
+ 467::Anne Clark
469
+ 468::Mad Sin
470
+ 469::Calabrese
471
+ 470::The Seeds
472
+ 471::The Litter
473
+ 472::Big Bad Voodoo Daddy
474
+ 473::13th Floor Elevators
475
+ 474::Holly Golightly
476
+ 475::IRA
477
+ 476::The Troggs
478
+ 477::Duffy
479
+ 478::Allison Iraheta
480
+ 479::Lena
481
+ 480::Jónsi
482
+ 481::Kalomoira
483
+ 482::Άννα Βισση
484
+ 483::Inna
485
+ 484::Jewel
486
+ 485::Fresno
487
+ 486::Hurts
488
+ 487::Miranda Cosgrove
489
+ 488::Wanessa
490
+ 489::The Script
491
+ 490::Alexander Rybak
492
+ 491::Cinema Bizarre
493
+ 492::Edyta Górniak
494
+ 493::ZAZ
495
+ 494::Cary Brothers
496
+ 495::Karl Wolf
497
+ 496::Boris
498
+ 497::Fever Ray
499
+ 498::Fennesz
500
+ 499::I Set My Friends On Fire
501
+ 500::Blue
502
+ 501::Jessica Simpson
503
+ 502::RBD
504
+ 503::Mandy Moore
505
+ 504::Paulina Rubio
506
+ 505::Fey
507
+ 506::Dannii Minogue
508
+ 507::Thalía
509
+ 508::A*Teens
510
+ 509::Geri Halliwell
511
+ 510::Atomic Kitten
512
+ 511::Andrés Calamaro
513
+ 512::Laibach
514
+ 513::David Bowie
515
+ 514::Einstürzende Neubauten
516
+ 515::Jesu
517
+ 516::Soundgarden
518
+ 517::Stone Temple Pilots
519
+ 518::Killing Joke
520
+ 519::Bauhaus
521
+ 520::Nitzer Ebb
522
+ 521::Ladytron
523
+ 522::Suede
524
+ 523::My Bloody Valentine
525
+ 524::Fugazi
526
+ 525::Godflesh
527
+ 526::Prong
528
+ 527::Filter
529
+ 528::Lard
530
+ 529::Noisettes
531
+ 530::VV Brown
532
+ 531::Sonique
533
+ 532::Robbie Williams
534
+ 533::Christina Aguilera
535
+ 534::Leona Lewis
536
+ 535::Beyoncé
537
+ 536::Justin Timberlake
538
+ 537::Monrose
539
+ 538::Cheryl Cole
540
+ 539::Sarah Connor
541
+ 540::M. Pokora
542
+ 541::Taio Cruz
543
+ 542::Girls Aloud
544
+ 543::Rachel Stevens
545
+ 544::Take That
546
+ 545::Iron Maiden
547
+ 546::Savatage
548
+ 547::Bruce Dickinson
549
+ 548::Children of Bodom
550
+ 549::Andre Matos
551
+ 550::Viper
552
+ 551::Matanza
553
+ 552::Nevermore
554
+ 553::Enya
555
+ 554::The Ting Tings
556
+ 555::Pearl Jam
557
+ 556::Isis
558
+ 557::Tori Amos
559
+ 558::The Mars Volta
560
+ 559::Panic! At the Disco
561
+ 560::The Fall of Troy
562
+ 561::Dżem
563
+ 562::Hard-Fi
564
+ 563::Bond
565
+ 564::Sophie Ellis-Bextor
566
+ 565::Rick Astley
567
+ 566::Solar Stone
568
+ 567::Ian Van Dahl
569
+ 568::Guru Josh Project
570
+ 569::Morrissey
571
+ 570::Frank Zappa
572
+ 571::Plaid
573
+ 572::Underworld
574
+ 573::Thomas Newman
575
+ 574::Junkie XL
576
+ 575::Sasha
577
+ 576::Glee Cast
578
+ 577::All Time Low
579
+ 578::Runner Runner
580
+ 579::And One
581
+ 580::Crystal Castles
582
+ 581::Anything Box
583
+ 582::Recoil
584
+ 583::Hubert Kah
585
+ 584::Sally Shapiro
586
+ 585::Ayria
587
+ 586::Red Flag
588
+ 587::The Frozen Autumn
589
+ 588::Cause & Effect
590
+ 589::Wham!
591
+ 590::Billy Idol
592
+ 591::Provision
593
+ 592::Peter Murphy
594
+ 593::Kaizers Orchestra
595
+ 594::Kent
596
+ 595::Iggy Pop
597
+ 596::Yann Tiersen
598
+ 597::Rush
599
+ 598::Frédéric Chopin
600
+ 599::Incubus
601
+ 600::Claude Debussy
602
+ 601::Faith No More
603
+ 602::Wolfgang Amadeus Mozart
604
+ 603::John Petrucci & Jordan Rudess
605
+ 604::Ludwig van Beethoven
606
+ 605::The String Quartet
607
+ 606::Yes
608
+ 607::Mike Oldfield
609
+ 608::Zu
610
+ 609::Temple of the Dog
611
+ 610::Bob Dylan
612
+ 611::Miles Davis
613
+ 612::John Coltrane
614
+ 613::Thelonious Monk
615
+ 614::Florence + the Machine
616
+ 615::The Cinematic Orchestra
617
+ 616::Yeasayer
618
+ 617::Tera Melos
619
+ 618::Bill Evans
620
+ 619::Animals as Leaders
621
+ 620::Arcturus
622
+ 621::Clutch
623
+ 622::Chet Baker
624
+ 623::Madeleine Peyroux
625
+ 624::Charlie Parker
626
+ 625::Charles Mingus
627
+ 626::Herbie Hancock
628
+ 627::Pat Metheny
629
+ 628::Bring Me The Horizon
630
+ 629::Escape The Fate
631
+ 630::MALICE MIZER
632
+ 631::Marilyn Manson
633
+ 632::Coldplay
634
+ 633::Nas
635
+ 634::Mos Def
636
+ 635::Gang Starr
637
+ 636::The Roots
638
+ 637::A Tribe Called Quest
639
+ 638::Sufjan Stevens
640
+ 639::Saul Williams
641
+ 640::Sunn O)))
642
+ 641::Vampire Weekend
643
+ 642::Sonic Youth
644
+ 643::Masta Ace
645
+ 644::De La Soul
646
+ 645::Jedi Mind Tricks
647
+ 646::ASIAN KUNG-FU GENERATION
648
+ 647::MF DOOM
649
+ 648::N.W.A
650
+ 649::Mike Patton
651
+ 650::Mr. Bungle
652
+ 651::Public Enemy
653
+ 652::Kool G Rap
654
+ 653::Talib Kweli
655
+ 654::Wu-Tang Clan
656
+ 655::Black Kids
657
+ 656::Merzbow
658
+ 657::Why?
659
+ 658::Method Man
660
+ 659::Drudkh
661
+ 660::Bubba Sparxxx
662
+ 661::Del tha Funkee Homosapien
663
+ 662::Wolves in the Throne Room
664
+ 663::Orgy
665
+ 664::Foo Fighters
666
+ 665::Combichrist
667
+ 666::Grendel
668
+ 667::Agonoize
669
+ 668::Hocico
670
+ 669::Feindflug
671
+ 670::Amduscia
672
+ 671::Dawn of Ashes
673
+ 672::She Wants Revenge
674
+ 673::Headscan
675
+ 674::Rotersand
676
+ 675::Blutengel
677
+ 676::Wynardtage
678
+ 677::Unheilig
679
+ 678::[:SITD:]
680
+ 679::L'Âme Immortelle
681
+ 680::Terminal Choice
682
+ 681::Suicidal Romance
683
+ 682::Liquid Divine
684
+ 683::Melotron
685
+ 684::Evil's Toy
686
+ 685::Vibrasphere
687
+ 686::Bamboo Forest
688
+ 687::Miss Construction
689
+ 688::Samsas Traum
690
+ 689::Zombie Girl
691
+ 690::Nurzery [Rhymes]
692
+ 691::Nachtmahr
693
+ 692::ASP
694
+ 693::Subway to Sally
695
+ 694::Caustic
696
+ 695::Diary of Dreams
697
+ 696::xotox
698
+ 697::Tamtrum
699
+ 698::Covenant
700
+ 699::Icon of Coil
701
+ 700::Funker Vogt
702
+ 701::God Module
703
+ 702::Noisuf-X
704
+ 703::Reaper
705
+ 704::Portishead
706
+ 705::David Guetta
707
+ 706::Cascada
708
+ 707::Paul van Dyk
709
+ 708::ATB
710
+ 709::Skeletal Family
711
+ 710::Dragonette
712
+ 711::Armin van Buuren
713
+ 712::Yelle
714
+ 713::Kyau vs. Albert
715
+ 714::Novaspace
716
+ 715::OceanLab
717
+ 716::Groove Coverage
718
+ 717::Fragma
719
+ 718::Sylver
720
+ 719::Markus Schulz
721
+ 720::IAMX
722
+ 721::Hot Chip
723
+ 722::Cryo
724
+ 723::Front Line Assembly
725
+ 724::Haujobb
726
+ 725::mind.in.a.box
727
+ 726::Absurd Minds
728
+ 727::Seabound
729
+ 728::Colony 5
730
+ 729::Apoptygma Berzerk
731
+ 730::Suicide Commando
732
+ 731::Neuroticfish
733
+ 732::Assemblage 23
734
+ 733::Edge of Dawn
735
+ 734::Cosmic Gate
736
+ 735::Above & Beyond
737
+ 736::Pride and Fall
738
+ 737::E-Craft
739
+ 738::Code 64
740
+ 739::Angels & Agony
741
+ 740::4 Strings
742
+ 741::XP8
743
+ 742::Bruderschaft
744
+ 743::Syrian
745
+ 744::Digitalism
746
+ 745::Justice
747
+ 746::Armand van Helden
748
+ 747::Simian Mobile Disco
749
+ 748::MSTRKRFT
750
+ 749::The Sisters of Mercy
751
+ 750::Holly Valance
752
+ 751::The Knife
753
+ 752::Porcelain and the Tramps
754
+ 753::Dope Stars Inc.
755
+ 754::Ashbury Heights
756
+ 755::Vive la Fête
757
+ 756::Diorama
758
+ 757::Belanova
759
+ 758::Alice in Videoland
760
+ 759::Theatre of Tragedy
761
+ 760::ADULT.
762
+ 761::Annie
763
+ 762::Front 242
764
+ 763::Robots in Disguise
765
+ 764::Alien Sex Fiend
766
+ 765::Christian Death
767
+ 766::Helalyn Flowers
768
+ 767::Shiny Toy Guns
769
+ 768::Claire Voyant
770
+ 769::Hungry Lucy
771
+ 770::Minerve
772
+ 771::Client
773
+ 772::Frozen Plasma
774
+ 773::Sohodolls
775
+ 774::Tiga
776
+ 775::Vitalic
777
+ 776::Fischerspooner
778
+ 777::Sebastian
779
+ 778::Project Pitchfork
780
+ 779::Deine Lakaien
781
+ 780::Panzer AG
782
+ 781::Imperative Reaction
783
+ 782::In Strict Confidence
784
+ 783::Decoded Feedback
785
+ 784::Le Tigre
786
+ 785::Paul Oakenfold
787
+ 786::Cinema Strange
788
+ 787::Uffie
789
+ 788::Aesthetic Perfection
790
+ 789::Safri Duo
791
+ 790::Peaches
792
+ 791::Blaqk Audio
793
+ 792::Alien Vampires
794
+ 793::Heimataerde
795
+ 794::David Vendetta
796
+ 795::Rank 1
797
+ 796::Faderhead
798
+ 797::Destroid
799
+ 798::Blind Passengers
800
+ 799::Beborn Beton
801
+ 800::Faith and the Muse
802
+ 801::Miss Kittin
803
+ 802::Chicks on Speed
804
+ 803::BT
805
+ 804::Darude
806
+ 805::Switchblade Symphony
807
+ 806::Lasgo
808
+ 807::Kittie
809
+ 808::Michigan
810
+ 809::Felix da Housecat
811
+ 810::Pulsedriver
812
+ 811::Lo-Fi-Fnk
813
+ 812::Welle:Erdball
814
+ 813::T.O.Y.
815
+ 814::Electrocute
816
+ 815::Pzychobitch
817
+ 816::Hallucinogen
818
+ 817::Astral Projection
819
+ 818::Razed in Black
820
+ 819::Dismantled
821
+ 820::Amon Amarth
822
+ 821::Kreator
823
+ 822::Queen
824
+ 823::Sodom
825
+ 824::Flotsam and Jetsam
826
+ 825::Sabaton
827
+ 826::Hirax
828
+ 827::Venom
829
+ 828::Biomechanical
830
+ 829::Turisas
831
+ 830::Warbringer
832
+ 831::Dismember
833
+ 832::Cut Copy
834
+ 833::Katie Melua
835
+ 834::Garbage
836
+ 835::Scorpions
837
+ 836::a-ha
838
+ 837::Blur
839
+ 838::Arctic Monkeys
840
+ 839::Bloc Party
841
+ 840::Interpol
842
+ 841::Toni Braxton
843
+ 842::Bob Marley
844
+ 843::Whitney Houston
845
+ 844::Mary J. Blige
846
+ 845::Brandy
847
+ 846::Monica
848
+ 847::Kelly Clarkson
849
+ 848::Alanis Morissette
850
+ 849::Cobra Starship
851
+ 850::Cher
852
+ 851::Wir sind Helden
853
+ 852::Tokyo Police Club
854
+ 853::The Kills
855
+ 854::The Strokes
856
+ 855::Yeah Yeah Yeahs
857
+ 856::She & Him
858
+ 857::Ra Ra Riot
859
+ 858::Local Natives
860
+ 859::3 Doors Down
861
+ 860::Gwen Stefani
862
+ 861::La Roux
863
+ 862::Alphabeat
864
+ 863::Oasis
865
+ 864::Space Cowboy
866
+ 865::Pitty
867
+ 866::Ellie Goulding
868
+ 867::The Kinks
869
+ 868::Chuck Berry
870
+ 869::Ennio Morricone
871
+ 870::Kings of Convenience
872
+ 871::Elliott Smith
873
+ 872::Squarepusher
874
+ 873::of Montreal
875
+ 874::Neil Young
876
+ 875::The Veronicas
877
+ 876::Marina & the Diamonds
878
+ 877::The xx
879
+ 878::Kaiser Chiefs
880
+ 879::Electric Light Orchestra
881
+ 880::Johnny Cash
882
+ 881::Buena Vista Social Club
883
+ 882::Ozzy Osbourne
884
+ 883::1200 Micrograms
885
+ 884::Autechre
886
+ 885::Carbon Based Lifeforms
887
+ 886::Casino Versus Japan
888
+ 887::Arovane
889
+ 888::Proem
890
+ 889::The Crystal Method
891
+ 890::Ochre
892
+ 891::Belinda
893
+ 892::August Burns Red
894
+ 893::Slayer
895
+ 894::Alice in Chains
896
+ 895::Pixies
897
+ 896::The Doors
898
+ 897::Tegan and Sara
899
+ 898::Amy Winehouse
900
+ 899::Kamelot
901
+ 900::The Cardigans
902
+ 901::Yanni
903
+ 902::At the Drive-In
904
+ 903::Simple Minds
905
+ 904::The Kooks
906
+ 905::Alesha Dixon
907
+ 906::Alan Silvestri
908
+ 907::Franz Ferdinand
909
+ 908::Patrick Wolf
910
+ 909::Phoenix
911
+ 910::The Police
912
+ 911::Roy Orbison
913
+ 912::Venetian Snares
914
+ 913::Shpongle
915
+ 914::Skank
916
+ 915::The Beach Boys
917
+ 916::Samael
918
+ 917::Black Sabbath
919
+ 918::White Lies
920
+ 919::Freemasons
921
+ 920::Calvin Harris
922
+ 921::Pendulum
923
+ 922::Genesis
924
+ 923::Paul McCartney
925
+ 924::The Jimi Hendrix Experience
926
+ 925::ABBA
927
+ 926::Matt & Kim
928
+ 927::Antonio Vivaldi
929
+ 928::Just Jack
930
+ 929::Natalie Imbruglia
931
+ 930::The Last Shadow Puppets
932
+ 931::Morbid Angel
933
+ 932::Fleet Foxes
934
+ 933::Jimi Hendrix
935
+ 934::Motel
936
+ 935::Clint Mansell
937
+ 936::Brian Eno
938
+ 937::Hammock
939
+ 938::Soundtrack
940
+ 939::Little Joy
941
+ 940::UNKLE
942
+ 941::5'nizza
943
+ 942::Thrice
944
+ 943::Gnarls Barkley
945
+ 944::Flying Lotus
946
+ 945::Raekwon
947
+ 946::Four Tet
948
+ 947::Prefuse 73
949
+ 948::The Avalanches
950
+ 949::The Black Keys
951
+ 950::Metronomy
952
+ 951::Coil
953
+ 952::Steve Jablonsky
954
+ 953::Lara Fabian
955
+ 954::Yiruma
956
+ 955::M.I.A.
957
+ 956::Brown Eyed Girls
958
+ 957::Brandon Flowers
959
+ 958::Serge Gainsbourg
960
+ 959::Beach House
961
+ 960::AFX
962
+ 961::Jane Air
963
+ 962::Wisp
964
+ 963::Dr. Dre
965
+ 964::Hande Yener
966
+ 965::Aerosmith
967
+ 966::Monster Magnet
968
+ 967::Destroyer
969
+ 968::Danny Elfman
970
+ 969::Basshunter
971
+ 970::Eric Prydz
972
+ 971::Spice Girls
973
+ 972::The Drums
974
+ 973::Idiot Pilot
975
+ 974::Biffy Clyro
976
+ 975::Foals
977
+ 976::Slash
978
+ 977::will.i.am
979
+ 978::City and Colour
980
+ 979::VersaEmerge
981
+ 980::Ella Fitzgerald
982
+ 981::The Flashbulb
983
+ 982::Amon Tobin
984
+ 983::µ-Ziq
985
+ 984::Chris Clark
986
+ 985::Kettel
987
+ 986::Clark
988
+ 987::Tycho
989
+ 988::Bola
990
+ 989::The Future Sound of London
991
+ 990::Wagon Christ
992
+ 991::Telefon Tel Aviv
993
+ 992::DJ Shadow
994
+ 993::Lenny Kravitz
995
+ 994::Distance
996
+ 995::The Dead Weather
997
+ 996::The Dust Brothers
998
+ 997::Serj Tankian
999
+ 998::Wintersun
1000
+ 999::The Whitest Boy Alive
1001
+ 1000::Colby O'Donis
1002
+ 1001::DJ Krush
1003
+ 1002::Sébastien Tellier
1004
+ 1003::Flunk
1005
+ 1004::DJ Vadim
1006
+ 1005::Paradiso Girls
1007
+ 1006::Cansei de Ser Sexy
1008
+ 1007::Kate Bush
1009
+ 1008::Jon Brion
1010
+ 1009::Slagsmålsklubben
1011
+ 1010::Blonde Redhead
1012
+ 1011::Steven Wilson
1013
+ 1012::Jet
1014
+ 1013::M83
1015
+ 1014::No-Man
1016
+ 1015::Sheryl Crow
1017
+ 1016::[unknown]
1018
+ 1017::Carole King
1019
+ 1018::Rainbow
1020
+ 1019::Róisín Murphy
1021
+ 1020::Biosphere
1022
+ 1021::Unter Null
1023
+ 1022::Jack Off Jill
1024
+ 1023::Tim Hecker
1025
+ 1024::Broken Bells
1026
+ 1025::Ratatat
1027
+ 1026::Soulfly
1028
+ 1027::The Horrors
1029
+ 1028::Marilyn Monroe
1030
+ 1029::Lumen
1031
+ 1030::Танцы Минус
1032
+ 1031::CocoRosie
1033
+ 1032::Wings
1034
+ 1033::Alcazar
1035
+ 1034::The Presets
1036
+ 1035::Mr. Oizo
1037
+ 1036::Chromeo
1038
+ 1037::Freddie Mercury
1039
+ 1038::Morphine
1040
+ 1039::The Dandy Warhols
1041
+ 1040::Raul Seixas
1042
+ 1041::The Cinematics
1043
+ 1042::The Postal Service
1044
+ 1043::UFO
1045
+ 1044::Die Ärzte
1046
+ 1045::Switchfoot
1047
+ 1046::Europe
1048
+ 1047::The Thermals
1049
+ 1048::Audio Bullys
1050
+ 1049::The Yardbirds
1051
+ 1050::Tricky
1052
+ 1051::Gin Blossoms
1053
+ 1052::Junior Senior
1054
+ 1053::Dusty Springfield
1055
+ 1054::Jamie Cullum
1056
+ 1055::Junior Boys
1057
+ 1056::Cujo
1058
+ 1057::ISAN
1059
+ 1058::Sandra
1060
+ 1059::Гражданская Оборона
1061
+ 1060::Nicholas Hooper
1062
+ 1061::Charlotte Sometimes
1063
+ 1062::ILS
1064
+ 1063::Survivor
1065
+ 1064::StoneBridge
1066
+ 1065::Kid Loco
1067
+ 1066::Nancy Sinatra
1068
+ 1067::System 7
1069
+ 1068::Yo La Tengo
1070
+ 1069::Subheim
1071
+ 1070::Arcana
1072
+ 1071::If These Trees Could Talk
1073
+ 1072::Peter Bjorn and John
1074
+ 1073::LMFAO
1075
+ 1074::Birdy Nam Nam
1076
+ 1075::Godspeed You! Black Emperor
1077
+ 1076::Stars of the Lid
1078
+ 1077::Christ.
1079
+ 1078::Bibio
1080
+ 1079::N*E*R*D
1081
+ 1080::Theory of a Deadman
1082
+ 1081::Tyler Bates
1083
+ 1082::Straylight Run
1084
+ 1083::Fanfarlo
1085
+ 1084::Something Corporate
1086
+ 1085::Nick Lachey
1087
+ 1086::Black Moth Super Rainbow
1088
+ 1087::Mirah
1089
+ 1088::Kate Ryan
1090
+ 1089::Manchester Orchestra
1091
+ 1090::Hans Zimmer & James Newton Howard
1092
+ 1091::RJD2
1093
+ 1092::Ulrich Schnauss
1094
+ 1093::D12
1095
+ 1094::Young Buck
1096
+ 1095::Albert Hammond, Jr.
1097
+ 1096::Aztec Camera
1098
+ 1097::Clinic
1099
+ 1098::Twista
1100
+ 1099::The Books
1101
+ 1100::Hercules and Love Affair
1102
+ 1101::B.B. King
1103
+ 1102::Moderat
1104
+ 1103::Eva Cassidy
1105
+ 1104::Jerry Goldsmith
1106
+ 1105::Axwell
1107
+ 1106::Empire of the Sun
1108
+ 1107::Plan B
1109
+ 1108::The Black Ghosts
1110
+ 1109::Suicidal Tendencies
1111
+ 1110::Hugh Laurie
1112
+ 1111::Detektivbyrån
1113
+ 1112::Gescom
1114
+ 1113::Polygon Window
1115
+ 1114::The Tuss
1116
+ 1115::Luke Vibert
1117
+ 1116::Clueso
1118
+ 1117::I Blame Coco
1119
+ 1118::Tunng
1120
+ 1119::Xavier Naidoo
1121
+ 1120::The Dø
1122
+ 1121::The Allman Brothers Band
1123
+ 1122::The Golden Filter
1124
+ 1123::Lil' Kim
1125
+ 1124::m-flo
1126
+ 1125::Kleerup
1127
+ 1126::Neon Indian
1128
+ 1127::Cinnamon Chasers
1129
+ 1128::Garou
1130
+ 1129::The Coral
1131
+ 1130::Secret Garden
1132
+ 1131::Trent Reznor
1133
+ 1132::Nancy Ajram
1134
+ 1133::James Taylor
1135
+ 1134::Lightning Bolt
1136
+ 1135::iiO
1137
+ 1136::Miss Kittin & The Hacker
1138
+ 1137::David Arnold
1139
+ 1138::Desmond Dekker
1140
+ 1139::Jan Delay
1141
+ 1140::Meat Beat Manifesto
1142
+ 1141::Die Fantastischen Vier
1143
+ 1142::Young Jeezy
1144
+ 1143::Amorphous Androgynous
1145
+ 1144::Steve Winwood
1146
+ 1145::Skalpel
1147
+ 1146::Death in Vegas
1148
+ 1147::Wavves
1149
+ 1148::Bear in Heaven
1150
+ 1149::Embrace
1151
+ 1150::Xavier Rudd
1152
+ 1151::Monolake
1153
+ 1152::John Powell
1154
+ 1153::Kevin Devine
1155
+ 1154::I Am Kloot
1156
+ 1155::Klaus Schulze
1157
+ 1156::Paul Desmond
1158
+ 1157::Dexter Gordon
1159
+ 1158::Dan Black
1160
+ 1159::Azure Ray
1161
+ 1160::Tiger Lou
1162
+ 1161::Trevor Rabin
1163
+ 1162::Sub Focus
1164
+ 1163::Bag Raiders
1165
+ 1164::Silver Jews
1166
+ 1165::India.Arie
1167
+ 1166::Adorable
1168
+ 1167::Fettes Brot
1169
+ 1168::Olly Murs
1170
+ 1169::Discovery
1171
+ 1170::Skrillex
1172
+ 1171::Dexys Midnight Runners
1173
+ 1172::Swayzak
1174
+ 1173::TERIYAKI BOYZ
1175
+ 1174::Leftfield
1176
+ 1175::ASHES dIVIDE
1177
+ 1176::John B
1178
+ 1177::Marco Beltrami
1179
+ 1178::Remy Ma
1180
+ 1179::Trina
1181
+ 1180::Shawnna
1182
+ 1181::Sixtoo
1183
+ 1182::Plastikman
1184
+ 1183::Dirty Vegas
1185
+ 1184::Benassi Bros.
1186
+ 1185::Agoria
1187
+ 1186::Green Sun
1188
+ 1187::The KLF
1189
+ 1188::Anahí
1190
+ 1189::Dulce María
1191
+ 1190::Christian Chávez
1192
+ 1191::Nicki Minaj
1193
+ 1192::Enrique Iglesias
1194
+ 1193::Red Hot Chili Peppers
1195
+ 1194::Bryan Adams
1196
+ 1195::Kid Abelha
1197
+ 1196::Jordin Sparks
1198
+ 1197::Alicia Keys
1199
+ 1198::Katharine McPhee
1200
+ 1199::Kat DeLuna
1201
+ 1200::Nelly Furtado
1202
+ 1201::Ciara
1203
+ 1202::Keri Hilson
1204
+ 1203::Jonas Brothers
1205
+ 1204::T.I.
1206
+ 1205::Colbie Caillat
1207
+ 1206::Jason Mraz
1208
+ 1207::Brad Paisley
1209
+ 1208::Sugarland
1210
+ 1209::Aaron Carter
1211
+ 1210::*NSYNC
1212
+ 1211::Miley Cyrus
1213
+ 1212::No Doubt
1214
+ 1213::Flo Rida
1215
+ 1214::Demi Lovato
1216
+ 1215::Selena Gomez & the Scene
1217
+ 1216::R.E.M.
1218
+ 1217::Emily Osment
1219
+ 1218::Cyndi Lauper
1220
+ 1219::Kelly Osbourne
1221
+ 1220::Dixie Chicks
1222
+ 1221::Timbaland
1223
+ 1222::Vanessa Hudgens
1224
+ 1223::Boys Like Girls
1225
+ 1224::Camp Rock
1226
+ 1225::Ace of Base
1227
+ 1226::Lady Antebellum
1228
+ 1227::Pitbull
1229
+ 1228::Selena Gomez
1230
+ 1229::Carrie Underwood
1231
+ 1230::Rednex
1232
+ 1231::Big Time Rush
1233
+ 1232::Gloriana
1234
+ 1233::Amy Macdonald
1235
+ 1234::Metric
1236
+ 1235::Jennette McCurdy
1237
+ 1236::Keke Palmer
1238
+ 1237::Dolly Parton
1239
+ 1238::Billy Ray Cyrus
1240
+ 1239::Pato Fu
1241
+ 1240::Cássia Eller
1242
+ 1241::Yellowcard
1243
+ 1242::Adele
1244
+ 1243::Leighton Meester
1245
+ 1244::Simon Curtis
1246
+ 1245::Two Door Cinema Club
1247
+ 1246::Jessie James
1248
+ 1247::Dierks Bentley
1249
+ 1248::High School Musical
1250
+ 1249::Kris Allen
1251
+ 1250::Christina Perri
1252
+ 1251::James Morrison
1253
+ 1252::Plain White T's
1254
+ 1253::Aqua
1255
+ 1254::Jason Aldean
1256
+ 1255::Blake Shelton
1257
+ 1256::Gary Allan
1258
+ 1257::Kenny Chesney
1259
+ 1258::Lee Ann Womack
1260
+ 1259::Matisyahu
1261
+ 1260::The Lonely Island
1262
+ 1261::Hanson
1263
+ 1262::Tila Tequila
1264
+ 1263::The Spill Canvas
1265
+ 1264::Drake Bell
1266
+ 1265::Edward Maya
1267
+ 1266::Ladyhawke
1268
+ 1267::Gabriella Cilmi
1269
+ 1268::Nando Reis
1270
+ 1269::TLC
1271
+ 1270::Paula Abdul
1272
+ 1271::Kellie Pickler
1273
+ 1272::Nelly
1274
+ 1273::John Denver
1275
+ 1274::Miranda!
1276
+ 1275::Boney M.
1277
+ 1276::Matt Nathanson
1278
+ 1277::Jay Sean
1279
+ 1278::Charice
1280
+ 1279::Flora
1281
+ 1280::Sean Kingston
1282
+ 1281::The Outfield
1283
+ 1282::Iyaz
1284
+ 1283::Fat Joe
1285
+ 1284::Céline Dion
1286
+ 1285::Ana Carolina
1287
+ 1286::The Faint
1288
+ 1287::The Cranberries
1289
+ 1288::Beck
1290
+ 1289::Cassie
1291
+ 1290::Ashanti
1292
+ 1291::Nicole Scherzinger
1293
+ 1292::Destiny's Child
1294
+ 1293::The Asteroids Galaxy Tour
1295
+ 1294::Los Campesinos!
1296
+ 1295::Mika
1297
+ 1296::MGMT
1298
+ 1297::Stevie Wonder
1299
+ 1298::Tiziano Ferro
1300
+ 1299::David Bisbal
1301
+ 1300::Wisin & Yandel
1302
+ 1301::Cherish
1303
+ 1302::Santigold
1304
+ 1303::Reba McEntire
1305
+ 1304::S Club 7
1306
+ 1305::Fool's Garden
1307
+ 1306::Gloria Estefan
1308
+ 1307::Aaliyah
1309
+ 1308::Celia Cruz
1310
+ 1309::Extreme
1311
+ 1310::Bananarama
1312
+ 1311::UB40
1313
+ 1312::Mr. Big
1314
+ 1313::Barry White
1315
+ 1314::Sean Paul
1316
+ 1315::KC and the Sunshine Band
1317
+ 1316::Phantom Planet
1318
+ 1317::Counting Crows
1319
+ 1318::Violent Femmes
1320
+ 1319::A.R. Rahman
1321
+ 1320::Soda Stereo
1322
+ 1321::DJ BoBo
1323
+ 1322::Modern Talking
1324
+ 1323::Gigi D'Agostino
1325
+ 1324::Lila Downs
1326
+ 1325::Tom Petty and the Heartbreakers
1327
+ 1326::Does It Offend You, Yeah?
1328
+ 1327::The Bangles
1329
+ 1328::Fito Páez
1330
+ 1329::Calle 13
1331
+ 1330::Missy Elliott
1332
+ 1331::Selena
1333
+ 1332::Estopa
1334
+ 1333::Fabolous
1335
+ 1334::Los Rodríguez
1336
+ 1335::The Weepies
1337
+ 1336::Lissy Trullie
1338
+ 1337::Bobby Valentino
1339
+ 1338::Fugees
1340
+ 1339::Kimya Dawson
1341
+ 1340::The Little Ones
1342
+ 1341::Bob Sinclar
1343
+ 1342::Kabah
1344
+ 1343::Don Omar
1345
+ 1344::Henry Purcell
1346
+ 1345::White Rabbits
1347
+ 1346::Marc-Antoine Charpentier
1348
+ 1347::Luigi Boccherini
1349
+ 1348::Tomaso Giovanni Albinoni
1350
+ 1349::Giovanni Battista Pergolesi
1351
+ 1350::Hot Chocolate
1352
+ 1351::Culture Beat
1353
+ 1352::The Chemical Brothers
1354
+ 1353::Led Zeppelin
1355
+ 1354::Sam Cooke
1356
+ 1355::Deltron 3030
1357
+ 1356::Too $hort
1358
+ 1357::Mudhoney
1359
+ 1358::nevershoutnever!
1360
+ 1359::David Cook
1361
+ 1360::David Archuleta
1362
+ 1361::Shania Twain
1363
+ 1362::The Who
1364
+ 1363::The Rolling Stones
1365
+ 1364::Enter Shikari
1366
+ 1365::Hannah Montana
1367
+ 1366::Scar Symmetry
1368
+ 1367::Patsy Cline
1369
+ 1368::Elvis Presley
1370
+ 1369::The Cheetah Girls
1371
+ 1370::Aly & AJ
1372
+ 1371::Everlife
1373
+ 1372::Weird Al Yankovic
1374
+ 1373::Liz Phair
1375
+ 1374::Nat King Cole
1376
+ 1375::Cryptopsy
1377
+ 1376::Louis Armstrong
1378
+ 1377::Relient K
1379
+ 1378::Wale
1380
+ 1379::Pink Martini
1381
+ 1380::Air Supply
1382
+ 1381::Judy Garland
1383
+ 1382::Bing Crosby
1384
+ 1383::Allstar Weekend
1385
+ 1384::Andy Williams
1386
+ 1385::Dinah Washington
1387
+ 1386::Jimmy Buffett
1388
+ 1387::Bobby Darin
1389
+ 1388::Eartha Kitt
1390
+ 1389::Martin L. Gore
1391
+ 1390::Judas Priest
1392
+ 1391::Tarot
1393
+ 1392::Deep Purple
1394
+ 1393::Mogwai
1395
+ 1394::Puscifer
1396
+ 1395::Eicca Toppinen
1397
+ 1396::The Cult
1398
+ 1397::Therion
1399
+ 1398::Eisbrecher
1400
+ 1399::Craig Armstrong
1401
+ 1400::Steppenwolf
1402
+ 1401::Vangelis
1403
+ 1402::Agua de Annique
1404
+ 1403::Rob Dougan
1405
+ 1404::Skunk Anansie
1406
+ 1405::John Murphy
1407
+ 1406::A Silver Mt. Zion
1408
+ 1407::Joanna Newsom
1409
+ 1408::Bonobo
1410
+ 1409::Saxon Shore
1411
+ 1410::Yndi Halda
1412
+ 1411::Jeniferever
1413
+ 1412::Móveis Coloniais de Acaju
1414
+ 1413::Era
1415
+ 1414::Skid Row
1416
+ 1415::Tarja
1417
+ 1416::菅野よう子
1418
+ 1417::Tristania
1419
+ 1418::Queensrÿche
1420
+ 1419::Creedence Clearwater Revival
1421
+ 1420::George Harrison
1422
+ 1421::Chico Buarque
1423
+ 1422::Maria Rita
1424
+ 1423::Rita Lee
1425
+ 1424::Eddie Vedder
1426
+ 1425::Esbjörn Svensson Trio
1427
+ 1426::Andrew Lloyd Webber
1428
+ 1427::Feist
1429
+ 1428::Billie Holiday
1430
+ 1429::Meiko
1431
+ 1430::Mallu Magalhães
1432
+ 1431::Cazuza
1433
+ 1432::Vanguart
1434
+ 1433::Amiina
1435
+ 1434::Joe Cocker
1436
+ 1435::William Fitzsimmons
1437
+ 1436::Joshua Radin
1438
+ 1437::Zé Ramalho
1439
+ 1438::Firehouse
1440
+ 1439::Tom Zé
1441
+ 1440::Gregory and the Hawk
1442
+ 1441::Melody Gardot
1443
+ 1442::Richard Marx
1444
+ 1443::Eric Clapton
1445
+ 1444::Maurice Ravel
1446
+ 1445::The Verve
1447
+ 1446::Сплин
1448
+ 1447::Archive
1449
+ 1448::Indochine
1450
+ 1449::Nightmares on Wax
1451
+ 1450::The Shadows
1452
+ 1451::Simon & Garfunkel
1453
+ 1452::Peter Gabriel
1454
+ 1453::Diana Krall
1455
+ 1454::Arvo Pärt
1456
+ 1455::Мумий Тролль
1457
+ 1456::Belinda Carlisle
1458
+ 1457::Gabriel Fauré
1459
+ 1458::Crustation
1460
+ 1459::Mandalay
1461
+ 1460::Sergei Prokofiev
1462
+ 1461::Dmitri Shostakovich
1463
+ 1462::Dinosaur Pile-Up
1464
+ 1463::Robert Schumann
1465
+ 1464::Edvard Grieg
1466
+ 1465::Carpathian Forest
1467
+ 1466::George Michael
1468
+ 1467::Spandau Ballet
1469
+ 1468::Nik Kershaw
1470
+ 1469::ABC
1471
+ 1470::Damien Rice
1472
+ 1471::Queens of the Stone Age
1473
+ 1472::Bob Marley & The Wailers
1474
+ 1473::Klaxons
1475
+ 1474::Howlin' Wolf
1476
+ 1475::Ray Charles
1477
+ 1476::Muddy Waters
1478
+ 1477::Prince
1479
+ 1478::Beastie Boys
1480
+ 1479::The White Stripes
1481
+ 1480::Dire Straits
1482
+ 1481::Gotthard
1483
+ 1482::Eagles
1484
+ 1483::Alice Cooper
1485
+ 1484::Dark Tranquillity
1486
+ 1485::At the Gates
1487
+ 1486::Atreyu
1488
+ 1487::Pantera
1489
+ 1488::Lamb of God
1490
+ 1489::Kyuss
1491
+ 1490::John Williams
1492
+ 1491::Helloween
1493
+ 1492::Stratovarius
1494
+ 1493::Journey
1495
+ 1494::Testament
1496
+ 1495::Death
1497
+ 1496::Yngwie Malmsteen
1498
+ 1497::Blind Guardian
1499
+ 1498::Kasabian
1500
+ 1499::The Donnas
1501
+ 1500::Iron Butterfly
1502
+ 1501::Orchestral Manoeuvres in the Dark
1503
+ 1502::Kraftwerk
1504
+ 1503::Melvins
1505
+ 1504::Guns N' Roses
1506
+ 1505::Mayhem
1507
+ 1506::Carcass
1508
+ 1507::Gamma Ray
1509
+ 1508::Annihilator
1510
+ 1509::Tenacious D
1511
+ 1510::W.A.S.P.
1512
+ 1511::Kansas
1513
+ 1512::The Mamas & The Papas
1514
+ 1513::Ramones
1515
+ 1514::Anthrax
1516
+ 1515::The Baseballs
1517
+ 1516::Blind Melon
1518
+ 1517::Avenged Sevenfold
1519
+ 1518::Hans Zimmer
1520
+ 1519::Mötley Crüe
1521
+ 1520::T. Rex
1522
+ 1521::Syd Barrett
1523
+ 1522::The Runaways
1524
+ 1523::Ratt
1525
+ 1524::Stryper
1526
+ 1525::Patti Smith
1527
+ 1526::Jerry Cantrell
1528
+ 1527::The Black Crowes
1529
+ 1528::L7
1530
+ 1529::Gary Numan
1531
+ 1530::Thomas Dolby
1532
+ 1531::Rage Against the Machine
1533
+ 1532::Blue Öyster Cult
1534
+ 1533::Frankie Goes to Hollywood
1535
+ 1534::Heart
1536
+ 1535::Twisted Sister
1537
+ 1536::Dio
1538
+ 1537::KISS
1539
+ 1538::Van Halen
1540
+ 1539::Joan Jett
1541
+ 1540::Airbourne
1542
+ 1541::D-A-D
1543
+ 1542::Howard Shore
1544
+ 1543::Boston
1545
+ 1544::Fantômas
1546
+ 1545::Kajagoogoo
1547
+ 1546::Lita Ford
1548
+ 1547::Vixen
1549
+ 1548::Hanoi Rocks
1550
+ 1549::Whitesnake
1551
+ 1550::Joan Jett and the Blackhearts
1552
+ 1551::Velvet Revolver
1553
+ 1552::CRASHDÏET
1554
+ 1553::Cheap Trick
1555
+ 1554::Sweet
1556
+ 1555::Lynyrd Skynyrd
1557
+ 1556::Manowar
1558
+ 1557::Thin Lizzy
1559
+ 1558::Cream
1560
+ 1559::Warrant
1561
+ 1560::Stevie Ray Vaughan and Double Trouble
1562
+ 1561::Buddy Guy
1563
+ 1562::The Animals
1564
+ 1563::ZZ Top
1565
+ 1564::Buddy Holly
1566
+ 1565::Dokken
1567
+ 1566::The Stooges
1568
+ 1567::Marillion
1569
+ 1568::The Fall
1570
+ 1569::Black Flag
1571
+ 1570::Dead Kennedys
1572
+ 1571::The Adicts
1573
+ 1572::Supergrass
1574
+ 1573::Quiet Riot
1575
+ 1574::David Lee Roth
1576
+ 1575::Saxon
1577
+ 1576::Ugly Kid Joe
1578
+ 1577::L.A. Guns
1579
+ 1578::Hole
1580
+ 1579::Bikini Kill
1581
+ 1580::The Corrs
1582
+ 1581::Uriah Heep
1583
+ 1582::Asia
1584
+ 1583::Dinosaur Jr.
1585
+ 1584::The Vines
1586
+ 1585::Status Quo
1587
+ 1586::Foreigner
1588
+ 1587::Poison
1589
+ 1588::Burzum
1590
+ 1589::Darkthrone
1591
+ 1590::Girlschool
1592
+ 1591::Cathedral
1593
+ 1592::Doro
1594
+ 1593::Cinderella
1595
+ 1594::Lou Reed
1596
+ 1595::Busted
1597
+ 1596::New York Dolls
1598
+ 1597::Kalmah
1599
+ 1598::Cutting Crew
1600
+ 1599::Faster Pussycat
1601
+ 1600::Accept
1602
+ 1601::White Lion
1603
+ 1602::Dropkick Murphys
1604
+ 1603::Diamond Head
1605
+ 1604::Robert Johnson
1606
+ 1605::Eliza Doolittle
1607
+ 1606::Anvil
1608
+ 1607::Winger
1609
+ 1608::Jefferson Airplane
1610
+ 1609::Nena
1611
+ 1610::Men Without Hats
1612
+ 1611::Animotion
1613
+ 1612::Rob Zombie
1614
+ 1613::Jeff Beck
1615
+ 1614::Big Country
1616
+ 1615::Modern English
1617
+ 1616::Gary Moore
1618
+ 1617::The Alan Parsons Project
1619
+ 1618::Tomahawk
1620
+ 1619::Eternal Tears of Sorrow
1621
+ 1620::Sebastian Bach
1622
+ 1621::Loreena McKennitt
1623
+ 1622::Wall of Voodoo
1624
+ 1623::Bow Wow Wow
1625
+ 1624::Agent Orange
1626
+ 1625::Little Walter
1627
+ 1626::George Thorogood & The Destroyers
1628
+ 1627::Great White
1629
+ 1628::Jason Becker
1630
+ 1629::The Buggles
1631
+ 1630::Tesla
1632
+ 1631::Screaming Trees
1633
+ 1632::Bad Company
1634
+ 1633::Steel Panther
1635
+ 1634::Babes in Toyland
1636
+ 1635::Joan Baez
1637
+ 1636::Mother Love Bone
1638
+ 1637::Triumph
1639
+ 1638::Ted Nugent
1640
+ 1639::Mad Season
1641
+ 1640::Voivod
1642
+ 1641::Woody Guthrie
1643
+ 1642::Generation X
1644
+ 1643::Bad English
1645
+ 1644::Impellitteri
1646
+ 1645::Johnny Thunders
1647
+ 1646::Don McLean
1648
+ 1647::Brian May
1649
+ 1648::The Knack
1650
+ 1649::Orbital
1651
+ 1650::Luther Vandross
1652
+ 1651::Quincy Jones
1653
+ 1652::Grace Jones
1654
+ 1653::Boz Scaggs
1655
+ 1654::Brainstorm
1656
+ 1655::The Dining Rooms
1657
+ 1656::Sérgio Mendes
1658
+ 1657::Al Green
1659
+ 1658::A Day to Remember
1660
+ 1659::Pillar
1661
+ 1660::Disciple
1662
+ 1661::Family Force 5
1663
+ 1662::Fireflight
1664
+ 1663::Talk Talk
1665
+ 1664::Prefab Sprout
1666
+ 1665::Icehouse
1667
+ 1666::Fiction Factory
1668
+ 1667::Jean-Michel Jarre
1669
+ 1668::Howard Jones
1670
+ 1669::Thompson Twins
1671
+ 1670::The Fixx
1672
+ 1671::Level 42
1673
+ 1672::Erasure
1674
+ 1673::Berlin
1675
+ 1674::The Psychedelic Furs
1676
+ 1675::Parliament
1677
+ 1676::Art of Noise
1678
+ 1677::XTC
1679
+ 1678::Pseudo Echo
1680
+ 1679::Midnight Oil
1681
+ 1680::Shriekback
1682
+ 1681::Wire
1683
+ 1682::Sparks
1684
+ 1683::Yellow Magic Orchestra
1685
+ 1684::John Foxx
1686
+ 1685::Landscape
1687
+ 1686::Scritti Politti
1688
+ 1687::Yello
1689
+ 1688::New Musik
1690
+ 1689::Cabaret Voltaire
1691
+ 1690::LCD Soundsystem
1692
+ 1691::Hall & Oates
1693
+ 1692::Arcadia
1694
+ 1693::The Damned
1695
+ 1694::Soft Cell
1696
+ 1695::The Associates
1697
+ 1696::Propaganda
1698
+ 1697::Go West
1699
+ 1698::Gang of Four
1700
+ 1699::Fad Gadget
1701
+ 1700::Boytronic
1702
+ 1701::Blancmange
1703
+ 1702::Classix Nouveaux
1704
+ 1703::Wang Chung
1705
+ 1704::The The
1706
+ 1705::The Communards
1707
+ 1706::Saga
1708
+ 1707::Telex
1709
+ 1708::!!!
1710
+ 1709::Blue Peter
1711
+ 1710::O.S.T.R.
1712
+ 1711::Kult
1713
+ 1712::Pidżama Porno
1714
+ 1713::Symphony X
1715
+ 1714::Alestorm
1716
+ 1715::Falconer
1717
+ 1716::Star One
1718
+ 1717:::wumpscut:
1719
+ 1718::Electronic
1720
+ 1719::Marc Almond
1721
+ 1720::Pulp
1722
+ 1721::Manic Street Preachers
1723
+ 1722::Wolfsheim
1724
+ 1723::The Good, the Bad & the Queen
1725
+ 1724::Rooney
1726
+ 1725::The Residents
1727
+ 1726::Bee Gees
1728
+ 1727::Tuxedomoon
1729
+ 1728::In Extremo
1730
+ 1729::Deutsch Amerikanische Freundschaft
1731
+ 1730::Dagoba
1732
+ 1731::Throbbing Gristle
1733
+ 1732::Captain Beefheart & His Magic Band
1734
+ 1733::The Legendary Pink Dots
1735
+ 1734::The Power Station
1736
+ 1735::Minus the Bear
1737
+ 1736::The Stone Roses
1738
+ 1737::Angelo Badalamenti
1739
+ 1738::At Vance
1740
+ 1739::The Pipettes
1741
+ 1740::Tocotronic
1742
+ 1741::Datarock
1743
+ 1742::Astor Piazzolla
1744
+ 1743::Oi Va Voi
1745
+ 1744::Electric Six
1746
+ 1745::Jarvis Cocker
1747
+ 1746::Neurotic Outsiders
1748
+ 1747::The Devils
1749
+ 1748::Gloria Gaynor
1750
+ 1749::Flowing Tears
1751
+ 1750::Perfume
1752
+ 1751::Versailles
1753
+ 1752::ルルティア
1754
+ 1753::múm
1755
+ 1754::Aphex Twin
1756
+ 1755::Boards of Canada
1757
+ 1756::Ellen Allien
1758
+ 1757::Daedelus
1759
+ 1758::B. Fleischmann
1760
+ 1759::Booka Shade
1761
+ 1760::Mujuice
1762
+ 1761::edIT
1763
+ 1762::Ellen Allien & Apparat
1764
+ 1763::Frog Pocket
1765
+ 1764::Rotator
1766
+ 1765::Spor
1767
+ 1766::Noisia
1768
+ 1767::Black Sun Empire
1769
+ 1768::The Orb
1770
+ 1769::Ricardo Villalobos
1771
+ 1770::William Basinski
1772
+ 1771::Pole
1773
+ 1772::Deadbeat
1774
+ 1773::Dntel
1775
+ 1774::Add N to (X)
1776
+ 1775::Brothomstates
1777
+ 1776::Dominik Eulberg
1778
+ 1777::Richie Hawtin
1779
+ 1778::Mathew Jonson
1780
+ 1779::Hexstatic
1781
+ 1780::Alva Noto + Ryuichi Sakamoto
1782
+ 1781::Kings of Leon
1783
+ 1782::Joss Stone
1784
+ 1783::Yeni Türkü
1785
+ 1784::Barış Manço
1786
+ 1785::Mor ve Ötesi
1787
+ 1786::Düş Sokağı Sakinleri
1788
+ 1787::Cradle of Filth
1789
+ 1788::Little Richard
1790
+ 1789::HammerFall
1791
+ 1790::Massacration
1792
+ 1791::Creed
1793
+ 1792::The Hellacopters
1794
+ 1793::Backyard Babies
1795
+ 1794::Kelly Rowland
1796
+ 1795::Gloria Trevi
1797
+ 1796::Ludacris
1798
+ 1797::Jennifer Hudson
1799
+ 1798::Michelle Williams
1800
+ 1799::Trey Songz
1801
+ 1800::Pyotr Ilyich Tchaikovsky
1802
+ 1801::Sade
1803
+ 1802::Max Richter
1804
+ 1803::Jack Johnson
1805
+ 1804::Jethro Tull
1806
+ 1805::The Album Leaf
1807
+ 1806::John Mayer
1808
+ 1807::Bruce Springsteen
1809
+ 1808::Trans-Siberian Orchestra
1810
+ 1809::Bill Withers
1811
+ 1810::Five Finger Death Punch
1812
+ 1811::Santana
1813
+ 1812::Smash Mouth
1814
+ 1813::Mike & The Mechanics
1815
+ 1814::Kim Wilde
1816
+ 1815::New Found Glory
1817
+ 1816::Georg Friedrich Händel
1818
+ 1817::Secondhand Serenade
1819
+ 1818::Starsailor
1820
+ 1819::Daddy Yankee
1821
+ 1820::B.J. Thomas
1822
+ 1821::OutKast
1823
+ 1822::Ludovico Einaudi
1824
+ 1823::Norah Jones
1825
+ 1824::Amethystium
1826
+ 1825::Diana Ross
1827
+ 1826::Chris Cornell
1828
+ 1827::Luciano Pavarotti
1829
+ 1828::Sarah Brightman
1830
+ 1829::Andrea Bocelli
1831
+ 1830::Vanessa-Mae
1832
+ 1831::The Rascals
1833
+ 1832::k-os
1834
+ 1833::Islands
1835
+ 1834::Martina McBride
1836
+ 1835::Juanes
1837
+ 1836::Garth Brooks
1838
+ 1837::Little Big Town
1839
+ 1838::Tim McGraw
1840
+ 1839::Faith Hill
1841
+ 1840::Glenn Miller
1842
+ 1841::Reik
1843
+ 1842::George Strait
1844
+ 1843::Alan Jackson
1845
+ 1844::Paul Young
1846
+ 1845::The Doobie Brothers
1847
+ 1846::Van Morrison
1848
+ 1847::LL Cool J
1849
+ 1848::Naked Eyes
1850
+ 1849::Sin Bandera
1851
+ 1850::Ringo Starr
1852
+ 1851::Carpenters
1853
+ 1852::Paul McCartney & Wings
1854
+ 1853::Matchbox Twenty
1855
+ 1854::The Moody Blues
1856
+ 1855::Barbra Streisand
1857
+ 1856::Supertramp
1858
+ 1857::The Jackson 5
1859
+ 1858::Sarah McLachlan
1860
+ 1859::America
1861
+ 1860::Jordan Pruitt
1862
+ 1861::Tarkan
1863
+ 1862::Alabama
1864
+ 1863::Montgomery Gentry
1865
+ 1864::Ronnie Milsap
1866
+ 1865::Sara Evans
1867
+ 1866::SHeDaisy
1868
+ 1867::Coheed and Cambria
1869
+ 1868::Big & Rich
1870
+ 1869::Sly & The Family Stone
1871
+ 1870::Chris Isaak
1872
+ 1871::Eiffel 65
1873
+ 1872::Tom Jones
1874
+ 1873::Leaves' Eyes
1875
+ 1874::Lionel Richie
1876
+ 1875::Jars of Clay
1877
+ 1876::The Ataris
1878
+ 1877::Hank Williams
1879
+ 1878::Willie Nelson
1880
+ 1879::Emmylou Harris
1881
+ 1880::All Saints
1882
+ 1881::Jim Croce
1883
+ 1882::The Ventures
1884
+ 1883::Kenny Loggins
1885
+ 1884::Grand Funk Railroad
1886
+ 1885::Alejandro Sanz
1887
+ 1886::The Go-Go's
1888
+ 1887::Adam Ant
1889
+ 1888::Quarterflash
1890
+ 1889::Rick Springfield
1891
+ 1890::Miranda Lambert
1892
+ 1891::Ben Harper
1893
+ 1892::Chicago
1894
+ 1893::LeAnn Rimes
1895
+ 1894::The Ronettes
1896
+ 1895::Jon Secada
1897
+ 1896::Mae
1898
+ 1897::Kellplanet
1899
+ 1898::Afro Celt Sound System
1900
+ 1899::The Presidents of the United States of America
1901
+ 1900::Procol Harum
1902
+ 1901::The Monkees
1903
+ 1902::Jónsi & Alex
1904
+ 1903::Terri Clark
1905
+ 1904::The Avett Brothers
1906
+ 1905::John Fogerty
1907
+ 1906::The Everly Brothers
1908
+ 1907::Ricardo Arjona
1909
+ 1908::The Hollies
1910
+ 1909::The Association
1911
+ 1910::Herman's Hermits
1912
+ 1911::Joni Mitchell
1913
+ 1912::REO Speedwagon
1914
+ 1913::Tina Arena
1915
+ 1914::Kitaro
1916
+ 1915::The Platters
1917
+ 1916::The Drifters
1918
+ 1917::Boyz II Men
1919
+ 1918::Kool & The Gang
1920
+ 1919::Stevie Nicks
1921
+ 1920::Don Henley
1922
+ 1921::The Lovin' Spoonful
1923
+ 1922::Chad & Jeremy
1924
+ 1923::Israel Kamakawiwo'ole
1925
+ 1924::Lou Rawls
1926
+ 1925::Dionne Warwick
1927
+ 1926::Lorrie Morgan
1928
+ 1927::Neil Sedaka
1929
+ 1928::Connie Francis
1930
+ 1929::Marc Anthony
1931
+ 1930::Richard Hawley
1932
+ 1931::Billy Ocean
1933
+ 1932::Zucchero
1934
+ 1933::The Pogues
1935
+ 1934::Conway Twitty
1936
+ 1935::The Oak Ridge Boys
1937
+ 1936::Randy Travis
1938
+ 1937::Blackhawk
1939
+ 1938::Michael McDonald
1940
+ 1939::Barry Manilow
1941
+ 1940::Harry Nilsson
1942
+ 1941::Peter, Paul & Mary
1943
+ 1942::The Temptations
1944
+ 1943::Tony Bennett
1945
+ 1944::Nickel Creek
1946
+ 1945::George Winston
1947
+ 1946::Dwight Yoakam
1948
+ 1947::Shivaree
1949
+ 1948::Sheena Easton
1950
+ 1949::Perry Como
1951
+ 1950::Xela
1952
+ 1951::Los Lobos
1953
+ 1952::The Guess Who
1954
+ 1953::The Boo Radleys
1955
+ 1954::Nick Lowe
1956
+ 1955::MC Hammer
1957
+ 1956::Melissa Etheridge
1958
+ 1957::Scott McKenzie
1959
+ 1958::Cassandra Wilson
1960
+ 1959::Steel Magnolia
1961
+ 1960::The Jesus Lizard
1962
+ 1961::Foetus
1963
+ 1962::The Birthday Party
1964
+ 1963::The Jesus and Mary Chain
1965
+ 1964::Big Black
1966
+ 1965::Shellac
1967
+ 1966::Default
1968
+ 1967::The Brand New Heavies
1969
+ 1968::Jim Morrison
1970
+ 1969::65daysofstatic
1971
+ 1970::Late of the Pier
1972
+ 1971::We Are Scientists
1973
+ 1972::Good Shoes
1974
+ 1973::Mystery Jets
1975
+ 1974::Maximum the Hormone
1976
+ 1975::Gloria
1977
+ 1976::McFly
1978
+ 1977::Asking Alexandria
1979
+ 1978::The Devil Wears Prada
1980
+ 1979::blessthefall
1981
+ 1980::YUI
1982
+ 1981::A Skylit Drive
1983
+ 1982::Flow
1984
+ 1983::the GazettE
1985
+ 1984::HIGH and MIGHTY COLOR
1986
+ 1985::Miss May I
1987
+ 1986::Attack Attack!
1988
+ 1987::The Word Alive
1989
+ 1988::From First to Last
1990
+ 1989::Pierce the Veil
1991
+ 1990::Before Their Eyes
1992
+ 1991::We Came As Romans
1993
+ 1992::Eyes Set to Kill
1994
+ 1993::Woe, Is Me
1995
+ 1994::X JAPAN
1996
+ 1995::アンティック-珈琲店-
1997
+ 1996::Drop Dead, Gorgeous
1998
+ 1997::ORANGE RANGE
1999
+ 1998::Andrew Bird
2000
+ 1999::Spoon
2001
+ 2000::Iron & Wine
2002
+ 2001::Dave Matthews Band
2003
+ 2002::The New Pornographers
2004
+ 2003::Billy Joel
2005
+ 2004::Barenaked Ladies
2006
+ 2005::Randy Newman
2007
+ 2006::Ben Folds
2008
+ 2007::Paul Simon
2009
+ 2008::Rufus Wainwright
2010
+ 2009::Ben Folds Five
2011
+ 2010::Neko Case
2012
+ 2011::Calexico
2013
+ 2012::Dashboard Confessional
2014
+ 2013::Regina Spektor
2015
+ 2014::Reel Big Fish
2016
+ 2015::Foxboro Hot Tubs
2017
+ 2016::Alien Ant Farm
2018
+ 2017::Screamin' Jay Hawkins
2019
+ 2018::A Fine Frenzy
2020
+ 2019::Bad Religion
2021
+ 2020::Mombojó
2022
+ 2021::Silverchair
2023
+ 2022::Little Boots
2024
+ 2023::Hey Monday
2025
+ 2024::Engenheiros do Hawaii
2026
+ 2025::Capital Inicial
2027
+ 2026::Legião Urbana
2028
+ 2027::Titãs
2029
+ 2028::HORSE the band
2030
+ 2029::As I Lay Dying
2031
+ 2030::Millencolin
2032
+ 2031::Breathe Carolina
2033
+ 2032::The Maine
2034
+ 2033::Forever the Sickest Kids
2035
+ 2034::Sum 41
2036
+ 2035::Mayday Parade
2037
+ 2036::We The Kings
2038
+ 2037::Cute Is What We Aim For
2039
+ 2038::Sonata Arctica
2040
+ 2039::The Fratellis
2041
+ 2040::Raimundos
2042
+ 2041::Faichecleres
2043
+ 2042::Charlie Brown Jr.
2044
+ 2043::Caetano Veloso
2045
+ 2044::Tiê
2046
+ 2045::Shinedown
2047
+ 2046::The Raconteurs
2048
+ 2047::Vasco Rossi
2049
+ 2048::Vega 4
2050
+ 2049::The Turtles
2051
+ 2050::Trivium
2052
+ 2051::Kid Rock
2053
+ 2052::The Classic Crime
2054
+ 2053::A Rocket to the Moon
2055
+ 2054::Bidê ou Balde
2056
+ 2055::Anberlin
2057
+ 2056::Lordi
2058
+ 2057::The Cab
2059
+ 2058::This Providence
2060
+ 2059::Slash's Snakepit
2061
+ 2060::Gym Class Heroes
2062
+ 2061::Murderdolls
2063
+ 2062::Rosa De Saron
2064
+ 2063::Eros Ramazzotti
2065
+ 2064::Chimarruts
2066
+ 2065::Daniel Powter
2067
+ 2066::Walls of Jericho
2068
+ 2067::Barão Vermelho
2069
+ 2068::MxPx
2070
+ 2069::Forgotten Boys
2071
+ 2070::Ingrid Michaelson
2072
+ 2071::Tiago Iorc
2073
+ 2072::Cachorro Grande
2074
+ 2073::Graforréia Xilarmônica
2075
+ 2074::Forfun
2076
+ 2075::Ira!
2077
+ 2076::Zebrahead
2078
+ 2077::Collide
2079
+ 2078::Mamonas Assassinas
2080
+ 2079::4 Non Blondes
2081
+ 2080::Bonde do Rolê
2082
+ 2081::Cash Cash
2083
+ 2082::CPM 22
2084
+ 2083::Beeshop
2085
+ 2084::The Academy Is...
2086
+ 2085::The Rocket Summer
2087
+ 2086::Artist Vs. Poet
2088
+ 2087::Drive
2089
+ 2088::Mushroomhead
2090
+ 2089::Alice Nine
2091
+ 2090::Fu Manchu
2092
+ 2091::Autoramas
2093
+ 2092::Rock Rocket
2094
+ 2093::Milton Nascimento
2095
+ 2094::The Friday Night Boys
2096
+ 2095::LM.C
2097
+ 2096::Gilberto Gil
2098
+ 2097::The Juliana Theory
2099
+ 2098::Bonde das Impostora
2100
+ 2099::Júpiter Maçã
2101
+ 2100::Deborah Blando
2102
+ 2101::Lipstick
2103
+ 2102::Faber Drive
2104
+ 2103::AFI
2105
+ 2104::Buckcherry
2106
+ 2105::Hardcore Superstar
2107
+ 2106::Vains of Jenna
2108
+ 2107::Sixx:A.M.
2109
+ 2108::Concrete Blonde
2110
+ 2109::Mott the Hoople
2111
+ 2110::鷺巣詩郎
2112
+ 2111::Burial
2113
+ 2112::Mono
2114
+ 2113::Gimmik
2115
+ 2114::36 Crazyfists
2116
+ 2115::Haste the Day
2117
+ 2116::Hadouken!
2118
+ 2117::Skinny Puppy
2119
+ 2118::The Enemy
2120
+ 2119::Boxcutter
2121
+ 2120::Дельфин
2122
+ 2121::Xploding Plastix
2123
+ 2122::Maxïmo Park
2124
+ 2123::Cult of Luna
2125
+ 2124::Sakura
2126
+ 2125::Flobots
2127
+ 2126::Neurosis
2128
+ 2127::Maybeshewill
2129
+ 2128::Dolphin
2130
+ 2129::Stigmata
2131
+ 2130::Everything Is Made in China
2132
+ 2131::Ocean Colour Scene
2133
+ 2132::The Pigeon Detectives
2134
+ 2133::The Streets
2135
+ 2134::Roadrunner United
2136
+ 2135::Pelican
2137
+ 2136::Rashamba
2138
+ 2137::Hol Baumann
2139
+ 2138::Jesus on Extasy
2140
+ 2139::Ef
2141
+ 2140::Rosetta
2142
+ 2141::Shitdisco
2143
+ 2142::Benga
2144
+ 2143::Moondog
2145
+ 2144::Glen Hansard
2146
+ 2145::Camouflage
2147
+ 2146::Cock Robin
2148
+ 2147::Simply Red
2149
+ 2148::Janet Jackson
2150
+ 2149::Gabrielle
2151
+ 2150::Saint Etienne
2152
+ 2151::Annie Lennox
2153
+ 2152::Crowded House
2154
+ 2153::Zoé
2155
+ 2154::Chris Rea
2156
+ 2155::DJ Sammy
2157
+ 2156::Inspiral Carpets
2158
+ 2157::Donna Summer
2159
+ 2158::The Jacksons
2160
+ 2159::Chaka Khan
2161
+ 2160::Chic
2162
+ 2161::Teena Marie
2163
+ 2162::Happy Mondays
2164
+ 2163::Toto
2165
+ 2164::Shakin' Stevens
2166
+ 2165::Milk Inc.
2167
+ 2166::Iris
2168
+ 2167::JLS
2169
+ 2168::Bonnie Tyler
2170
+ 2169::Seal
2171
+ 2170::Julio Iglesias
2172
+ 2171::Chris de Burgh
2173
+ 2172::Haddaway
2174
+ 2173::Dr. Alban
2175
+ 2174::2 Unlimited
2176
+ 2175::Black
2177
+ 2176::Basement Jaxx
2178
+ 2177::Shapeshifters
2179
+ 2178::The Searchers
2180
+ 2179::The Pretenders
2181
+ 2180::When in Rome
2182
+ 2181::James
2183
+ 2182::T'Pau
2184
+ 2183::Wet Wet Wet
2185
+ 2184::The Charlatans
2186
+ 2185::Alison Moyet
2187
+ 2186::The Blow Monkeys
2188
+ 2187::Mylo
2189
+ 2188::Band Aid
2190
+ 2189::The Beloved
2191
+ 2190::Texas
2192
+ 2191::Dubstar
2193
+ 2192::EMF
2194
+ 2193::Sash!
2195
+ 2194::Pat Benatar
2196
+ 2195::The Archies
2197
+ 2196::Commodores
2198
+ 2197::Loverboy
2199
+ 2198::Manfred Mann
2200
+ 2199::Del Amitri
2201
+ 2200::The Shamen
2202
+ 2201::Roger Sanchez
2203
+ 2202::R. Kelly
2204
+ 2203::Bronski Beat
2205
+ 2204::The Four Tops
2206
+ 2205::Andy Bell
2207
+ 2206::Cliff Richard
2208
+ 2207::Alexander O'Neal
2209
+ 2208::Tammy Wynette
2210
+ 2209::Lisa Stansfield
2211
+ 2210::Snap!
2212
+ 2211::Glen Campbell
2213
+ 2212::The Alarm
2214
+ 2213::Chumbawamba
2215
+ 2214::The Bluetones
2216
+ 2215::F.R. David
2217
+ 2216::Bros
2218
+ 2217::Sister Sledge
2219
+ 2218::Soul II Soul
2220
+ 2219::The Trammps
2221
+ 2220::Petula Clark
2222
+ 2221::Shannon
2223
+ 2222::Technotronic
2224
+ 2223::Alice DeeJay
2225
+ 2224::Ensiferum
2226
+ 2225::Angra
2227
+ 2226::Norther
2228
+ 2227::Insomnium
2229
+ 2228::Skyfire
2230
+ 2229::James LaBrie
2231
+ 2230::Lil' Wayne
2232
+ 2231::Blue October
2233
+ 2232::The Prodigy
2234
+ 2233::Bombay Bicycle Club
2235
+ 2234::The Scene Aesthetic
2236
+ 2235::Dolores O'Riordan
2237
+ 2236::Crossfade
2238
+ 2237::Noize MC
2239
+ 2238::Pink
2240
+ 2239::Jamelia
2241
+ 2240::Агата Кристи
2242
+ 2241::Amatory
2243
+ 2242::Sunrise Avenue
2244
+ 2243::Manga
2245
+ 2244::Evergreen Terrace
2246
+ 2245::Би-2
2247
+ 2246::Biohazard
2248
+ 2247::Кино
2249
+ 2248::Benny Benassi
2250
+ 2249::KoЯn
2251
+ 2250::Timo Maas
2252
+ 2251::Слот
2253
+ 2252::Novembre
2254
+ 2253::Unwritten Law
2255
+ 2254::Crazy Town
2256
+ 2255::DMX
2257
+ 2256::Пилот
2258
+ 2257::Evans Blue
2259
+ 2258::Propellerheads
2260
+ 2259::South Park
2261
+ 2260::Bomfunk MC's
2262
+ 2261::Кирпичи
2263
+ 2262::Король и Шут
2264
+ 2263::Lil Jon & The East Side Boyz
2265
+ 2264::Hush
2266
+ 2265::Lifehouse
2267
+ 2266::Bush
2268
+ 2267::Lovex
2269
+ 2268::In This Moment
2270
+ 2269::Black Tide
2271
+ 2270::Slaughter
2272
+ 2271::Pixie Lott
2273
+ 2272::Claudia Leitte
2274
+ 2273::Mylène Farmer
2275
+ 2274::8mm
2276
+ 2275::KLOQ
2277
+ 2276::Hooverphonic
2278
+ 2277::Kelis
2279
+ 2278::Melanie C
2280
+ 2279::Ivete Sangalo
2281
+ 2280::Babado Novo
2282
+ 2281::Rob Thomas
2283
+ 2282::Elliott Yamin
2284
+ 2283::Adriana Calcanhotto
2285
+ 2284::Camille
2286
+ 2285::Ani DiFranco
2287
+ 2286::Emma Bunton
2288
+ 2287::Danni Carlos
2289
+ 2288::The Notwist
2290
+ 2289::The Subways
2291
+ 2290::Mando Diao
2292
+ 2291::Kashmir
2293
+ 2292::Blue Foundation
2294
+ 2293::Lali Puna
2295
+ 2294::Broadcast
2296
+ 2295::Jackie Wilson
2297
+ 2296::Marvin Gaye
2298
+ 2297::The Isley Brothers
2299
+ 2298::Earth, Wind & Fire
2300
+ 2299::Sissel
2301
+ 2300::Alejandra Guzmán
2302
+ 2301::Giuseppe Verdi
2303
+ 2302::Miguel Bosé
2304
+ 2303::Johannes Brahms
2305
+ 2304::Mijares
2306
+ 2305::Charlotte Church
2307
+ 2306::Curtis Mayfield
2308
+ 2307::Antonín Dvořák
2309
+ 2308::Fatback Band
2310
+ 2309::Felix Mendelssohn
2311
+ 2310::Johann Pachelbel
2312
+ 2311::Diana Vickers
2313
+ 2312::Parachute
2314
+ 2313::Teddy Geiger
2315
+ 2314::Brie Larson
2316
+ 2315::HorrorPops
2317
+ 2316::Sandy e Junior
2318
+ 2317::The Distillers
2319
+ 2318::Kevin Federline
2320
+ 2319::Marisa Monte
2321
+ 2320::Erykah Badu
2322
+ 2321::Maria Bethânia
2323
+ 2322::Kate Voegele
2324
+ 2323::Los Hermanos
2325
+ 2324::Jeffree Star
2326
+ 2325::Sia
2327
+ 2326::Психея
2328
+ 2327::Emilíana Torrini
2329
+ 2328::The Calling
2330
+ 2329::Fatboy Slim
2331
+ 2330::Omarion
2332
+ 2331::Paolo Nutini
2333
+ 2332::The Hives
2334
+ 2333::Serebro
2335
+ 2334::Skye Sweetnam
2336
+ 2335::Anastacia
2337
+ 2336::The Medic Droid
2338
+ 2337::Schiller
2339
+ 2338::NX Zero
2340
+ 2339::LeToya
2341
+ 2340::Stars
2342
+ 2341::Kyo
2343
+ 2342::Nouvelle Vague
2344
+ 2343::Detonautas Roque Clube
2345
+ 2344::Fiona Apple
2346
+ 2345::Kudai
2347
+ 2346::Corinne Bailey Rae
2348
+ 2347::Bryan Ferry
2349
+ 2348::Julieta Venegas
2350
+ 2349::МакSим
2351
+ 2350::Vanessa da Mata
2352
+ 2351::Marcelo D2
2353
+ 2352::DJ Cam
2354
+ 2353::O Rappa
2355
+ 2354::Os Mutantes
2356
+ 2355::Lenine
2357
+ 2356::SR-71
2358
+ 2357::Antônio Carlos Jobim
2359
+ 2358::Plazma
2360
+ 2359::Freezepop
2361
+ 2360::American Hi-Fi
2362
+ 2361::Diddy
2363
+ 2362::Cartel
2364
+ 2363::Babasónicos
2365
+ 2364::Erin McCarley
2366
+ 2365::Lillix
2367
+ 2366::The Almost
2368
+ 2367::Сергей Лазарев
2369
+ 2368::Queen Latifah
2370
+ 2369::Ludov
2371
+ 2370::Gram
2372
+ 2371::Babyface
2373
+ 2372::Tribalistas
2374
+ 2373::Юлия Савичева
2375
+ 2374::Cibelle
2376
+ 2375::Bliss
2377
+ 2376::Vinicius de Moraes
2378
+ 2377::(hed) Planet Earth
2379
+ 2378::In-Grid
2380
+ 2379::Lemongrass
2381
+ 2380::Global Communication
2382
+ 2381::Rod Stewart
2383
+ 2382::John Legend
2384
+ 2383::Wyclef Jean
2385
+ 2384::Band of Horses
2386
+ 2385::Cat Power
2387
+ 2386::BrokeNCYDE
2388
+ 2387::Aloha From Hell
2389
+ 2388::The Pains of Being Pure at Heart
2390
+ 2389::+44
2391
+ 2390::Kevin Rudolf
2392
+ 2391::Joey Ramone
2393
+ 2392::Emperor
2394
+ 2393::Spock's Beard
2395
+ 2394::Pain of Salvation
2396
+ 2395::Meshuggah
2397
+ 2396::Red Sparowes
2398
+ 2397::Strapping Young Lad
2399
+ 2398::Antimatter
2400
+ 2399::Ulver
2401
+ 2400::Bathory
2402
+ 2401::Textures
2403
+ 2402::The Ocean
2404
+ 2403::Riverside
2405
+ 2404::Moonsorrow
2406
+ 2405::My Dying Bride
2407
+ 2406::Ozric Tentacles
2408
+ 2407::Younger Brother
2409
+ 2408::Fair to Midland
2410
+ 2409::Oceansize
2411
+ 2410::Enslaved
2412
+ 2411::Anna Ternheim
2413
+ 2412::Lake of Tears
2414
+ 2413::Aereogramme
2415
+ 2414::Dimmu Borgir
2416
+ 2415::Arch Enemy
2417
+ 2416::Virgin Prunes
2418
+ 2417::Fields of the Nephilim
2419
+ 2418::Ween
2420
+ 2419::Specimen
2421
+ 2420::White Zombie
2422
+ 2421::Plasmatics
2423
+ 2422::Ten Years After
2424
+ 2423::Faithless
2425
+ 2424::Kosheen
2426
+ 2425::Sugababes
2427
+ 2426::Laura Pausini
2428
+ 2427::Darin
2429
+ 2428::Nick Cave and the Bad Seeds
2430
+ 2429::Siobhan Donaghy
2431
+ 2430::Delta Goodrem
2432
+ 2431::Edyta Bartosiewicz
2433
+ 2432::Far East Movement
2434
+ 2433::Solange
2435
+ 2434::Mónica Naranjo
2436
+ 2435::Morandi
2437
+ 2436::Robyn
2438
+ 2437::Matt Wertz
2439
+ 2438::Courtney Love
2440
+ 2439::Lee Ryan
2441
+ 2440::Five for Fighting
2442
+ 2441::No Mercy
2443
+ 2442::Everything but the Girl
2444
+ 2443::Lamb
2445
+ 2444::Jon McLaughlin
2446
+ 2445::Amerie
2447
+ 2446::Jazmine Sullivan
2448
+ 2447::Sam Sparro
2449
+ 2448::Kasia Kowalska
2450
+ 2449::Elisa
2451
+ 2450::Kurt Nilsen
2452
+ 2451::Lucie Silvas
2453
+ 2452::Howie Day
2454
+ 2453::Beverley Knight
2455
+ 2454::Live
2456
+ 2455::Moloko
2457
+ 2456::Joe Purdy
2458
+ 2457::Melanie Fiona
2459
+ 2458::Varius Manx
2460
+ 2459::AaRON
2461
+ 2460::Pati Yang
2462
+ 2461::Jamie Foxx
2463
+ 2462::Meredith Brooks
2464
+ 2463::Paola & Chiara
2465
+ 2464::Husky Rescue
2466
+ 2465::Babylon Zoo
2467
+ 2466::Kate Havnevik
2468
+ 2467::Joan Osborne
2469
+ 2468::Skin
2470
+ 2469::La Bouche
2471
+ 2470::Mutya Buena
2472
+ 2471::All That Remains
2473
+ 2472::Eths
2474
+ 2473::Blood Stain Child
2475
+ 2474::Light This City
2476
+ 2475::Mnemic
2477
+ 2476::Delain
2478
+ 2477::The Agonist
2479
+ 2478::Finger Eleven
2480
+ 2479::Victoria Beckham
2481
+ 2480::Alison Krauss
2482
+ 2481::Jessica Andrews
2483
+ 2482::Emerson Drive
2484
+ 2483::Gretchen Wilson
2485
+ 2484::The Frames
2486
+ 2485::Lisa Hannigan
2487
+ 2486::Willow Smith
2488
+ 2487::Amanda Palmer
2489
+ 2488::Baths
2490
+ 2489::Lil B
2491
+ 2490::Behemoth
2492
+ 2491::Whitechapel
2493
+ 2492::All Shall Perish
2494
+ 2493::Despised Icon
2495
+ 2494::Suicide Silence
2496
+ 2495::Nile
2497
+ 2496::Cannibal Corpse
2498
+ 2497::Hatebreed
2499
+ 2498::Job for a Cowboy
2500
+ 2499::Carnifex
2501
+ 2500::Beneath the Massacre
2502
+ 2501::Annotations of an Autopsy
2503
+ 2502::Оригами
2504
+ 2503::DevilDriver
2505
+ 2504::2H Company
2506
+ 2505::Enduser
2507
+ 2506::Kid606
2508
+ 2507::SikTh
2509
+ 2508::Shitmat
2510
+ 2509::Machine Head
2511
+ 2510::Decapitated
2512
+ 2511::Russian Circles
2513
+ 2512::Otep
2514
+ 2513::Wednesday 13
2515
+ 2514::Dry Kill Logic
2516
+ 2515::Chimaira
2517
+ 2516::Glassjaw
2518
+ 2517::Aborted
2519
+ 2518::Madball
2520
+ 2519::Unleashed
2521
+ 2520::Ion Dissonance
2522
+ 2521::7раса
2523
+ 2522::Deicide
2524
+ 2523::Obituary
2525
+ 2524::The Exploited
2526
+ 2525::Shadows Fall
2527
+ 2526::Cavalera Conspiracy
2528
+ 2527::Bluetech
2529
+ 2528::7000$
2530
+ 2529::Kataklysm
2531
+ 2530::Clawfinger
2532
+ 2531::10 Years
2533
+ 2532::The Haunted
2534
+ 2533::Six Feet Under
2535
+ 2534::Damageplan
2536
+ 2535::Korea
2537
+ 2536::Modeselektor
2538
+ 2537::Crossbreed
2539
+ 2538::Pan Sonic
2540
+ 2539::Vital Remains
2541
+ 2540::Nasum
2542
+ 2541::Brujeria
2543
+ 2542::Atari Teenage Riot
2544
+ 2543::Rusko
2545
+ 2544::Animosity
2546
+ 2545::The Number Twelve Looks Like You
2547
+ 2546::The Bug
2548
+ 2547::Superjoint Ritual
2549
+ 2548::Bon Iver
2550
+ 2549::These New Puritans
2551
+ 2550::Alexisonfire
2552
+ 2551::Mumford & Sons
2553
+ 2552::K'naan
2554
+ 2553::Snoop Dogg
2555
+ 2554::Death from Above 1979
2556
+ 2555::Delphic
2557
+ 2556::Wolfmother
2558
+ 2557::Friendly Fires
2559
+ 2558::Everything Everything
2560
+ 2559::311
2561
+ 2560::Warpaint
2562
+ 2561::Passion Pit
2563
+ 2562::Starfucker
2564
+ 2563::Robin Thicke
2565
+ 2564::jj
2566
+ 2565::Nosaj Thing
2567
+ 2566::Washed Out
2568
+ 2567::Gossip
2569
+ 2568::Razorlight
2570
+ 2569::The Ordinary Boys
2571
+ 2570::Eisley
2572
+ 2571::The Flaming Lips
2573
+ 2572::Pretty Girls Make Graves
2574
+ 2573::The Slits
2575
+ 2574::Guster
2576
+ 2575::Augustana
2577
+ 2576::The Kovenant
2578
+ 2577::Black Label Society
2579
+ 2578::Soilwork
2580
+ 2579::Iced Earth
2581
+ 2580::Ayreon
2582
+ 2581::Agalloch
2583
+ 2582::Edguy
2584
+ 2583::Running Wild
2585
+ 2584::Demons & Wizards
2586
+ 2585::King Crimson
2587
+ 2586::DragonForce
2588
+ 2587::Dethklok
2589
+ 2588::Styx
2590
+ 2589::Korpiklaani
2591
+ 2590::Firewind
2592
+ 2591::Finntroll
2593
+ 2592::Funkadelic
2594
+ 2593::Bad Manners
2595
+ 2594::Bad Brains
2596
+ 2595::The Gathering
2597
+ 2596::Labyrinth
2598
+ 2597::Elvenking
2599
+ 2598::Candlemass
2600
+ 2599::Persuader
2601
+ 2600::Metal Church
2602
+ 2601::Cameo
2603
+ 2602::Windir
2604
+ 2603::Steve Miller Band
2605
+ 2604::Andre 3000
2606
+ 2605::The Sword
2607
+ 2606::Ohio Players
2608
+ 2607::Alcatrazz
2609
+ 2608::Mountain
2610
+ 2609::King Diamond
2611
+ 2610::Devin Townsend
2612
+ 2611::Bloodbath
2613
+ 2612::Cynic
2614
+ 2613::Mustard Plug
2615
+ 2614::CéU
2616
+ 2615::Nitin Sawhney
2617
+ 2616::Minutemen
2618
+ 2617::Antony and the Johnsons
2619
+ 2618::Emilie Simon
2620
+ 2619::Cocteau Twins
2621
+ 2620::After Forever
2622
+ 2621::Devil Doll
2623
+ 2622::Violeta Parra
2624
+ 2623::Porter
2625
+ 2624::Gerry Rafferty
2626
+ 2625::Steel Pulse
2627
+ 2626::Bowling for Soup
2628
+ 2627::Chevelle
2629
+ 2628::Wheatus
2630
+ 2629::Tamia
2631
+ 2630::Westlife
2632
+ 2631::Five
2633
+ 2632::Jamal
2634
+ 2633::Eva Simons
2635
+ 2634::Kelly Key
2636
+ 2635::Mr. President
2637
+ 2636::Whigfield
2638
+ 2637::Ice MC
2639
+ 2638::Arash
2640
+ 2639::Daniela Mercury
2641
+ 2640::Lawrence
2642
+ 2641::LS Jack
2643
+ 2642::Steps
2644
+ 2643::E-Type
2645
+ 2644::Between the Buried and Me
2646
+ 2645::Born of Osiris
2647
+ 2646::Periphery
2648
+ 2647::Veil of Maya
2649
+ 2648::The Contortionist
2650
+ 2649::Within The Ruins
2651
+ 2650::After the Burial
2652
+ 2651::Moving Mountains
2653
+ 2652::Real Life
2654
+ 2653::John Taylor
2655
+ 2654::The Stills
2656
+ 2655::Orianthi
2657
+ 2656::Heaven Shall Burn
2658
+ 2657::Austrian Death Machine
2659
+ 2658::Stray From the Path
2660
+ 2659::Ana Cañas
2661
+ 2660::Silbermond
2662
+ 2661::Eluveitie
2663
+ 2662::Die Toten Hosen
2664
+ 2663::Hot Water Music
2665
+ 2664::Caliban
2666
+ 2665::It Prevails
2667
+ 2666::I Killed the Prom Queen
2668
+ 2667::Lisa Marie Presley
2669
+ 2668::Abandon All Ships
2670
+ 2669::The Crimson Armada
2671
+ 2670::Architects
2672
+ 2671::Alter Bridge
2673
+ 2672::Kutless
2674
+ 2673::Maylene and the Sons of Disaster
2675
+ 2674::Confide
2676
+ 2675::Demon Hunter
2677
+ 2676::Disarmonia Mundi
2678
+ 2677::Kyte
2679
+ 2678::Nightrage
2680
+ 2679::Misery Signals
2681
+ 2680::House vs. Hurricane
2682
+ 2681::One Morning Left
2683
+ 2682::Kids In Glass Houses
2684
+ 2683::FM Static
2685
+ 2684::Head
2686
+ 2685::Threat Signal
2687
+ 2686::Becoming the Archetype
2688
+ 2687::Against Me!
2689
+ 2688::Lucero
2690
+ 2689::Mooncake
2691
+ 2690::Of Mice & Men
2692
+ 2691::Sabrepulse
2693
+ 2692::Norma Jean
2694
+ 2693::Blockhead
2695
+ 2694::Emancipator
2696
+ 2695::Мои Ракеты Вверх
2697
+ 2696::The American Dollar
2698
+ 2697::Caspian
2699
+ 2698::NEVERSMILE
2700
+ 2699::Xe-NONE
2701
+ 2700::Asobi Seksu
2702
+ 2701::Lydia
2703
+ 2702::Bassnectar
2704
+ 2703::The Amity Affliction
2705
+ 2704::Maria Mena
2706
+ 2705::Gorgoroth
2707
+ 2706::Marduk
2708
+ 2707::Immortal
2709
+ 2708::Destruction
2710
+ 2709::Entombed
2711
+ 2710::Borknagar
2712
+ 2711::Nocturnal Depression
2713
+ 2712::Zемфира
2714
+ 2713::Adriano Celentano
2715
+ 2714::Alex Gaudino
2716
+ 2715::Shaggy
2717
+ 2716::Eels
2718
+ 2717::Tortoise
2719
+ 2718::Lights
2720
+ 2719::Nadja
2721
+ 2720::PJ Harvey
2722
+ 2721::Leonard Cohen
2723
+ 2722::Architecture in Helsinki
2724
+ 2723::The Sound of Animals Fighting
2725
+ 2724::Starlight Mints
2726
+ 2725::Mercyful Fate
2727
+ 2726::Anti-Flag
2728
+ 2727::Isaac Hayes
2729
+ 2728::The 69 Eyes
2730
+ 2729::The Von Bondies
2731
+ 2730::Deerhoof
2732
+ 2731::Apostle of Hustle
2733
+ 2732::Primus
2734
+ 2733::Xiu Xiu
2735
+ 2734::Built to Spill
2736
+ 2735::The Fiery Furnaces
2737
+ 2736::Peeping Tom
2738
+ 2737::Public Image Ltd.
2739
+ 2738::Eagles of Death Metal
2740
+ 2739::Charlotte Gainsbourg
2741
+ 2740::John Cage
2742
+ 2741::The Sugarcubes
2743
+ 2742::Danzig
2744
+ 2743::John Zorn
2745
+ 2744::Nurse With Wound
2746
+ 2745::Scarling.
2747
+ 2746::Caribou
2748
+ 2747::Liars
2749
+ 2748::GWAR
2750
+ 2749::Sun City Girls
2751
+ 2750::Secret Chiefs 3
2752
+ 2751::Celtic Frost
2753
+ 2752::Into Eternity
2754
+ 2753::Funki Porcini
2755
+ 2754::Leæther Strip
2756
+ 2755::Auf der Maur
2757
+ 2756::Living Colour
2758
+ 2757::Lovage
2759
+ 2758::Esthero
2760
+ 2759::Faust
2761
+ 2760::The Go! Team
2762
+ 2761::Camera Obscura
2763
+ 2762::The 5.6.7.8's
2764
+ 2763::Exciter
2765
+ 2764::Don Caballero
2766
+ 2765::Helmet
2767
+ 2766::Eluvium
2768
+ 2767::Pinback
2769
+ 2768::Angelspit
2770
+ 2769::Bif Naked
2771
+ 2770::John Frusciante
2772
+ 2771::Harold Budd
2773
+ 2772::Sloan
2774
+ 2773::Daniel Johnston
2775
+ 2774::Meat Puppets
2776
+ 2775::Beulah
2777
+ 2776::The Creepshow
2778
+ 2777::Matthew Herbert
2779
+ 2778::Tosca
2780
+ 2779::Afterlife
2781
+ 2780::El Perro del Mar
2782
+ 2781::Gravity Kills
2783
+ 2782::The Postmarks
2784
+ 2783::Minor Threat
2785
+ 2784::Hella
2786
+ 2785::Veruca Salt
2787
+ 2786::Love Is All
2788
+ 2787::Buckethead
2789
+ 2788::Richard Cheese
2790
+ 2789::Mouse on Mars
2791
+ 2790::MC5
2792
+ 2791::My Ruin
2793
+ 2792::Hanzel und Gretyl
2794
+ 2793::Gang Gang Dance
2795
+ 2794::The Concretes
2796
+ 2795::The Long Blondes
2797
+ 2796::OOIOO
2798
+ 2797::Shannon Wright
2799
+ 2798::Marcomé
2800
+ 2799::Terry Riley
2801
+ 2800::Tristan Feldbauer
2802
+ 2801::Warren Zevon
2803
+ 2802::Daniel Lanois
2804
+ 2803::The Tragically Hip
2805
+ 2804::Karlheinz Stockhausen
2806
+ 2805::Hello Saferide
2807
+ 2806::Unexpect
2808
+ 2807::This Heat
2809
+ 2808::Bill Laswell
2810
+ 2809::Art Ensemble of Chicago
2811
+ 2810::Milivoj Culibrk
2812
+ 2811::Estradasphere
2813
+ 2812::The Boy Least Likely To
2814
+ 2813::The Wimshurst's Machine
2815
+ 2814::Wintersleep
2816
+ 2815::Ministry
2817
+ 2816::New Model Army
2818
+ 2817::Johnny Hates Jazz
2819
+ 2818::Robert Palmer
2820
+ 2819::The Skatalites
2821
+ 2820::Lush
2822
+ 2821::Dead Can Dance
2823
+ 2822::Pavement
2824
+ 2823::Suicide
2825
+ 2824::Elvis Costello & The Attractions
2826
+ 2825::The Beat
2827
+ 2826::The Rapture
2828
+ 2827::Suzanne Vega
2829
+ 2828::The Sundays
2830
+ 2829::Neil Diamond
2831
+ 2830::Crass
2832
+ 2831::Clock DVA
2833
+ 2832::Toad the Wet Sprocket
2834
+ 2833::GBH
2835
+ 2834::Slowdive
2836
+ 2835::Mazzy Star
2837
+ 2836::The Rutles
2838
+ 2837::Brendan Benson
2839
+ 2838::Badly Drawn Boy
2840
+ 2839::The Undertones
2841
+ 2840::The Band
2842
+ 2841::The La's
2843
+ 2842::The Modern Lovers
2844
+ 2843::Mr. Mister
2845
+ 2844::Autograph
2846
+ 2845::Scandal
2847
+ 2846::Falco
2848
+ 2847::Soft Machine
2849
+ 2848::Black Tape for a Blue Girl
2850
+ 2849::The Rembrandts
2851
+ 2850::Buzzcocks
2852
+ 2851::Aimee Mann
2853
+ 2852::The Comsat Angels
2854
+ 2853::The Danse Society
2855
+ 2854::The Sounds
2856
+ 2855::Sebadoh
2857
+ 2856::Klaus Nomi
2858
+ 2857::The Lemonheads
2859
+ 2858::Laura Branigan
2860
+ 2859::Book of Love
2861
+ 2860::Tim Finn
2862
+ 2861::Camper Van Beethoven
2863
+ 2862::Pete Shelley
2864
+ 2863::The Icicle Works
2865
+ 2864::Buffalo Springfield
2866
+ 2865::Badfinger
2867
+ 2866::The Mission
2868
+ 2867::Ride
2869
+ 2868::The House of Love
2870
+ 2869::France Gall
2871
+ 2870::And Also the Trees
2872
+ 2871::The Church
2873
+ 2872::.38 Special
2874
+ 2873::X-Ray Spex
2875
+ 2874::Prince Buster
2876
+ 2875::Teenage Fanclub
2877
+ 2876::Big Star
2878
+ 2877::The Korgis
2879
+ 2878::The Raincoats
2880
+ 2879::Severed Heads
2881
+ 2880::Hüsker Dü
2882
+ 2881::The Germs
2883
+ 2882::Sham 69
2884
+ 2883::Television Personalities
2885
+ 2884::The Adverts
2886
+ 2885::X-Mal Deutschland
2887
+ 2886::The Replacements
2888
+ 2887::IMA Robot
2889
+ 2888::Freur
2890
+ 2889::Youth Group
2891
+ 2890::Love Spirals Downwards
2892
+ 2891::Low
2893
+ 2892::Todd Rundgren
2894
+ 2893::Stereo Total
2895
+ 2894::Television
2896
+ 2895::The Blue Nile
2897
+ 2896::Hothouse Flowers
2898
+ 2897::Kirsty MacColl
2899
+ 2898::Billy Bragg
2900
+ 2899::Josef K
2901
+ 2900::Sid Vicious
2902
+ 2901::The Lightning Seeds
2903
+ 2902::UK Subs
2904
+ 2903::YACHT
2905
+ 2904::Young Marble Giants
2906
+ 2905::Talulah Gosh
2907
+ 2906::Faces
2908
+ 2907::The Teardrop Explodes
2909
+ 2908::Pere Ubu
2910
+ 2909::The Boomtown Rats
2911
+ 2910::Pop Will Eat Itself
2912
+ 2911::Das Ich
2913
+ 2912::Stromae
2914
+ 2913::Social Distortion
2915
+ 2914::Salem
2916
+ 2915::The Black Dahlia Murder
2917
+ 2916::Sonic Syndicate
2918
+ 2917::Emmure
2919
+ 2918::The Dillinger Escape Plan
2920
+ 2919::Dance Gavin Dance
2921
+ 2920::Agoraphobic Nosebleed
2922
+ 2921::Mastodon
2923
+ 2922::Naked City
2924
+ 2923::Rhapsody of Fire
2925
+ 2924::Exodus
2926
+ 2925::Primal Fear
2927
+ 2926::Tankard
2928
+ 2927::Artillery
2929
+ 2928::Toxic Holocaust
2930
+ 2929::Municipal Waste
2931
+ 2930::Napalm Death
2932
+ 2931::Bleeding Through
2933
+ 2932::Waking The Cadaver
2934
+ 2933::Impending Doom
2935
+ 2934::See You Next Tuesday
2936
+ 2935::Megaherz
2937
+ 2936::Architect
2938
+ 2937::ВFI
2939
+ 2938::Blind Witness
2940
+ 2939::We Butter The Bread With Butter
2941
+ 2940::It Dies Today
2942
+ 2941::Danko Jones
2943
+ 2942::Best Coast
2944
+ 2943::Protest The Hero
2945
+ 2944::Ляпис Трубецкой
2946
+ 2945::1349
2947
+ 2946::Vader
2948
+ 2947::Dark Funeral
2949
+ 2948::Joe Bonamassa
2950
+ 2949::Machinae Supremacy
2951
+ 2950::Hypocrisy
2952
+ 2951::Converge
2953
+ 2952::The Ghost Inside
2954
+ 2953::Comeback Kid
2955
+ 2954::Have Heart
2956
+ 2955::New Years Day
2957
+ 2956::iwrestledabearonce
2958
+ 2957::Ektomorf
2959
+ 2958::Blood Red Shoes
2960
+ 2959::Misery Index
2961
+ 2960::The Faceless
2962
+ 2961::The Chariot
2963
+ 2962::Asesino
2964
+ 2963::H2O
2965
+ 2964::Possessed
2966
+ 2965::Dissection
2967
+ 2966::Altaria
2968
+ 2967::The Acacia Strain
2969
+ 2968::Terror
2970
+ 2969::In Fear and Faith
2971
+ 2970::Necrophagist
2972
+ 2971::Adept
2973
+ 2972::Anal Cunt
2974
+ 2973::I Declare War
2975
+ 2974::Dr. Acula
2976
+ 2975::And Hell Followed With
2977
+ 2976::Attila
2978
+ 2977::War From a Harlots Mouth
2979
+ 2978::Molotov Solution
2980
+ 2979::The Tony Danza Tapdance Extravaganza
2981
+ 2980::To/Die/For
2982
+ 2981::Throwdown
2983
+ 2982::Sadus
2984
+ 2983::Agnostic Front
2985
+ 2984::Emigrate
2986
+ 2985::Dungeon Elite [**]
2987
+ 2986::iamerror
2988
+ 2987::Arsonists Get All The Girls
2989
+ 2988::Greeley Estates
2990
+ 2989::Coroner
2991
+ 2990::I:Scintilla
2992
+ 2991::Conjure One
2993
+ 2992::De-Phazz
2994
+ 2993::Corrosion of Conformity
2995
+ 2994::Virgin Black
2996
+ 2995::Onslaught
2997
+ 2996::The Derek Trucks Band
2998
+ 2997::For the Fallen Dreams
2999
+ 2998::Sumatra
3000
+ 2999::Every Time I Die
3001
+ 3000::Memphis May Fire
3002
+ 3001::Xentrix
3003
+ 3002::Times of Grace
3004
+ 3003::Charlie Musselwhite
3005
+ 3004::Death Before Dishonor
3006
+ 3005::Kylesa
3007
+ 3006::The Locust
3008
+ 3007::Spectra Paris
3009
+ 3008::Raunchy
3010
+ 3009::Element Eighty
3011
+ 3010::Your Demise
3012
+ 3011::Spineshank
3013
+ 3012::Oceana
3014
+ 3013::Razor
3015
+ 3014::Beseech
3016
+ 3015::Imperanon
3017
+ 3016::An Albatross
3018
+ 3017::The Human Abstract
3019
+ 3018::Circle Jerks
3020
+ 3019::Envy
3021
+ 3020::United Nations
3022
+ 3021::Sybreed
3023
+ 3022::Prodigy
3024
+ 3023::Sick of It All
3025
+ 3024::Trap Them
3026
+ 3025::Walter Trout
3027
+ 3026::Dead Poetic
3028
+ 3027::Meg & Dia
3029
+ 3028::Cephalic Carnage
3030
+ 3029::Tesseract
3031
+ 3030::Bonded By Blood
3032
+ 3031::Casey Jones
3033
+ 3032::Silent Civilian
3034
+ 3033::Through the Eyes of the Dead
3035
+ 3034::HEARTSREVOLUTION
3036
+ 3035::Amanda Blank
3037
+ 3036::T.S.O.L.
3038
+ 3037::AKADO
3039
+ 3038::Arkaea
3040
+ 3039::Scanners
3041
+ 3040::Super Junior
3042
+ 3041::SHINee
3043
+ 3042::소녀시대
3044
+ 3043::BoA
3045
+ 3044::M. Ward
3046
+ 3045::Devendra Banhart
3047
+ 3046::Nick Drake
3048
+ 3047::Tracy Chapman
3049
+ 3048::Tilly and the Wall
3050
+ 3049::The Magnetic Fields
3051
+ 3050::Jens Lekman
3052
+ 3051::Neutral Milk Hotel
3053
+ 3052::The Microphones
3054
+ 3053::Augustus Pablo
3055
+ 3054::King Tubby
3056
+ 3055::Lee "Scratch" Perry
3057
+ 3056::The Gun Club
3058
+ 3057::Spiritualized
3059
+ 3058::The Flying Burrito Brothers
3060
+ 3059::Loretta Lynn
3061
+ 3060::Hawkwind
3062
+ 3061::Leadbelly
3063
+ 3062::Grandaddy
3064
+ 3063::Guided by Voices
3065
+ 3064::Townes Van Zandt
3066
+ 3065::Red House Painters
3067
+ 3066::Madredeus
3068
+ 3067::Mediæval Bæbes
3069
+ 3068::Her Space Holiday
3070
+ 3069::Hood
3071
+ 3070::Ane Brun
3072
+ 3071::Son House
3073
+ 3072::Sopor Aeternus & The Ensemble of Shadows
3074
+ 3073::Parkway Drive
3075
+ 3074::Avantasia
3076
+ 3075::You Me At Six
3077
+ 3076::Mis-Teeq
3078
+ 3077::Adam Green
3079
+ 3078::Nina Simone
3080
+ 3079::Stacie Orrico
3081
+ 3080::Emma Shapplin
3082
+ 3081::4minute
3083
+ 3082::Eazy-E
3084
+ 3083::Mobb Deep
3085
+ 3084::Chayanne
3086
+ 3085::Royce da 5'9"
3087
+ 3086::Busta Rhymes
3088
+ 3087::Sara Bareilles
3089
+ 3088::Lenka
3090
+ 3089::A Static Lullaby
3091
+ 3090::Girl Talk
3092
+ 3091::Spinnerette
3093
+ 3092::Will Young
3094
+ 3093::Macy Gray
3095
+ 3094::This Mortal Coil
3096
+ 3095::Cold War Kids
3097
+ 3096::Marit Larsen
3098
+ 3097::Every Avenue
3099
+ 3098::Edenbridge
3100
+ 3099::Lisa Miskovsky
3101
+ 3100::Krystal Meyers
3102
+ 3101::Kirlian Camera
3103
+ 3102::The Organ
3104
+ 3103::Clannad
3105
+ 3104::Lisa "Left Eye" Lopes
3106
+ 3105::Millionaires
3107
+ 3106::The Casualties
3108
+ 3107::Evile
3109
+ 3108::Cadaveria
3110
+ 3109::Astarte
3111
+ 3110::Elis
3112
+ 3111::Jonathan Davis
3113
+ 3112::Android Lust
3114
+ 3113::Hillsong United
3115
+ 3114::The Toy Dolls
3116
+ 3115::A Place to Bury Strangers
3117
+ 3116::Crash Test Dummies
3118
+ 3117::The Wreckers
3119
+ 3118::Lunatica
3120
+ 3119::Draconian
3121
+ 3120::Trail of Tears
3122
+ 3121::Uh Huh Her
3123
+ 3122::Planetshakers
3124
+ 3123::Coal Chamber
3125
+ 3124::The Submarines
3126
+ 3125::Mortal Love
3127
+ 3126::M2M
3128
+ 3127::Danity Kane
3129
+ 3128::Doda
3130
+ 3129::Brodka
3131
+ 3130::GZA/Genius
3132
+ 3131::Ghostface Killah
3133
+ 3132::Natasha Bedingfield
3134
+ 3133::Aretha Franklin
3135
+ 3134::James Brown
3136
+ 3135::Alexandra Burke
3137
+ 3136::Ewa Farna
3138
+ 3137::Lady Sovereign
3139
+ 3138::Dan Balan
3140
+ 3139::Eve
3141
+ 3140::Carla Bruni
3142
+ 3141::Minnie Riperton
3143
+ 3142::Czesław Śpiewa
3144
+ 3143::Etta James
3145
+ 3144::Michael Bublé
3146
+ 3145::Beady Eye
3147
+ 3146::Eric B. & Rakim
3148
+ 3147::Shontelle
3149
+ 3148::Hey
3150
+ 3149::Édith Piaf
3151
+ 3150::Peter Doherty
3152
+ 3151::Angus & Julia Stone
3153
+ 3152::Strachy Na Lachy
3154
+ 3153::Yolanda Be Cool & DCUP
3155
+ 3154::Paloma Faith
3156
+ 3155::Stanfour
3157
+ 3156::Hellogoodbye
3158
+ 3157::Nosowska
3159
+ 3158::Ania
3160
+ 3159::Bow Wow
3161
+ 3160::Aura Dione
3162
+ 3161::Cody Simpson
3163
+ 3162::Natalia Kills
3164
+ 3163::James Horner
3165
+ 3164::Lea Michele
3166
+ 3165::Nneka
3167
+ 3166::Alexis Jordan
3168
+ 3167::Tinchy Stryder
3169
+ 3168::Lauryn Hill
3170
+ 3169::Mark Ronson & The Business Intl
3171
+ 3170::Box Car Racer
3172
+ 3171::Flipsyde
3173
+ 3172::Soulja Boy
3174
+ 3173::En Vogue
3175
+ 3174::Clipse
3176
+ 3175::Yael Naim
3177
+ 3176::Flogging Molly
3178
+ 3177::Smolik
3179
+ 3178::Ana Johnsson
3180
+ 3179::Sinéad O'Connor
3181
+ 3180::Clare Maguire
3182
+ 3181::Mike Posner
3183
+ 3182::Justin Nozuka
3184
+ 3183::T-Pain
3185
+ 3184::Kaki King
3186
+ 3185::Vanessa Paradis
3187
+ 3186::Tom Petty
3188
+ 3187::Rachael Yamagata
3189
+ 3188::Måns Zelmerlöw
3190
+ 3189::Moulin Rouge
3191
+ 3190::Hope
3192
+ 3191::Run-D.M.C.
3193
+ 3192::The Supremes
3194
+ 3193::Diana Ross and The Supremes
3195
+ 3194::Hurt
3196
+ 3195::Pretty Ricky
3197
+ 3196::Gotan Project
3198
+ 3197::The Last Goodnight
3199
+ 3198::Zabili Mi Żółwia
3200
+ 3199::Cee-Lo
3201
+ 3200::Cee Lo Green
3202
+ 3201::Lil Mama
3203
+ 3202::Anita Baker
3204
+ 3203::Julian Casablancas
3205
+ 3204::Poisonblack
3206
+ 3205::Black Stone Cherry
3207
+ 3206::Queensberry
3208
+ 3207::The Blackout
3209
+ 3208::deadmau5
3210
+ 3209::Marianas Trench
3211
+ 3210::Aiden
3212
+ 3211::Sugarcult
3213
+ 3212::CKY
3214
+ 3213::Halestorm
3215
+ 3214::Emerson, Lake & Palmer
3216
+ 3215::Kix
3217
+ 3216::The Saturdays
3218
+ 3217::Jessie J
3219
+ 3218::Rebecca Black
3220
+ 3219::Самое Большое Простое Число
3221
+ 3220::Red Snapper
3222
+ 3221::Animal ДжаZ
3223
+ 3222::Laika
3224
+ 3223::Killwhitneydead
3225
+ 3224::Texas in July
3226
+ 3225::Elio e le Storie Tese
3227
+ 3226::TV on the Radio
3228
+ 3227::Blackfield
3229
+ 3228::Helena Paparizou
3230
+ 3229::Midge Ure
3231
+ 3230::Exposé
3232
+ 3231::X-Dream
3233
+ 3232::Lights of Euphoria
3234
+ 3233::Astrud Gilberto
3235
+ 3234::Athlete
3236
+ 3235::The Ark
3237
+ 3236::Alizée
3238
+ 3237::Huey Lewis & The News
3239
+ 3238::Morten Harket
3240
+ 3239::Bad Boys Blue
3241
+ 3240::Silent Circle
3242
+ 3241::Fancy
3243
+ 3242::Astrix
3244
+ 3243::Chris Spheeris
3245
+ 3244::عمر دياب
3246
+ 3245::Cesária Évora
3247
+ 3246::Supreme Beings of Leisure
3248
+ 3247::Bread
3249
+ 3248::Frou Frou
3250
+ 3249::Ace Ventura
3251
+ 3250::Guillemots
3252
+ 3251::Gus Gus
3253
+ 3252::Boy George
3254
+ 3253::Jay-Jay Johanson
3255
+ 3254::Aqualung
3256
+ 3255::Tahiti 80
3257
+ 3256::Offer Nissim
3258
+ 3257::Orient Expressions
3259
+ 3258::Elegant Machinery
3260
+ 3259::Mel & Kim
3261
+ 3260::Bebel Gilberto
3262
+ 3261::Jody Watley
3263
+ 3262::Ghosts
3264
+ 3263::Mumm-Ra
3265
+ 3264::Bitter:Sweet
3266
+ 3265::Angtoria
3267
+ 3266::Terence Trent D'Arby
3268
+ 3267::Nicola Conte
3269
+ 3268::Alter Ego
3270
+ 3269::Samantha Fox
3271
+ 3270::Anouar Brahem
3272
+ 3271::Liza Minnelli
3273
+ 3272::Blue Stone
3274
+ 3273::Peter Schilling
3275
+ 3274::Omar Faruk Tekbilek
3276
+ 3275::Team Sleep
3277
+ 3276::Up, Bustle and Out
3278
+ 3277::Parov Stelar
3279
+ 3278::Asian Dub Foundation
3280
+ 3279::Chase & Status
3281
+ 3280::KOTOKO
3282
+ 3281::安室奈美恵
3283
+ 3282::モーニング娘。
3284
+ 3283::大塚愛
3285
+ 3284::水樹奈々
3286
+ 3285::林原めぐみ
3287
+ 3286::北出菜奈
3288
+ 3287::Zola Jesus
3289
+ 3288::Blackmore's Night
3290
+ 3289::Stevie Ray Vaughan
3291
+ 3290::Rick Wakeman
3292
+ 3291::Geddy Lee
3293
+ 3292::Roger Waters
3294
+ 3293::Van der Graaf Generator
3295
+ 3294::Focus
3296
+ 3295::Bachman-Turner Overdrive
3297
+ 3296::Slade
3298
+ 3297::Traffic
3299
+ 3298::Premiata Forneria Marconi
3300
+ 3299::B.B. King & Eric Clapton
3301
+ 3300::Neil Young & Crazy Horse
3302
+ 3301::Inti-Illimani
3303
+ 3302::Thievery Corporation
3304
+ 3303::Ornette Coleman
3305
+ 3304::Michael Gray
3306
+ 3305::Madvillain
3307
+ 3306::Sneaker Pimps
3308
+ 3307::Skye
3309
+ 3308::Dangerous Muse
3310
+ 3309::Grizzly Bear
3311
+ 3310::Martina Topley-Bird
3312
+ 3311::Smoke City
3313
+ 3312::Halou
3314
+ 3313::H.U.V.A. Network
3315
+ 3314::Aes Dana
3316
+ 3315::Anja Garbarek
3317
+ 3316::dZihan & Kamien
3318
+ 3317::Days of the New
3319
+ 3318::Shirley Bassey
3320
+ 3319::Patti LaBelle
3321
+ 3320::The Field
3322
+ 3321::Daughter Darling
3323
+ 3322::Montefiori Cocktail
3324
+ 3323::Laurent Garnier
3325
+ 3324::Waldeck
3326
+ 3325::Rue du Soleil
3327
+ 3326::Kruder & Dorfmeister
3328
+ 3327::DeVotchKa
3329
+ 3328::O Teatro Mágico
3330
+ 3329::The Tallest Man on Earth
3331
+ 3330::New Young Pony Club
3332
+ 3331::Pizzicato Five
3333
+ 3332::The Dresden Dolls
3334
+ 3333::Those Dancing Days
3335
+ 3334::Laura Marling
3336
+ 3335::Super Furry Animals
3337
+ 3336::Nightmare
3338
+ 3337::Rory Gallagher
3339
+ 3338::Duke Ellington
3340
+ 3339::St. Vincent
3341
+ 3340::Brian Wilson
3342
+ 3341::Owen Pallett
3343
+ 3342::Jorge Drexler
3344
+ 3343::The Raveonettes
3345
+ 3344::Tim Maia
3346
+ 3345::Janelle Monáe
3347
+ 3346::Tujiko Noriko
3348
+ 3347::Vashti Bunyan
3349
+ 3348::Sons and Daughters
3350
+ 3349::Nico
3351
+ 3350::Rasputina
3352
+ 3351::The Zutons
3353
+ 3352::Harry and the Potters
3354
+ 3353::The Ditty Bops
3355
+ 3354::VHS or Beta
3356
+ 3355::The Seatbelts
3357
+ 3356::Louise Attaque
3358
+ 3357::Au Revoir Simone
3359
+ 3358::Emmy the Great
3360
+ 3359::Juli
3361
+ 3360::Hope Sandoval & The Warm Inventions
3362
+ 3361::Dum Dum Girls
3363
+ 3362::Blue Man Group
3364
+ 3363::Louis XIV
3365
+ 3364::The Grates
3366
+ 3365::The Cribs
3367
+ 3366::The Maccabees
3368
+ 3367::Envydust
3369
+ 3368::Head Automatica
3370
+ 3369::Boy Kill Boy
3371
+ 3370::The View
3372
+ 3371::The Futureheads
3373
+ 3372::Moptop
3374
+ 3373::Madina Lake
3375
+ 3374::Juliette and The Licks
3376
+ 3375::Sirenia
3377
+ 3376::Kristine Elezaj
3378
+ 3377::Ticon
3379
+ 3378::Asura
3380
+ 3379::Brigitte Bardot
3381
+ 3380::Hybrid
3382
+ 3381::Lützenkirchen
3383
+ 3382::Kiara Rocks
3384
+ 3383::嵐
3385
+ 3384::玉置成実
3386
+ 3385::Grave Digger
3387
+ 3386::Bonfire
3388
+ 3387::3 Inches of Blood
3389
+ 3388::Nazareth
3390
+ 3389::Sandy Leah
3391
+ 3390::Mat Kearney
3392
+ 3391::Nonpoint
3393
+ 3392::Turbonegro
3394
+ 3393::Scott Walker
3395
+ 3394::Gianna Nannini
3396
+ 3395::Velvet Acid Christ
3397
+ 3396::David Gray
3398
+ 3397::Rancid
3399
+ 3398::The Wallflowers
3400
+ 3399::Teitur
3401
+ 3400::Psapp
3402
+ 3401::Natalie Walker
3403
+ 3402::Deathstars
3404
+ 3403::My Life with the Thrill Kill Kult
3405
+ 3404::Hank Williams Jr.
3406
+ 3405::Hootie & the Blowfish
3407
+ 3406::Gov't Mule
3408
+ 3407::Lene Marlin
3409
+ 3408::Toadies
3410
+ 3409::Joy Williams
3411
+ 3410::Mr. Scruff
3412
+ 3411::Anna Nalick
3413
+ 3412::Olivia Ruiz
3414
+ 3413::Thomas Dybdahl
3415
+ 3414::Vermillion Lies
3416
+ 3415::Jerry Lee Lewis
3417
+ 3416::Charlie Daniels Band
3418
+ 3417::Blues Traveler
3419
+ 3418::Jimmy Cliff
3420
+ 3419::Better Than Ezra
3421
+ 3420::Vienna Teng
3422
+ 3421::Bel Canto
3423
+ 3422::Jonny Lang
3424
+ 3423::ohGr
3425
+ 3424::Pigface
3426
+ 3425::Crosby, Stills & Nash
3427
+ 3426::Celestial Aeon Project
3428
+ 3427::Powerman 5000
3429
+ 3428::Ace Frehley
3430
+ 3429::Various Artists
3431
+ 3430::Alcest
3432
+ 3431::Heaven & Hell
3433
+ 3432::Tor Lundvall
3434
+ 3433::All My Faith Lost ...
3435
+ 3434::Magna Canta
3436
+ 3435::32Crash
3437
+ 3436::Jota Quest
3438
+ 3437::Scars on Broadway
3439
+ 3438::Upcdowncleftcrightcabc+start
3440
+ 3439::Banco de Gaia
3441
+ 3440::I Hear Sirens
3442
+ 3441::Yonderboi
3443
+ 3442::Kwoon
3444
+ 3443::Fuck Buttons
3445
+ 3444::Destroyalldreamers
3446
+ 3445::Пелагея
3447
+ 3446::Rachel's
3448
+ 3447::Tunturia
3449
+ 3448::Peter Tosh
3450
+ 3449::Damian Marley
3451
+ 3450::Vavamuffin
3452
+ 3451::Ziggy Marley
3453
+ 3452::Gentleman
3454
+ 3453::He Is Legend
3455
+ 3454::Dance Club Massacre
3456
+ 3455::Pogo
3457
+ 3456::Circa Survive
3458
+ 3457::Kill Paradise
3459
+ 3458::Tyler, The Creator
3460
+ 3459::Anna Calvi
3461
+ 3460::Rev Theory
3462
+ 3461::The Perishers
3463
+ 3462::José González
3464
+ 3463::The Wombats
3465
+ 3464::Madlib
3466
+ 3465::Paul Kalkbrenner
3467
+ 3466::James Blake
3468
+ 3467::Télépopmusik
3469
+ 3468::Tesla Boy
3470
+ 3469::Trentemøller
3471
+ 3470::Yoav
3472
+ 3471::Bent
3473
+ 3472::Sad Lovers and Giants
3474
+ 3473::The Chameleons
3475
+ 3474::HEALTH
3476
+ 3475::Toro y Moi
3477
+ 3476::SCSI-9
3478
+ 3477::Zeigeist
3479
+ 3478::Beach Fossils
3480
+ 3479::Bad Lieutenant
3481
+ 3480::Gui Boratto
3482
+ 3481::Chairlift
3483
+ 3482::Nathan Fake
3484
+ 3483::Thieves Like Us
3485
+ 3484::Kollektiv Turmstrasse
3486
+ 3485::Extrawelt
3487
+ 3486::Stephan Bodzin
3488
+ 3487::Minilogue
3489
+ 3488::Sascha Funke
3490
+ 3489::A Camp
3491
+ 3490::Anthony Rother
3492
+ 3491::Matthew Dear
3493
+ 3492::The Mary Onettes
3494
+ 3493::Erik Satie
3495
+ 3494::Weather Report
3496
+ 3495::Nujabes
3497
+ 3496::Nick Cave & Warren Ellis
3498
+ 3497::Night Ranger
3499
+ 3498::Girlicious
3500
+ 3499::Blitz
3501
+ 3500::Worm Is Green
3502
+ 3501::Broken Social Scene
3503
+ 3502::Great Lake Swimmers
3504
+ 3503::Grateful Dead
3505
+ 3504::Anathallo
3506
+ 3505::Piano Magic
3507
+ 3506::Do Make Say Think
3508
+ 3507::Cartola
3509
+ 3508::The Mountain Goats
3510
+ 3509::Okkervil River
3511
+ 3510::Amália Rodrigues
3512
+ 3511::Celtic Woman
3513
+ 3512::João Gilberto
3514
+ 3513::Woods
3515
+ 3514::Ray LaMontagne
3516
+ 3515::The Brian Jonestown Massacre
3517
+ 3516::Streetlight Manifesto
3518
+ 3517::Ash
3519
+ 3518::Elis Regina
3520
+ 3519::Bang Gang
3521
+ 3520::Baden Powell
3522
+ 3521::The Radio Dept.
3523
+ 3522::Stafrænn Hákon
3524
+ 3523::Owen
3525
+ 3524::My Morning Jacket
3526
+ 3525::Amusement Parks on Fire
3527
+ 3526::Efterklang
3528
+ 3527::Gray Strawberries
3529
+ 3528::Omnia
3530
+ 3529::September Malevolence
3531
+ 3530::The Meters
3532
+ 3531::Chapterhouse
3533
+ 3532::Nara Leão
3534
+ 3533::Borko
3535
+ 3534::Electrelane
3536
+ 3535::Novos Baianos
3537
+ 3536::Carissa's Wierd
3538
+ 3537::Under Byen
3539
+ 3538::Huun-Huur-Tu
3540
+ 3539::Stan Getz
3541
+ 3540::Andrei Machado
3542
+ 3541::Say Hi to Your Mom
3543
+ 3542::Kitchens of Distinction
3544
+ 3543::Sun Kil Moon
3545
+ 3544::Casiotone for the Painfully Alone
3546
+ 3545::Drive-By Truckers
3547
+ 3546::Love
3548
+ 3547::Hermeto Pascoal
3549
+ 3548::Roger Miller
3550
+ 3549::Horse Feathers
3551
+ 3550::Akron/Family
3552
+ 3551::Tristeza
3553
+ 3552::A Hawk and a Hacksaw
3554
+ 3553::Delays
3555
+ 3554::Rilo Kiley
3556
+ 3555::Infernal
3557
+ 3556::dredg
3558
+ 3557::Trapt
3559
+ 3558::Billy Currington
3560
+ 3559::Richard Ashcroft
3561
+ 3560::From Autumn to Ashes
3562
+ 3561::Ronan Keating
3563
+ 3562::Cat Stevens
3564
+ 3563::Train
3565
+ 3564::Richie Sambora
3566
+ 3565::Air Traffic
3567
+ 3566::Entwine
3568
+ 3567::Brian McKnight
3569
+ 3568::Superchic[k]
3570
+ 3569::Globus
3571
+ 3570::Semisonic
3572
+ 3571::Skazi
3573
+ 3572::Shadow Gallery
3574
+ 3573::Dark Moor
3575
+ 3574::Nocturnal Rites
3576
+ 3575::Lost Horizon
3577
+ 3576::Evergrey
3578
+ 3577::TV-2
3579
+ 3578::Common
3580
+ 3579::Faith Evans
3581
+ 3580::Otis Redding
3582
+ 3581::The Game
3583
+ 3582::Musiq
3584
+ 3583::Dizzee Rascal
3585
+ 3584::Redman
3586
+ 3585::Utada
3587
+ 3586::Akufen
3588
+ 3587::Black Star
3589
+ 3588::Third Eye Blind
3590
+ 3589::Jennifer Love Hewitt
3591
+ 3590::Example
3592
+ 3591::Coconut Records
3593
+ 3592::Ginuwine
3594
+ 3593::Jamie T
3595
+ 3594::Mario
3596
+ 3595::112
3597
+ 3596::Ben Woods
3598
+ 3597::John Lee Hooker
3599
+ 3598::Missy Higgins
3600
+ 3599::Christina Milian
3601
+ 3600::Lisa Gerrard
3602
+ 3601::Black Lab
3603
+ 3602::Pete Yorn
3604
+ 3603::Ol' Dirty Bastard
3605
+ 3604::Alkaline Trio
3606
+ 3605::Pennywise
3607
+ 3606::Battlelore
3608
+ 3607::Lucinda Williams
3609
+ 3608::RZA
3610
+ 3609::Amber Pacific
3611
+ 3610::植松伸夫
3612
+ 3611::Lemon Jelly
3613
+ 3612::The Blues Brothers
3614
+ 3613::The Living End
3615
+ 3614::The J. Geils Band
3616
+ 3615::Beatsteaks
3617
+ 3616::Meat Loaf
3618
+ 3617::Jagged Edge
3619
+ 3618::Marcy Playground
3620
+ 3619::Stealers Wheel
3621
+ 3620::The Click Five
3622
+ 3621::One Night Only
3623
+ 3622::Scouting for Girls
3624
+ 3623::The Crowns
3625
+ 3624::The Bird and The Bee
3626
+ 3625::Chiodos
3627
+ 3626::The Get Up Kids
3628
+ 3627::The Coasters
3629
+ 3628::Overkill
3630
+ 3629::Down
3631
+ 3630::Nek
3632
+ 3631::Gipsy Kings
3633
+ 3632::Ibrahim Ferrer
3634
+ 3633::Paco de Lucía
3635
+ 3634::Alejandro Fernández
3636
+ 3635::Moenia
3637
+ 3636::Karsh Kale
3638
+ 3637::Oliver Shanti
3639
+ 3638::Graham Coxon
3640
+ 3639::Maino
3641
+ 3640::Keyshia Cole
3642
+ 3641::Винтаж
3643
+ 3642::Booty Luv
3644
+ 3643::Three 6 Mafia
3645
+ 3644::Jordan Rudess
3646
+ 3645::Lunatic Soul
3647
+ 3646::David Gilmour
3648
+ 3647::Liquid Tension Experiment
3649
+ 3648::Al Di Meola
3650
+ 3649::John Petrucci
3651
+ 3650::Michael Giacchino
3652
+ 3651::Django Reinhardt
3653
+ 3652::Donovan
3654
+ 3653::Fish
3655
+ 3654::Fates Warning
3656
+ 3655::Obscura
3657
+ 3656::Camel
3658
+ 3657::Atheist
3659
+ 3658::Gentle Giant
3660
+ 3659::David Sylvian
3661
+ 3660::Nine Horses
3662
+ 3661::Harry Gregson-Williams
3663
+ 3662::Beardfish
3664
+ 3663::Phideaux
3665
+ 3664::Eloy
3666
+ 3665::Pestilence
3667
+ 3666::Kenny G
3668
+ 3667::Rain Tree Crow
3669
+ 3668::Goblin
3670
+ 3669::maudlin of the Well
3671
+ 3670::Caravan
3672
+ 3671::Gryphon
3673
+ 3672::Chroma Key
3674
+ 3673::Robin Trower
3675
+ 3674::Chick Corea
3676
+ 3675::Return to Forever
3677
+ 3676::Renaissance
3678
+ 3677::Trey Gunn
3679
+ 3678::The Rakes
3680
+ 3679::Dirty Pretty Things
3681
+ 3680::Milburn
3682
+ 3681::The Bravery
3683
+ 3682::Quietdrive
3684
+ 3683::The Automatic
3685
+ 3684::The Hoosiers
3686
+ 3685::Obie Trice
3687
+ 3686::Bizarre
3688
+ 3687::Cassiane
3689
+ 3688::Kim Carnes
3690
+ 3689::Brenda Lee
3691
+ 3690::Jack's Mannequin
3692
+ 3691::Motion City Soundtrack
3693
+ 3692::The John Butler Trio
3694
+ 3693::The Cloud Room
3695
+ 3694::Mark Knopfler
3696
+ 3695::Haircut 100
3697
+ 3696::Olivia Newton-John
3698
+ 3697::Idlewild
3699
+ 3698::Edwyn Collins
3700
+ 3699::Orange Juice
3701
+ 3700::Trio
3702
+ 3701::Jane Wiedlin
3703
+ 3702::Passengers
3704
+ 3703::Andy Taylor
3705
+ 3704::Nuclear Assault
3706
+ 3705::Doves
3707
+ 3706::Hot Hot Heat
3708
+ 3707::Klaus Badelt
3709
+ 3708::Kidneythieves
3710
+ 3709::Grave
3711
+ 3710::D'espairsRay
3712
+ 3711::Benjamin Biolay
3713
+ 3712::Ian Brown
3714
+ 3713::Eddie Cochran
3715
+ 3714::Link Wray
3716
+ 3715::Gary Jules
3717
+ 3716::Immortal Technique
3718
+ 3717::Jurassic 5
3719
+ 3718::Blackalicious
3720
+ 3719::Bone Thugs-N-Harmony
3721
+ 3720::Ice Cube
3722
+ 3721::Cam'ron
3723
+ 3722::Notorious B.I.G.
3724
+ 3723::Naughty by Nature
3725
+ 3724::Proof
3726
+ 3725::Big L
3727
+ 3726::Taj Mahal
3728
+ 3727::Andrew W.K.
3729
+ 3728::Joe
3730
+ 3729::Vanilla Ice
3731
+ 3730::Cap'n Jazz
3732
+ 3731::Less Than Jake
3733
+ 3732::Saves the Day
3734
+ 3733::American Football
3735
+ 3734::Matt Pond PA
3736
+ 3735::Karunesh
3737
+ 3736::Ravi Shankar
3738
+ 3737::Philip Glass
3739
+ 3738::Frozen Silence
3740
+ 3739::Current 93
3741
+ 3740::Psychic TV
3742
+ 3741::Ryoji Ikeda
3743
+ 3742::Zoviet France
3744
+ 3743::Lustmord
3745
+ 3744::Alva Noto
3746
+ 3745::Sergei Rachmaninoff
3747
+ 3746::Lusine
3748
+ 3747::Kaya Project
3749
+ 3748::Tangerine Dream
3750
+ 3749::Atrium Carceri
3751
+ 3750::Kammarheit
3752
+ 3751::Kronos Quartet
3753
+ 3752::Niyaz
3754
+ 3753::Steve Roach
3755
+ 3754::Muslimgauze
3756
+ 3755::Yat-Kha
3757
+ 3756::Steve Reich
3758
+ 3757::Luc Ferrari
3759
+ 3758::Baskyl
3760
+ 3759::Modus
3761
+ 3760::大谷幸
3762
+ 3761::DJ Food
3763
+ 3762::Jon Hopkins
3764
+ 3763::Mythos
3765
+ 3764::THEreminGIRL
3766
+ 3765::The Teenagers
3767
+ 3766::Solar Fields
3768
+ 3767::The Tear Garden
3769
+ 3768::Goran Bregović
3770
+ 3769::Michael Andrews
3771
+ 3770::Patrick Doyle
3772
+ 3771::Eric Serra
3773
+ 3772::Jan Hammer
3774
+ 3773::Swans
3775
+ 3774::John Barry
3776
+ 3775::Dario Marianelli
3777
+ 3776::Side Liner
3778
+ 3777::Revolting Cocks
3779
+ 3778::American Head Charge
3780
+ 3779::Lords of Acid
3781
+ 3780::Zeromancer
3782
+ 3781::Zombina and the Skeletones
3783
+ 3782::Dog Fashion Disco
3784
+ 3783::Mortiis
3785
+ 3784::Sevendust
3786
+ 3785::Stabbing Westward
3787
+ 3786::Godhead
3788
+ 3787::Fun Lovin' Criminals
3789
+ 3788::Pagoda
3790
+ 3789::Moonspell
3791
+ 3790::Gojira
3792
+ 3791::Balmorhea
3793
+ 3792::Glen Hansard & Markéta Irglová
3794
+ 3793::Peace Orchestra
3795
+ 3794::dEUS
3796
+ 3795::Diante do Trono
3797
+ 3796::Carter Burwell
3798
+ 3797::Dusty Kid
3799
+ 3798::Modjo
3800
+ 3799::Nada Surf
3801
+ 3800::Matia Bazar
3802
+ 3801::The Glove
3803
+ 3802::Sex Gang Children
3804
+ 3803::Altered Images
3805
+ 3804::The Tornados
3806
+ 3805::Them Crooked Vultures
3807
+ 3806::Ólafur Arnalds
3808
+ 3807::Remy Zero
3809
+ 3808::Inkubus Sukkubus
3810
+ 3809::Monty Python
3811
+ 3810::Steely Dan
3812
+ 3811::MUM
3813
+ 3812::Traveling Wilburys
3814
+ 3813::The Nitty Gritty Dirt Band
3815
+ 3814::Carly Simon
3816
+ 3815::This Will Destroy You
3817
+ 3816::Big Brother & The Holding Company
3818
+ 3817::Cage the Elephant
3819
+ 3818::Bill Hicks
3820
+ 3819::K's Choice
3821
+ 3820::Little Feat
3822
+ 3821::Sam & Dave
3823
+ 3822::Three Dog Night
3824
+ 3823::Dan Fogelberg
3825
+ 3824::Poe
3826
+ 3825::Indigo Girls
3827
+ 3826::Nikka Costa
3828
+ 3827::Crosby, Stills, Nash & Young
3829
+ 3828::And So I Watch You From Afar
3830
+ 3829::Elysian Fields
3831
+ 3830::pg.lost
3832
+ 3831::Maserati
3833
+ 3832::Dan Auerbach
3834
+ 3833::Eddie Izzard
3835
+ 3834::Firefall
3836
+ 3835::Zwan
3837
+ 3836::Cine
3838
+ 3837::Copacabana Club
3839
+ 3838::E.S. Posthumus
3840
+ 3839::Of the Wand and the Moon
3841
+ 3840::Rodrigo y Gabriela
3842
+ 3841::Bell X1
3843
+ 3842::Les Savy Fav
3844
+ 3843::Steve Earle
3845
+ 3844::Cibo Matto
3846
+ 3845::Jonny Greenwood
3847
+ 3846::Susumu Yokota
3848
+ 3847::Get Cape. Wear Cape. Fly
3849
+ 3848::Fred Astaire
3850
+ 3849::Kay Starr
3851
+ 3850::Jamie Lidell
3852
+ 3851::Gustavo Santaolalla
3853
+ 3852::Gnawa Diffusion
3854
+ 3853::Michelle Branch
3855
+ 3854::Stephanie McIntosh
3856
+ 3855::Queen + Paul Rodgers
3857
+ 3856::Mägo de Oz
3858
+ 3857::Jill Scott
3859
+ 3858::The Easybeats
3860
+ 3859::El Cuarteto de Nos
3861
+ 3860::Horace Andy
3862
+ 3861::Os Paralamas do Sucesso
3863
+ 3862::Plastiscines
3864
+ 3863::Laurel Aitken
3865
+ 3864::Chico Science & Nação Zumbi
3866
+ 3865::The Wailers
3867
+ 3866::The Warlocks
3868
+ 3867::Spacemen 3
3869
+ 3868::Mala Rodríguez
3870
+ 3869::Aesop Rock
3871
+ 3870::The Field Mice
3872
+ 3871::Gustavo Cerati
3873
+ 3872::Buju Banton
3874
+ 3873::Glasvegas
3875
+ 3874::Kinky
3876
+ 3875::The Rifles
3877
+ 3876::Gil Scott-Heron
3878
+ 3877::Elastica
3879
+ 3878::Paul Weller
3880
+ 3879::The English Beat
3881
+ 3880::Estelle
3882
+ 3881::Toots and the Maytals
3883
+ 3882::Mansun
3884
+ 3883::The Feelies
3885
+ 3884::The Duke Spirit
3886
+ 3885::Kula Shaker
3887
+ 3886::Gregory Isaacs
3888
+ 3887::Burning Spear
3889
+ 3888::Barrington Levy
3890
+ 3889::Ike & Tina Turner
3891
+ 3890::The Tears
3892
+ 3891::Easy Star All-Stars
3893
+ 3892::The Thrills
3894
+ 3893::Bernard Butler
3895
+ 3894::Free the Robots
3896
+ 3895::John Mayall & The Bluesbreakers
3897
+ 3896::Sleeper
3898
+ 3897::Pharrell
3899
+ 3898::The Telescopes
3900
+ 3899::Marianne Faithfull
3901
+ 3900::The Lucksmiths
3902
+ 3901::Tindersticks
3903
+ 3902::Julian Cope
3904
+ 3903::Pernice Brothers
3905
+ 3904::You Say Party! We Say Die!
3906
+ 3905::VETO
3907
+ 3906::Shout Out Louds
3908
+ 3907::Curve
3909
+ 3908::Art Brut
3910
+ 3909::Love and Rockets
3911
+ 3910::Fela Kuti
3912
+ 3911::Windy & Carl
3913
+ 3912::Grouper
3914
+ 3913::Dictaphone
3915
+ 3914::Porn Sword Tobacco
3916
+ 3915::Ada
3917
+ 3916::The Herbaliser
3918
+ 3917::Secos & Molhados
3919
+ 3918::Neu!
3920
+ 3919::Pentagram
3921
+ 3920::Le Orme
3922
+ 3921::Manfred Mann's Earth Band
3923
+ 3922::Blue Cheer
3924
+ 3923::Saint Vitus
3925
+ 3924::Miike Snow
3926
+ 3925::Atmosphere
3927
+ 3926::Roy Ayers
3928
+ 3927::Sarah Vaughan
3929
+ 3928::Deerhunter
3930
+ 3929::Fink
3931
+ 3930::The Temper Trap
3932
+ 3931::Maria Gadú
3933
+ 3932::Seu Jorge
3934
+ 3933::Zeca Baleiro
3935
+ 3934::Roberta Sá
3936
+ 3935::Yuksek
3937
+ 3936::Djavan
3938
+ 3937::Wax Poetic
3939
+ 3938::Elsiane
3940
+ 3939::Jorge Ben Jor
3941
+ 3940::Logh
3942
+ 3941::Donavon Frankenreiter
3943
+ 3942::The Naked and Famous
3944
+ 3943::Urge Overkill
3945
+ 3944::Paulinho Moska
3946
+ 3945::Mariana Aydar
3947
+ 3946::3-11 Porter
3948
+ 3947::Alessandro Safina
3949
+ 3948::Shaman
3950
+ 3949::Republica
3951
+ 3950::Canned Heat
3952
+ 3951::Natiruts
3953
+ 3952::Blaze
3954
+ 3953::Gong
3955
+ 3954::Derek and the Dominos
3956
+ 3955::Collective Soul
3957
+ 3956::Richard Wright
3958
+ 3957::The Servant
3959
+ 3958::Ananda Shake
3960
+ 3959::Library Tapes
3961
+ 3960::Divine Heresy
3962
+ 3961::Afghan Whigs
3963
+ 3962::Bayside
3964
+ 3963::The Courteeners
3965
+ 3964::Someone Still Loves You Boris Yeltsin
3966
+ 3965::The Apples in Stereo
3967
+ 3966::Final Fantasy
3968
+ 3967::Delorean
3969
+ 3968::Wild Nothing
3970
+ 3969::Set Your Goals
3971
+ 3970::The Bloody Beetroots
3972
+ 3971::The Feeling
3973
+ 3972::Pure Reason Revolution
3974
+ 3973::Atlas Sound
3975
+ 3974::Portugal. The Man
3976
+ 3975::Pnau
3977
+ 3976::Rotting Christ
3978
+ 3977::Midnight Juggernauts
3979
+ 3978::Ampop
3980
+ 3979::Saltillo
3981
+ 3980::Bane
3982
+ 3981::Acceptance
3983
+ 3982::NOFX
3984
+ 3983::No Use for a Name
3985
+ 3984::Tears Run Rings
3986
+ 3985::Noah and the Whale
3987
+ 3986::...And You Will Know Us by the Trail of Dead
3988
+ 3987::Jack Peñate
3989
+ 3988::Catherine Wheel
3990
+ 3989::Mute Math
3991
+ 3990::Goose
3992
+ 3991::Labradford
3993
+ 3992::Cloud Cult
3994
+ 3993::The Secret Handshake
3995
+ 3994::PlayRadioPlay!
3996
+ 3995::The Unicorns
3997
+ 3996::Spleen United
3998
+ 3997::Polvo
3999
+ 3998::Scratch Acid
4000
+ 3999::The Whip
4001
+ 4000::Zoot Woman
4002
+ 4001::Go:Audio
4003
+ 4002::Great Northern
4004
+ 4003::The Dears
4005
+ 4004::Hawk Nelson
4006
+ 4005::Destroy The Runner
4007
+ 4006::Frankmusik
4008
+ 4007::Van She
4009
+ 4008::No Age
4010
+ 4009::Voxtrot
4011
+ 4010::Lights Out Asia
4012
+ 4011::Sleater-Kinney
4013
+ 4012::We Have Band
4014
+ 4013::The Monochrome Set
4015
+ 4014::Mission of Burma
4016
+ 4015::Bishop Allen
4017
+ 4016::The Dodos
4018
+ 4017::Swervedriver
4019
+ 4018::Edward Sharpe & the Magnetic Zeros
4020
+ 4019::Steve Aoki
4021
+ 4020::Thurston Moore
4022
+ 4021::Plastilina Mosh
4023
+ 4022::Sleeping at Last
4024
+ 4023::French Teen Idol
4025
+ 4024::Hate Eternal
4026
+ 4025::Sugar Ray
4027
+ 4026::Film School
4028
+ 4027::Secret Shine
4029
+ 4028::Surfer Blood
4030
+ 4029::The Republic Tigers
4031
+ 4030::We Smoke Fags
4032
+ 4031::The Big Pink
4033
+ 4032::Under the Influence of Giants
4034
+ 4033::Soundpool
4035
+ 4034::Paper Route
4036
+ 4035::Sick Puppies
4037
+ 4036::BarlowGirl
4038
+ 4037::Ryan Cabrera
4039
+ 4038::Akcent
4040
+ 4039::Vanilla Sky
4041
+ 4040::Carolina Liar
4042
+ 4041::Casting Crowns
4043
+ 4042::Vertical Horizon
4044
+ 4043::Falling Up
4045
+ 4044::Orson
4046
+ 4045::T.M.Revolution
4047
+ 4046::Pain Confessor
4048
+ 4047::The Blood Brothers
4049
+ 4048::Diablo Swing Orchestra
4050
+ 4049::Satyricon
4051
+ 4050::Karnivool
4052
+ 4051::Winds of Plague
4053
+ 4052::Deathspell Omega
4054
+ 4053::Anata
4055
+ 4054::Blut aus Nord
4056
+ 4055::Closure In Moscow
4057
+ 4056::近藤浩治
4058
+ 4057::Koop
4059
+ 4058::PMMP
4060
+ 4059::Boys Noize
4061
+ 4060::Orphaned Land
4062
+ 4061::Andromeda
4063
+ 4062::Hanni Kohl
4064
+ 4063::Steve Vai
4065
+ 4064::Haggard
4066
+ 4065::Mors Principium Est
4067
+ 4066::Café Tacuba
4068
+ 4067::Jarabe de Palo
4069
+ 4068::Ark
4070
+ 4069::Marty Friedman
4071
+ 4070::Maná
4072
+ 4071::Ojos de Brujo
4073
+ 4072::Cacophony
4074
+ 4073::I'm from Barcelona
4075
+ 4074::Diablo
4076
+ 4075::Paul Anka
4077
+ 4076::Damn Yankees
4078
+ 4077::Diabolical Masquerade
4079
+ 4078::Jumbo
4080
+ 4079::Banco del Mutuo Soccorso
4081
+ 4080::Sacred Reich
4082
+ 4081::Septic Flesh
4083
+ 4082::Legion of the Damned
4084
+ 4083::Rosetta Stone
4085
+ 4084::Jane Birkin & Serge Gainsbourg
4086
+ 4085::Bill Haley
4087
+ 4086::Bella Morte
4088
+ 4087::Museum
4089
+ 4088::The Airborne Toxic Event
4090
+ 4089::Legowelt
4091
+ 4090::Robert Plant
4092
+ 4091::Elmore James
4093
+ 4092::Sonny Boy Williamson
4094
+ 4093::Freddie King
4095
+ 4094::Albert King
4096
+ 4095::Citizen Cope
4097
+ 4096::Dr. John
4098
+ 4097::Otis Rush
4099
+ 4098::Grant Green
4100
+ 4099::Junior Wells
4101
+ 4100::Lightnin' Hopkins
4102
+ 4101::Koko Taylor
4103
+ 4102::Cursive
4104
+ 4103::Ted Leo and The Pharmacists
4105
+ 4104::Laura Veirs
4106
+ 4105::La Oreja de Van Gogh
4107
+ 4106::Fernanda Takai
4108
+ 4107::As Blood Runs Black
4109
+ 4108::akissforjersey
4110
+ 4109::Unearth
4111
+ 4110::Bury Your Dead
4112
+ 4111::Sonny Rollins
4113
+ 4112::Cauterize
4114
+ 4113::Rufio
4115
+ 4114::My American Heart
4116
+ 4115::The Hush Sound
4117
+ 4116::Andy McKee
4118
+ 4117::Breathe Electric
4119
+ 4118::The Audition
4120
+ 4119::Moneen
4121
+ 4120::The Bled
4122
+ 4121::Broadway
4123
+ 4122::LoveHateHero
4124
+ 4123::División Minúscula
4125
+ 4124::Daphne Loves Derby
4126
+ 4125::KSU
4127
+ 4126::Four Year Strong
4128
+ 4127::For Today
4129
+ 4128::Maroon
4130
+ 4129::The Agony Scene
4131
+ 4130::Earth Crisis
4132
+ 4131::Emarosa
4133
+ 4132::Morningwood
4134
+ 4133::Hird
4135
+ 4134::The Dave Brubeck Quartet
4136
+ 4135::Gustav Mahler
4137
+ 4136::Franz Joseph Haydn
4138
+ 4137::Farid Farjad
4139
+ 4138::梶浦由記
4140
+ 4139::Marco Borsato
4141
+ 4140::Joe Walsh
4142
+ 4141::Sailor Moon
4143
+ 4142::Montrose
4144
+ 4143::Nephew
4145
+ 4144::Volbeat
4146
+ 4145::Colin Hay
4147
+ 4146::John Cale
4148
+ 4147::Beth Gibbons & Rustin Man
4149
+ 4148::Bay City Rollers
4150
+ 4149::Free
4151
+ 4150::Booker T. & The MG's
4152
+ 4151::Eddie Money
4153
+ 4152::Bob Seger
4154
+ 4153::Gene Vincent
4155
+ 4154::Peter & Gordon
4156
+ 4155::John Mellencamp
4157
+ 4156::Bill Haley and the Comets
4158
+ 4157::Wanda Jackson
4159
+ 4158::The Swinging Blue Jeans
4160
+ 4159::The Cowsills
4161
+ 4160::Foghat
4162
+ 4161::Fats Domino
4163
+ 4162::Harry Belafonte
4164
+ 4163::Ricky Nelson
4165
+ 4164::Lesley Gore
4166
+ 4165::The Jeff Healey Band
4167
+ 4166::Tommy James & The Shondells
4168
+ 4167::Little River Band
4169
+ 4168::The Marshall Tucker Band
4170
+ 4169::The Lively Ones
4171
+ 4170::Dion & The Belmonts
4172
+ 4171::The Music Machine
4173
+ 4172::Kenny Rogers
4174
+ 4173::Waylon Jennings
4175
+ 4174::The Beautiful South
4176
+ 4175::The Left Banke
4177
+ 4176::Carl Perkins
4178
+ 4177::Tennessee Ernie Ford
4179
+ 4178::Bo Diddley
4180
+ 4179::Blackfoot
4181
+ 4180::Heathen
4182
+ 4181::Forbidden
4183
+ 4182::Exumer
4184
+ 4183::Our Lady Peace
4185
+ 4184::Dishwalla
4186
+ 4185::Gama Bomb
4187
+ 4186::Dark Angel
4188
+ 4187::Joseph Arthur
4189
+ 4188::Saving Abel
4190
+ 4189::All About Eve
4191
+ 4190::The Breeders
4192
+ 4191::Letzte Instanz
4193
+ 4192::British Sea Power
4194
+ 4193::Dead Fish
4195
+ 4194::Fear Before the March of Flames
4196
+ 4195::Téléphone
4197
+ 4196::White Rose Movement
4198
+ 4197::Milla Jovovich
4199
+ 4198::Renaud
4200
+ 4199::Georges Brassens
4201
+ 4200::Alain Bashung
4202
+ 4201::Prince & The Revolution
4203
+ 4202::Prince and the New Power Generation
4204
+ 4203::Mariza
4205
+ 4204::Gal Costa
4206
+ 4205::Ry Cooder
4207
+ 4206::Francis Cabrel
4208
+ 4207::2002
4209
+ 4208::Jane Birkin
4210
+ 4209::Phil Ochs
4211
+ 4210::Mitchel Musso
4212
+ 4211::Esmée Denters
4213
+ 4212::The Ready Set
4214
+ 4213::New Boyz
4215
+ 4214::Stereolab
4216
+ 4215::Throwing Muses
4217
+ 4216::Cranes
4218
+ 4217::Deadlock
4219
+ 4218::Innerpartysystem
4220
+ 4219::川井憲次
4221
+ 4220::Empyrium
4222
+ 4221::Javier Navarrete
4223
+ 4222::Rick James
4224
+ 4223::Bobby Brown
4225
+ 4224::The Polyphonic Spree
4226
+ 4225::Chew Lips
4227
+ 4226::Soulwax
4228
+ 4227::Scarlett Johansson
4229
+ 4228::DatA
4230
+ 4229::2 Many DJ's
4231
+ 4230::Danger
4232
+ 4231::Filthy Dukes
4233
+ 4232::Danny
4234
+ 4233::Zac Efron
4235
+ 4234::Polly Scattergood
4236
+ 4235::Marié Digby
4237
+ 4236::Ben Lee
4238
+ 4237::Bethany Joy Lenz
4239
+ 4238::Tyler Hilton
4240
+ 4239::Grace Potter and the Nocturnals
4241
+ 4240::Heather Nova
4242
+ 4241::Die Antwoord
4243
+ 4242::Muchy
4244
+ 4243::Reverend and The Makers
4245
+ 4244::Wild Beasts
4246
+ 4245::Dying Fetus
4247
+ 4246::Trouble
4248
+ 4247::Summoning
4249
+ 4248::Autopsy
4250
+ 4249::Vanilla Ninja
4251
+ 4250::Moi dix Mois
4252
+ 4251::Charlie Clouser
4253
+ 4252::Azam Ali
4254
+ 4253::My Brightest Diamond
4255
+ 4254::Shearwater
4256
+ 4255::Slint
4257
+ 4256::Vas
4258
+ 4257::High on Fire
4259
+ 4258::Olivier Messiaen
4260
+ 4259::Richard Thompson
4261
+ 4260::Richard Wagner
4262
+ 4261::Count Basie
4263
+ 4262::Benny Goodman
4264
+ 4263::Shocking Blue
4265
+ 4264::Jermaine Jackson
4266
+ 4265::Frida
4267
+ 4266::Neneh Cherry
4268
+ 4267::Wendy Carlos
4269
+ 4268::Lalo Schifrin
4270
+ 4269::Doris Day
4271
+ 4270::Billy Preston
4272
+ 4271::The Waterboys
4273
+ 4272::Ian McCulloch
4274
+ 4273::Julee Cruise
4275
+ 4274::Beth Orton
4276
+ 4275::Shulman
4277
+ 4276::Ott
4278
+ 4277::坂本龍一
4279
+ 4278::LFO
4280
+ 4279::Fila Brazillia
4281
+ 4280::Entheogenic
4282
+ 4281::Alpha
4283
+ 4282::Jon Kennedy
4284
+ 4283::Murcof
4285
+ 4284::Jega
4286
+ 4285::Jan Jelinek
4287
+ 4286::Medwyn Goodall
4288
+ 4287::Keiko Matsui
4289
+ 4288::Jim Brickman
4290
+ 4289::Urban Myth Club
4291
+ 4290::Moonbootica
4292
+ 4291::Sven Väth
4293
+ 4292::Dykehouse
4294
+ 4293::Dubtribe Sound System
4295
+ 4294::Deuter
4296
+ 4295::VFSix
4297
+ 4296::Raven-Symoné
4298
+ 4297::Adiemus
4299
+ 4298::Therapy?
4300
+ 4299::Can
4301
+ 4300::Terry Reid
4302
+ 4301::Secret Service
4303
+ 4302::Ours
4304
+ 4303::Krokus
4305
+ 4304::Cowboy Junkies
4306
+ 4305::Fountains of Wayne
4307
+ 4306::Nick Cave
4308
+ 4307::Del Shannon
4309
+ 4308::Randy Crawford
4310
+ 4309::Michael Angelo Batio
4311
+ 4310::Tokyo Blade
4312
+ 4311::Bernard Herrmann
4313
+ 4312::Renan Luce
4314
+ 4313::The Shangri-Las
4315
+ 4314::Juliana Hatfield
4316
+ 4315::John Scofield
4317
+ 4316::Brad Mehldau
4318
+ 4317::Nina Hagen
4319
+ 4318::Lizzy Borden
4320
+ 4319::Hellyeah
4321
+ 4320::Blake Lewis
4322
+ 4321::Jeremy Camp
4323
+ 4322::Neon Trees
4324
+ 4323::Flight of the Conchords
4325
+ 4324::Bushido
4326
+ 4325::Anders Manga
4327
+ 4326::Crematory
4328
+ 4327::Death Angel
4329
+ 4328::Virgin Steele
4330
+ 4329::Avalanch
4331
+ 4330::Farben Lehre
4332
+ 4331::The Pillows
4333
+ 4332::Tinariwen
4334
+ 4333::David Holmes
4335
+ 4334::Halford
4336
+ 4335::Billy Corgan
4337
+ 4336::Stephen Lynch
4338
+ 4337::Mercedes Sosa
4339
+ 4338::Peter Broderick
4340
+ 4339::Cirque du Soleil
4341
+ 4340::Luis Miguel
4342
+ 4341::Elbow
4343
+ 4342::Everclear
4344
+ 4343::Do As Infinity
4345
+ 4344::Griffin House
4346
+ 4345::Qntal
4347
+ 4346::Bacilos
4348
+ 4347::Lu
4349
+ 4348::We Are The Ocean
4350
+ 4349::This Town Needs Guns
4351
+ 4350::Immanu El
4352
+ 4351::Agnes
4353
+ 4352::The Starting Line
4354
+ 4353::Salt The Wound
4355
+ 4354::Nightmare of You
4356
+ 4355::Say Anything
4357
+ 4356::Burning Skies
4358
+ 4357::Still Remains
4359
+ 4358::Matchbook Romance
4360
+ 4359::Just Surrender
4361
+ 4360::The Red Chord
4362
+ 4361::The Music
4363
+ 4362::Reggie and the Full Effect
4364
+ 4363::fun.
4365
+ 4364::The Damned Things
4366
+ 4365::As Cities Burn
4367
+ 4366::Young Galaxy
4368
+ 4367::autoKratz
4369
+ 4368::The Echelon Effect
4370
+ 4369::Decoder
4371
+ 4370::Roberta Flack
4372
+ 4371::Pedro the Lion
4373
+ 4372::Rogue Wave
4374
+ 4373::Young Knives
4375
+ 4374::There For Tomorrow
4376
+ 4375::Seabear
4377
+ 4376::The Moldy Peaches
4378
+ 4377::Liquido
4379
+ 4378::Colour Haze
4380
+ 4379::Tiger Army
4381
+ 4380::Nerina Pallot
4382
+ 4381::Capsule
4383
+ 4382::鈴木あみ
4384
+ 4383::Sienna Skies
4385
+ 4384::3
4386
+ 4385::Third Day
4387
+ 4386::El Canto del Loco
4388
+ 4387::Exilia
4389
+ 4388::Kiln
4390
+ 4389::Midlake
4391
+ 4390::Roots Manuva
4392
+ 4391::The Bouncing Souls
4393
+ 4392::Anouk
4394
+ 4393::Deaf Center
4395
+ 4394::Aidan Baker
4396
+ 4395::Jóhann Jóhannsson
4397
+ 4396::Battles
4398
+ 4397::Cut Chemist
4399
+ 4398::Gregor Samsa
4400
+ 4399::Jaga Jazzist
4401
+ 4400::The Kilimanjaro Darkjazz Ensemble
4402
+ 4401::Soldout
4403
+ 4402::Alex Smoke
4404
+ 4403::Two Lone Swordsmen
4405
+ 4404::Gabriel Ananda
4406
+ 4405::Mira Calix
4407
+ 4406::Sylvain Chauveau
4408
+ 4407::Dirty Three
4409
+ 4408::El-P
4410
+ 4409::Descendents
4411
+ 4410::Kingdom
4412
+ 4411::The Vandals
4413
+ 4412::Dance of Days
4414
+ 4413::Milow
4415
+ 4414::Lloyd
4416
+ 4415::Marques Houston
4417
+ 4416::Electric Wizard
4418
+ 4417::Propagandhi
4419
+ 4418::Lifelover
4420
+ 4419::Edge of Sanity
4421
+ 4420::dälek
4422
+ 4421::Restart
4423
+ 4422::Medicine
4424
+ 4423::Fobia
4425
+ 4424::Cool Kids of Death
4426
+ 4425::Strike
4427
+ 4426::Marjorie Estiano
4428
+ 4427::Ida Maria
4429
+ 4428::The Holloways
4430
+ 4429::oOoOO
4431
+ 4430::Modern Witch
4432
+ 4431::Bolt Action Five
4433
+ 4432::EPMD
4434
+ 4433::Discharge
4435
+ 4434::D.R.I.
4436
+ 4435::7Seconds
4437
+ 4436::Skip James
4438
+ 4437::Fear
4439
+ 4438::Adolescents
4440
+ 4439::Green River
4441
+ 4440::Cro-Mags
4442
+ 4441::Wax Tailor
4443
+ 4442::The Cooper Temple Clause
4444
+ 4443::Stina Nordenstam
4445
+ 4444::Lycia
4446
+ 4445::Audrey
4447
+ 4446::Harland
4448
+ 4447::Mýa
4449
+ 4448::Secede
4450
+ 4449::X-Marks the Pedwalk
4451
+ 4450::Solitary Experiments
4452
+ 4451::Seefeel
4453
+ 4452::T. Raumschmiere
4454
+ 4453::Klinik
4455
+ 4454::Quantic
4456
+ 4455::Northern Lite
4457
+ 4456::Audion
4458
+ 4457::Underground Resistance
4459
+ 4458::Jackson and His Computer Band
4460
+ 4459::GreenGender
4461
+ 4460::Alex Under
4462
+ 4461::Panik
4463
+ 4462::Baroness
4464
+ 4463::Bert Jansch
4465
+ 4464::Ashley Roberts
4466
+ 4465::Cordel do Fogo Encantado
4467
+ 4466::Teoman
4468
+ 4467::The Dubliners
4469
+ 4468::Darvin
4470
+ 4469::The Walkmen
4471
+ 4470::The Black Angels
4472
+ 4471::The Tea Party
4473
+ 4472::¡Forward, Russia!
4474
+ 4473::Carl Orff
4475
+ 4474::Georges Bizet
4476
+ 4475::Yasmin Levy
4477
+ 4476::Peter Brötzmann
4478
+ 4477::Mulatu Astatke
4479
+ 4478::Ali Farka Touré & Toumani Diabaté
4480
+ 4479::Erkan Oğur
4481
+ 4480::Irfan
4482
+ 4481::Joe Henderson
4483
+ 4482::Coleman Hawkins
4484
+ 4483::Wayne Shorter
4485
+ 4484::Zoë Keating
4486
+ 4485::Serkan Süleymaniye
4487
+ 4486::Mercan Dede
4488
+ 4487::Mark Owen
4489
+ 4488::Jesus Jones
4490
+ 4489::White Town
4491
+ 4490::Françoise Hardy
4492
+ 4491::Tim Urban
4493
+ 4492::Gladys Knight & The Pips
4494
+ 4493::Ben E. King
4495
+ 4494::Eve 6
4496
+ 4495::Edwin McCain
4497
+ 4496::Montell Jordan
4498
+ 4497::mewithoutYou
4499
+ 4498::Copeland
4500
+ 4499::45 Grave
4501
+ 4500::4hero
4502
+ 4501::808 State
4503
+ 4502::Feeder
4504
+ 4503::James Dean Bradfield
4505
+ 4504::Louis Prima
4506
+ 4505::Gorefest
4507
+ 4506::Big Mama Thornton
4508
+ 4507::The Hooters
4509
+ 4508::Dar Williams
4510
+ 4509::Mates of State
4511
+ 4510::Ben Frost
4512
+ 4511::Oficina G3
4513
+ 4512::NOMAK
4514
+ 4513::Swollen Members
4515
+ 4514::Jimmy Reed
4516
+ 4515::Billy Squier
4517
+ 4516::Julie London
4518
+ 4517::The Gaslight Anthem
4519
+ 4518::Nicole Atkins
4520
+ 4519::Sugarplum Fairy
4521
+ 4520::Little Man Tate
4522
+ 4521::New Radicals
4523
+ 4522::Chantal Kreviazuk
4524
+ 4523::Jackson Browne
4525
+ 4524::BBMak
4526
+ 4525::Lisa Loeb
4527
+ 4526::Wim Mertens
4528
+ 4527::Morton Feldman
4529
+ 4528::Pink Turns Blue
4530
+ 4529::Steve Bug
4531
+ 4530::Deee-Lite
4532
+ 4531::Scout Niblett
4533
+ 4532::I Monster
4534
+ 4533::Boozoo Bajou
4535
+ 4534::Maps And Diagrams
4536
+ 4535::Paris Combo
4537
+ 4536::Hildur Guðnadóttir
4538
+ 4537::Joe McElderry
4539
+ 4538::Britt Nicole
4540
+ 4539::Hillsong
4541
+ 4540::Chris & Cosey
4542
+ 4541::Telepathe
4543
+ 4542::Dima Bilan
4544
+ 4543::Luis Fonsi
4545
+ 4544::Dionne Bromfield
4546
+ 4545::Alex Ubago
4547
+ 4546::Franz Schubert
4548
+ 4547::Michael Nyman
4549
+ 4548::Nâdiya
4550
+ 4549::Fernanda Brum
4551
+ 4550::Twin Shadow
4552
+ 4551::Machine Drum
4553
+ 4552::Mest
4554
+ 4553::The Hold Steady
4555
+ 4554::Cannonball Adderley
4556
+ 4555::Platinum Blonde
4557
+ 4556::Ned's Atomic Dustbin
4558
+ 4557::Moist
4559
+ 4558::久石譲
4560
+ 4559::Alpinestars
4561
+ 4560::Jonathan Coulton
4562
+ 4561::Carter the Unstoppable Sex Machine
4563
+ 4562::Martha and the Muffins
4564
+ 4563::Kenickie
4565
+ 4564::Magnet
4566
+ 4565::Giorgio Moroder
4567
+ 4566::Catatonia
4568
+ 4567::Pete Townshend
4569
+ 4568::Charlotte Martin
4570
+ 4569::Brooke Fraser
4571
+ 4570::György Ligeti
4572
+ 4571::Zbigniew Preisner
4573
+ 4572::Belleruche
4574
+ 4573::Molotov
4575
+ 4574::Jawbreaker
4576
+ 4575::Gomez
4577
+ 4576::Youssou N'Dour
4578
+ 4577::Nileppez
4579
+ 4578::Keith Moon
4580
+ 4579::Lulu
4581
+ 4580::Joan as Police Woman
4582
+ 4581::Dezerter
4583
+ 4582::O. Children
4584
+ 4583::Gevende
4585
+ 4584::Bülent Ortaçgil
4586
+ 4585::Feridun Düzağaç
4587
+ 4586::Be Your Own Pet
4588
+ 4587::A.C. Newman
4589
+ 4588::Zooey Deschanel
4590
+ 4589::Shantel
4591
+ 4590::Global Deejays
4592
+ 4591::Wonder Girls
4593
+ 4592::Brazilian Girls
4594
+ 4593::Drexciya
4595
+ 4594::Coeur de Pirate
4596
+ 4595::SoKo
4597
+ 4596::Nine Black Alps
4598
+ 4597::The Sunshine Underground
4599
+ 4598::Gridlock
4600
+ 4599::Sparta
4601
+ 4600::Elefant
4602
+ 4601::Fantasia
4603
+ 4602::Fikret Kızılok
4604
+ 4603::Astrobrite
4605
+ 4604::Darren Criss
4606
+ 4605::Abney Park
data/ref/lastfm/train_data.df ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7db71a60d3da4c449091fbc254efc9602f81eec6eb9a60534811ccd61974dbc1
3
+ size 2462846
data/ref/movielens/Test_data.df ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d38308a0078d4518dc9eeb619c8d1afb53d830712cc7d36ac51cc47eaafc97d6
3
+ size 43718
data/ref/movielens/Val_data.df ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a950900d0055649e2983ef403d189d5fdc5bd4f84d229a377806d371b32e460
3
+ size 43200
data/ref/movielens/train_data.df ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bac4c88b11e6f76d914c2b22aadbb97315a78432bb79f424f7015775053b9546
3
+ size 9633075
data/ref/movielens/u.item ADDED
The diff for this file is too large to render. See raw diff
 
data/ref/steam/Test_data.df ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2766b9faf90e3531a35207c01fbef2f655af97c21a6ba7d18581bbef2bc6ad59
3
+ size 59142
data/ref/steam/id2name.txt ADDED
@@ -0,0 +1,3581 @@
1
+ 0::Ironbound
2
+ 1::Real Pool 3D - Poolians
3
+ 2::Half-Life
4
+ 3::The Ship: Murder Party
5
+ 4::DEFCON
6
+ 5::The Ship: Single Player
7
+ 6::Garry's Mod
8
+ 7::Gumboy - Crazy Adventures™
9
+ 8::Vegas: Make It Big™
10
+ 9::Civilization IV®: Warlords
11
+ 10::X-COM: Terror From the Deep
12
+ 11::Tomb Raider: Anniversary
13
+ 12::Final DOOM
14
+ 13::Master Levels for Doom II
15
+ 14::DOOM II
16
+ 15::Wolfenstein 3D
17
+ 16::QUAKE Mission Pack 1: Scourge of Armagon
18
+ 17::QUAKE Mission Pack 2: Dissolution of Eternity
19
+ 18::Genesis Rising
20
+ 19::Warhammer® 40,000: Dawn of War® - Dark Crusade
21
+ 20::Command & Conquer: Red Alert 3
22
+ 21::Prison Tycoon 3™: Lockdown
23
+ 22::18 Wheels of Steel: American Long Haul
24
+ 23::Tycoon City: New York
25
+ 24::Death to Spies
26
+ 25::Unreal 2: The Awakening
27
+ 26::Silent Hunter® III
28
+ 27::Luxor 3
29
+ 28::Sprint Cars Road to Knoxville
30
+ 29::Ride! Carnival Tycoon
31
+ 30::Eternity's Child
32
+ 31::X-COM: Enforcer
33
+ 32::X-COM: Apocalypse
34
+ 33::S.T.A.L.K.E.R.: Clear Sky
35
+ 34::The Witcher: Enhanced Edition Director's Cut
36
+ 35::Multiwinia
37
+ 36::Sid Meier's Civilization IV: Colonization
38
+ 37::Brothers in Arms: Hell's Highway™
39
+ 38::Xpand Rally Xtreme
40
+ 39::Heroes of Might & Magic V: Hammers of Fate
41
+ 40::Brothers in Arms: Earned in Blood™
42
+ 41::The Settlers®: Rise Of An Empire Gold Edition
43
+ 42::Bully: Scholarship Edition
44
+ 43::Saints Row 2
45
+ 44::Rise of the Argonauts
46
+ 45::Knights of Honor
47
+ 46::Unreal Tournament 3 Black
48
+ 47::The Graveyard
49
+ 48::Burnout Paradise: The Ultimate Box
50
+ 49::The Maw
51
+ 50::Puzzle Kingdoms
52
+ 51::X-Blades
53
+ 52::Zeno Clash
54
+ 53::King's Bounty: The Legend
55
+ 54::Death Track®: Resurrection
56
+ 55::Freedom Force
57
+ 56::Darkest Hour: Europe '44-'45
58
+ 57::Prototype™
59
+ 58::The Elder Scrolls III: Morrowind® Game of the Year Edition
60
+ 59::Armored Fist 3
61
+ 60::BRAINPIPE: A Plunge to Unhumanity
62
+ 61::Hunting Unlimited 2010
63
+ 62::Tales of Monkey Island Complete Pack
64
+ 63::Armed and Dangerous®
65
+ 64::The Secret of Monkey Island: Special Edition
66
+ 65::TimeShift™
67
+ 66::Bionic Commando
68
+ 67::Osmos
69
+ 68::Fallout 2: A Post Nuclear Role Playing Game
70
+ 69::Crash Time 2
71
+ 70::World of Zoo
72
+ 71::Windosill
73
+ 72::Machinarium
74
+ 73::Need for Speed: Shift
75
+ 74::Command & Conquer: Red Alert 3 - Uprising
76
+ 75::Painkiller: Resurrection
77
+ 76::SpellForce 2 - Anniversary Edition
78
+ 77::Rush for Berlin Gold
79
+ 78::Hammerfight
80
+ 79::SpellForce - Platinum Edition
81
+ 80::Secret Files 2: Puritas Cordis
82
+ 81::Dragon Age: Origins
83
+ 82::Aztaka
84
+ 83::LEGO® Star Wars™ - The Complete Saga
85
+ 84::Command & Conquer 3: Kane's Wrath
86
+ 85::Wings of Prey
87
+ 86::Fallout 3: Game of the Year Edition
88
+ 87::Roogoo
89
+ 88::Pirates, Vikings, and Knights II
90
+ 89::Super Laser Racer
91
+ 90::Mount & Blade: Warband
92
+ 91::M.U.D. TV
93
+ 92::The Misadventures of P.B. Winterbottom
94
+ 93::Iron Grip: Warlord
95
+ 94::Flight of the Icarus
96
+ 95::Ziro
97
+ 96::SEGA Mega Drive and Genesis Classics
98
+ 97::Alpha Protocol™
99
+ 98::Kane & Lynch 2: Dog Days
100
+ 99::Puzzle Quest 2
101
+ 100::Burn Zombie Burn!
102
+ 101::Victoria I Complete
103
+ 102::Arma 2: British Armed Forces
104
+ 103::Chime
105
+ 104::Swarm Arena
106
+ 105::Serious Sam Classic: The Second Encounter
107
+ 106::X: Tension
108
+ 107::Governor of Poker 2 - Premium Edition
109
+ 108::Grotesque Tactics: Evil Heroes
110
+ 109::Doc Clock: The Toasted Sandwich of Time
111
+ 110::Puzzle Bots
112
+ 111::Nimbus
113
+ 112::Flight Control HD
114
+ 113::Pound of Ground
115
+ 114::Pat &amp; Mat
116
+ 115::Alternativa
117
+ 116::Call of Duty®: Black Ops
118
+ 117::Arcadia
119
+ 118::Oddworld: Stranger's Wrath HD
120
+ 119::Grand Theft Auto III
121
+ 120::A.R.E.S.: Extinction Agenda
122
+ 121::Dead Space™ 2
123
+ 122::The Sims™ 3
124
+ 123::YOU DON’T KNOW JACK®
125
+ 124::DUNGEONS - Steam Special Edition
126
+ 125::BIT.TRIP RUNNER
127
+ 126::Dinner Date
128
+ 127::Majesty 2 Collection
129
+ 128::A.V.A. Alliance of Valiant Arms™
130
+ 129::Spiral Knights
131
+ 130::Solar 2
132
+ 131::F.E.A.R. 3
133
+ 132::Dungeons and Dragons: Daggerdale
134
+ 133::Nancy Drew®: Secrets Can Kill REMASTERED
135
+ 134::Nancy Drew®: The Captive Curse
136
+ 135::EDGE
137
+ 136::Puzzle Pirates
138
+ 137::Warhammer 40,000: Space Marine
139
+ 138::Men of War: Vietnam
140
+ 139::Wasteland Angel
141
+ 140::Nancy Drew®: Shadow at the Water's Edge
142
+ 141::Bunch of Heroes
143
+ 142::The Binding of Isaac
144
+ 143::Off-Road Drive
145
+ 144::Dead Rising 2: Off the Record
146
+ 145::Memoir '44 Online
147
+ 146::Serious Sam: The Random Encounter
148
+ 147::Puzzler World 2
149
+ 148::Airline Tycoon 2
150
+ 149::The Haunted: Hells Reach
151
+ 150::DC Universe™ Online
152
+ 151::Saints Row: The Third Initiation Station
153
+ 152::Fractal: Make Blooms Not War
154
+ 153::Galactic Civilizations® II: Ultimate Edition
155
+ 154::Earth Defense Force: Insect Armageddon
156
+ 155::X3: Albion Prelude
157
+ 156::Trine 2: Complete Story
158
+ 157::Oil Rush
159
+ 158::King Arthur II: The Role-Playing Wargame
160
+ 159::Fortune Summoners
161
+ 160::Star Trek Online
162
+ 161::Jagged Alliance - Back in Action
163
+ 162::Titan Attacks!
164
+ 163::Alan Wake
165
+ 164::Realm of the Mad God
166
+ 165::Painkiller: Recurring Evil
167
+ 166::Stacking
168
+ 167::Wizorb
169
+ 168::Myst V
170
+ 169::Yesterday
171
+ 170::Midnight Mysteries 4: Haunted Houdini
172
+ 171::Ridge Racer™ Unbounded
173
+ 172::Imagine Earth
174
+ 173::Scary Girl
175
+ 174::Binary Domain
176
+ 175::Analogue: A Hate Story
177
+ 176::Toy Soldiers
178
+ 177::Resident Evil: Operation Raccoon City
179
+ 178::Ticket to Ride
180
+ 179::The Lord of the Rings Online™
181
+ 180::Indie Game: The Movie
182
+ 181::Krater
183
+ 182::The Dark Eye: Chains of Satinav
184
+ 183::Tom Clancy's Ghost Recon: Future Soldier™
185
+ 184::Quantum Conundrum
186
+ 185::Tribes: Ascend
187
+ 186::Source Filmmaker
188
+ 187::eXceed 2nd - Vampire REX
189
+ 188::eXceed 3rd - Jade Penetrate Black Package
190
+ 189::Death Rally
191
+ 190::Hero Academy
192
+ 191::Iron Brigade
193
+ 192::Galaxy on Fire 2™ Full HD
194
+ 193::Sleeping Dogs
195
+ 194::Din's Curse
196
+ 195::Batman: Arkham City - Game of the Year Edition
197
+ 196::To the Moon
198
+ 197::Intrusion 2
199
+ 198::Carrier Command: Gaea Mission
200
+ 199::3D-Coat V4.8
201
+ 200::Cortex Command
202
+ 201::Rocketbirds: Hardboiled Chicken
203
+ 202::Doom 3: BFG Edition
204
+ 203::A Game of Dwarves
205
+ 204::Hotline Miami
206
+ 205::Guns of Icarus Online
207
+ 206::POSTAL 2
208
+ 207::F1 RACE STARS™
209
+ 208::Sine Mora
210
+ 209::Stealth Bastard Deluxe
211
+ 210::The Book of Unwritten Tales: The Critter Chronicles
212
+ 211::Mabinogi
213
+ 212::Triple Town
214
+ 213::Legacy of Kain: Defiance
215
+ 214::Iron Sky: Invasion
216
+ 215::Angelica Weaver: Catch Me When You Can
217
+ 216::Fieldrunners 2
218
+ 217::Ace Combat Assault Horizon - Enhanced Edition
219
+ 218::Retrovirus
220
+ 219::Cities XL Platinum
221
+ 220::Super House of Dead Ninjas
222
+ 221::Highborn
223
+ 222::Tomb Raider - The Final Hours Digital Book
224
+ 223::Monster Loves You!
225
+ 224::Alien Spidy
226
+ 225::POSTAL
227
+ 226::Cut the Rope
228
+ 227::Anodyne
229
+ 228::Another World – 20th Anniversary Edition
230
+ 229::Sang-Froid - Tales of Werewolves
231
+ 230::Cubemen 2
232
+ 231::Surgeon Simulator
233
+ 232::Poker Night 2
234
+ 233::StarDrive
235
+ 234::Sanctum 2
236
+ 235::Reus
237
+ 236::Resident Evil Revelations / Biohazard Revelations
238
+ 237::Remember Me
239
+ 238::Penny Arcade's On the Rain-Slick Precipice of Darkness 4
240
+ 239::Mortal Kombat Komplete Edition
241
+ 240::Soldier Front 2
242
+ 241::TrackMania² Valley
243
+ 242::FINAL FANTASY VII
244
+ 243::BIT.TRIP FATE
245
+ 244::Ittle Dew
246
+ 245::CastleStorm
247
+ 246::Skulls of the Shogun
248
+ 247::Rise of the Triad
249
+ 248::Cloudberry Kingdom™
250
+ 249::Inquisitor
251
+ 250::Worms Clan Wars
252
+ 251::Electronic Super Joy
253
+ 252::PixelJunk™ Monsters Ultimate
254
+ 253::Lilly Looking Through
255
+ 254::America's Army: Proving Grounds
256
+ 255::Game Dev Tycoon
257
+ 256::Delver
258
+ 257::MirrorMoon EP
259
+ 258::Castle of Illusion
260
+ 259::Ironclad Tactics
261
+ 260::Foul Play
262
+ 261::Scribblenauts Unmasked: A DC Comics Adventure
263
+ 262::Farming Simulator 2013 Titanium Edition
264
+ 263::SNOW
265
+ 264::Soundodger+
266
+ 265::Iesabel
267
+ 266::Knights and Merchants
268
+ 267::Eleusis
269
+ 268::Bridge Constructor
270
+ 269::Bad Hotel
271
+ 270::140
272
+ 271::Anachronox
273
+ 272::Knytt Underground
274
+ 273::Deus Ex: Human Revolution - Director's Cut
275
+ 274::Contagion
276
+ 275::Blood of the Werewolf
277
+ 276::Master Reboot
278
+ 277::Alone in the Dark
279
+ 278::Deadly Premonition: The Director's Cut
280
+ 279::State of Decay
281
+ 280::Long Live The Queen
282
+ 281::Rain Blood Chronicles: Mirage
283
+ 282::Vox
284
+ 283::Kingdoms Rise
285
+ 284::Ring Runner: Flight of the Sages
286
+ 285::9.03m
287
+ 286::The Mysterious Cities of Gold
288
+ 287::The Last Express Gold Edition
289
+ 288::Finding Teddy
290
+ 289::Dark Fall 2: Lights Out
291
+ 290::Dark Fall: The Journal
292
+ 291::LogoMaker 4
293
+ 292::Broken Sword 5 - the Serpent's Curse
294
+ 293::Darkout
295
+ 294::Dominions 4: Thrones of Ascension
296
+ 295::Wooden Sen'SeY
297
+ 296::Shufflepuck Cantina Deluxe
298
+ 297::King’s Bounty: Legions
299
+ 298::FATE
300
+ 299::RaySupreme 3D
301
+ 300::Cold War
302
+ 301::CONSORTIUM
303
+ 302::Nidhogg
304
+ 303::Galcon Legends
305
+ 304::Humans Must Answer
306
+ 305::Estranged: Act I
307
+ 306::Paper Sorcerer
308
+ 307::Hero Siege
309
+ 308::The Castle Doctrine
310
+ 309::Saturday Morning RPG
311
+ 310::Serena
312
+ 311::Octodad: Dadliest Catch
313
+ 312::Aqua Kitty - Milk Mine Defender
314
+ 313::Sentinel 3: Homeworld
315
+ 314::Aces Wild: Manic Brawling Action!
316
+ 315::GTGD S1: More Than a Gamer
317
+ 316::BlazBlue: Calamity Trigger
318
+ 317::Disciples III: Reincarnation
319
+ 318::The Plan
320
+ 319::LocoCycle
321
+ 320::Girls Like Robots
322
+ 321::METAL SLUG 3
323
+ 322::Card City Nights
324
+ 323::Ikaruga
325
+ 324::Hexcells
326
+ 325::Epic Battle Fantasy 4
327
+ 326::Assassin's Creed Freedom Cry
328
+ 327::Castlevania: Lords of Shadow 2
329
+ 328::Towtruck Simulator 2015
330
+ 329::Dominique Pamplemousse
331
+ 330::1954 Alcatraz
332
+ 331::Constant C
333
+ 332::Deus Ex: The Fall
334
+ 333::LUFTRAUSERS
335
+ 334::Danmaku Unlimited 2
336
+ 335::Heroine's Quest: The Herald of Ragnarok
337
+ 336::Quest of Dungeons
338
+ 337::Black Mirror II
339
+ 338::Abyss: The Wraiths of Eden
340
+ 339::The Tomorrow War
341
+ 340::El Matador
342
+ 341::Marauder
343
+ 342::Harvester
344
+ 343::Evolution RTS
345
+ 344::House of 1,000 Doors: Family Secrets Collector's Edition
346
+ 345::Volt
347
+ 346::Secrets of Rætikon
348
+ 347::Putt-Putt® Joins the Parade
349
+ 348::Heldric - The legend of the shoemaker
350
+ 349::Abalone
351
+ 350::UFO: Aftermath
352
+ 351::RC Cars
353
+ 352::Ascension to the Throne
354
+ 353::FarSky
355
+ 354::Depths of Fear :: Knossos
356
+ 355::Nostradamus: The Last Prophecy
357
+ 356::Vangers
358
+ 357::Street Racing Syndicate
359
+ 358::Clockwork Tales: Of Glass and Ink
360
+ 359::Rogue Shooter: The FPS Roguelike
361
+ 360::Brigade E5: New Jagged Union
362
+ 361::7,62 High Calibre
363
+ 362::Black Rainbow
364
+ 363::BloodRayne Betrayal
365
+ 364::Sweezy Gunner
366
+ 365::Indie Graphics Bundle - Royalty Free Sprites
367
+ 366::The Whispered World Special Edition
368
+ 367::Descent: FreeSpace – The Great War
369
+ 368::Coldfire Keep
370
+ 369::Fantasy Grounds
371
+ 370::Chronology
372
+ 371::The Last Tinker™: City of Colors
373
+ 372::Wildlife Park 3
374
+ 373::Rhiannon: Curse of the Four Branches
375
+ 374::Fearless Fantasy
376
+ 375::Orc Attack: Flatulent Rebellion
377
+ 376::Spy Fox 3 "Operation Ozone"
378
+ 377::Pajama Sam 3: You Are What You Eat From Your Head To Your Feet
379
+ 378::Rage Runner
380
+ 379::Pretentious Game
381
+ 380::Legionwood 2: Rise of the Eternal's Realm - Director's Cut
382
+ 381::DRAKERZ-Confrontation
383
+ 382::Paradigm Shift
384
+ 383::Retro Game Crunch
385
+ 384::I Am Vegend - Zombiegeddon
386
+ 385::A.I.M.2 Clan Wars
387
+ 386::Numba Deluxe
388
+ 387::CAPSULE
389
+ 388::Defense Zone 2
390
+ 389::GearCity
391
+ 390::The Fall
392
+ 391::The Campaign Series: Fall Weiss
393
+ 392::Dysan the Shapeshifter
394
+ 393::Enigmatis 2: The Mists of Ravenwood
395
+ 394::Richard &amp; Alice
396
+ 395::Lifeless Planet Premier Edition
397
+ 396::Magicite
398
+ 397::Castle: Never Judge a Book by its Cover
399
+ 398::Echo of the Wilds
400
+ 399::Reversion - The Escape (1st Chapter)
401
+ 400::Enemy Front
402
+ 401::Faery - Legends of Avalon
403
+ 402::A Wizard's Lizard
404
+ 403::Bot Colony
405
+ 404::Stick RPG 2: Director's Cut
406
+ 405::Heroes Rise: The Hero Project
407
+ 406::Beyond Space Remastered Edition
408
+ 407::MotoGP™14
409
+ 408::Spoiler Alert
410
+ 409::Monsters Ate My Birthday Cake
411
+ 410::Claire
412
+ 411::Deponia: The Complete Journey
413
+ 412::House of 1000 Doors: The Palm of Zoroaster Collector's Edition
414
+ 413::Namariel Legends: Iron Lord Premium Edition
415
+ 414::Evil Pumpkin: The Lost Halloween
416
+ 415::Rooms: The Main Building
417
+ 416::Necronomicon: The Dawning of Darkness
418
+ 417::Pivvot
419
+ 418::Steel & Steam: Episode 1
420
+ 419::Scooby Doo! & Looney Tunes Cartoon Universe: Adventure
421
+ 420::Wildlife Park 2
422
+ 421::Wildlife Park 2 - Fantasy
423
+ 422::Wildlife Park 2 - Dino World
424
+ 423::Wildlife Park 2 - Horses
425
+ 424::Pixel Hunter
426
+ 425::Detective Case and Clown Bot in: Murder in the Hotel Lisbon
427
+ 426::Pressured
428
+ 427::Toxic Bunny HD
429
+ 428::Ghostship Aftermath
430
+ 429::Lantern Forge
431
+ 430::Sokobond
432
+ 431::Data Hacker: Initiation
433
+ 432::My Ex-Boyfriend the Space Tyrant
434
+ 433::Dungeon Defenders Eternity
435
+ 434::Third Eye Crime
436
+ 435::Machines At War 3
437
+ 436::Vertical Drop Heroes HD
438
+ 437::Crazy Plant Shop
439
+ 438::The Tower
440
+ 439::Road Not Taken
441
+ 440::War on Folvos
442
+ 441::Wyv and Keep: The Temple of the Lost Idol
443
+ 442::Another Perspective
444
+ 443::Mega Coin Squad
445
+ 444::Heavy Fire: Afghanistan
446
+ 445::Pixeluvo
447
+ 446::Platypus II
448
+ 447::Advanced Tactics Gold
449
+ 448::Platypus
450
+ 449::Nux
451
+ 450::Heroes & Legends: Conquerors of Kolhar
452
+ 451::Shadowgate (2014)
453
+ 452::Curse: The Eye of Isis
454
+ 453::Deep Under the Sky
455
+ 454::Amerzone: The Explorer’s Legacy
456
+ 455::The Journey Down: Chapter Two
457
+ 456::How to Survive
458
+ 457::One Day For Ched
459
+ 458::The Flying Dutchman
460
+ 459::Fable Anniversary
461
+ 460::Jacob Jones and the Bigfoot Mystery : Episode 1
462
+ 461::The Collider
463
+ 462::Color Symphony
464
+ 463::My Lands: Black Gem Hunting
465
+ 464::WAKFU
466
+ 465::Pro Rugby Manager 2015
467
+ 466::Enola
468
+ 467::Wasteland 2: Director's Cut
469
+ 468::Cho Dengeki Stryker All Ages Version
470
+ 469::Magicmaker
471
+ 470::Middle-earth™: Shadow of Mordor™
472
+ 471::Front Page Sports Football
473
+ 472::Left in the Dark: No One on Board
474
+ 473::METAL SLUG X
475
+ 474::Moto Racer Collection
476
+ 475::Disney Princess: My Fairytale Adventure
477
+ 476::Metal Dead
478
+ 477::Styx: Master of Shadows
479
+ 478::Crow
480
+ 479::Costume Quest 2
481
+ 480::Tron 2.0
482
+ 481::Sigils of Elohim
483
+ 482::The Evil Within
484
+ 483::Enigmatis: The Ghosts of Maple Creek
485
+ 484::The Treasures of Montezuma 3
486
+ 485::Time Rifters
487
+ 486::Screencheat
488
+ 487::Vampires: Guide Them to Safety!
489
+ 488::Sign Motion
490
+ 489::Sproggiwood
491
+ 490::Enforcer: Police Crime Action
492
+ 491::REVOLVER360 RE:ACTOR
493
+ 492::The Stalin Subway: Red Veil
494
+ 493::Daedalus - No Escape
495
+ 494::The Moon Sliver
496
+ 495::Miscreated
497
+ 496::The Interactive Adventures of Dog Mendonça & Pizzaboy®
498
+ 497::Alea Jacta Est
499
+ 498::The Binding of Isaac: Rebirth
500
+ 499::Broken Sword 4 - the Angel of Death
501
+ 500::Command HQ
502
+ 501::Eradicator
503
+ 502::Winged Sakura: Mindy's Arc
504
+ 503::Pitiri 1977
505
+ 504::Gunspell - Steam Edition
506
+ 505::Gold Rush! Anniversary
507
+ 506::Deadlings: Rotten Edition
508
+ 507::Toybox Turbos
509
+ 508::Amphora
510
+ 509::Randal's Monday
511
+ 510::Ski-World Simulator
512
+ 511::The Blue Flamingo
513
+ 512::Adventure Time: The Secret Of The Nameless Kingdom
514
+ 513::WARMACHINE: Tactics
515
+ 514::Letter Quest: Grimm's Journey
516
+ 515::Death Skid Marks
517
+ 516::AppGameKit: Easy Game Development
518
+ 517::Luna: Shattered Hearts: Episode 1
519
+ 518::Brink of Consciousness: The Lonely Hearts Murders
520
+ 519::Craft The World
521
+ 520::If My Heart Had Wings
522
+ 521::Shroud of the Avatar: Forsaken Virtues
523
+ 522::Haunted House: Cryptic Graves
524
+ 523::Tesla Breaks the World!
525
+ 524::Joe Dever's Lone Wolf HD Remastered
526
+ 525::Mining Industry Simulator
527
+ 526::Galcon 2: Galactic Conquest
528
+ 527::iO
529
+ 528::Bet On Soldier
530
+ 529::Elegy for a Dead World
531
+ 530::JUJU
532
+ 531::FINAL FANTASY® XIII-2
533
+ 532::SunAge: Battle for Elysium
534
+ 533::Town of Salem
535
+ 534::Dead Effect
536
+ 535::BLOCKADE 3D
537
+ 536::Shadows of War
538
+ 537::Mechs & Mercs: Black Talons
539
+ 538::Squirreltopia
540
+ 539::Disorder
541
+ 540::TRISTOY
542
+ 541::Crystal Catacombs
543
+ 542::Witch's Pranks: Frog's Fortune Collector's Edition
544
+ 543::HuniePop
545
+ 544::Cat Goes Fishing
546
+ 545::Resident Evil / biohazard HD REMASTER
547
+ 546::PARTICLE MACE
548
+ 547::Among Ripples
549
+ 548::Stranded Deep
550
+ 549::Guild Commander
551
+ 550::Venetica - Gold Edition
552
+ 551::Tulpa
553
+ 552::The Lady
554
+ 553::Overture
555
+ 554::Terra Incognita ~ Chapter One: The Descendant
556
+ 555::Astray
557
+ 556::Forsaken Isle
558
+ 557::Barter Empire
559
+ 558::Pixel Heroes: Byte & Magic
560
+ 559::Wickland
561
+ 560::Plazma Being
562
+ 561::Samudai
563
+ 562::Dark Forester
564
+ 563::Spirit Run - Fire vs. Ice
565
+ 564::Damned Nation Reborn
566
+ 565::Barbarian Brawl
567
+ 566::On A Roll 3D
568
+ 567::Hotline Miami 2: Wrong Number Digital Comic
569
+ 568::Odysseus: Long Way Home
570
+ 569::Night Shift
571
+ 570::Over 9000 Zombies!
572
+ 571::Oddworld: New 'n' Tasty
573
+ 572::DRAGON BALL XENOVERSE
574
+ 573::DYNASTY WARRIORS® 8 Empires
575
+ 574::White Night
576
+ 575::Tallowmere
577
+ 576::Damnation City of Death
578
+ 577::Mighty Dungeons
579
+ 578::Marble Age
580
+ 579::ARMED SEVEN
581
+ 580::Shelter 2
582
+ 581::Drizzlepath
583
+ 582::Automation - The Car Company Tycoon Game
584
+ 583::Sid Meier's Starships
585
+ 584::Spirit of War
586
+ 585::Bermuda
587
+ 586::Oceanhorn: Monster of Uncharted Seas
588
+ 587::Woolfe - The Red Hood Diaries
589
+ 588::Trapped Dead: Lockdown
590
+ 589::Incognito
591
+ 590::Factions: Origins of Malu
592
+ 591::12 Labours of Hercules
593
+ 592::Walkover
594
+ 593::Exowar
595
+ 594::Dungeon Highway
596
+ 595::Hospital Manager
597
+ 596::Red Lake
598
+ 597::Spirits of Xanadu
599
+ 598::Claws & Feathers
600
+ 599::Grim Legends 2: Song of the Dark Swan
601
+ 600::An Octave Higher
602
+ 601::Pixel Puzzles 2: Anime
603
+ 602::RIDE
604
+ 603::Jones On Fire
605
+ 604::Deadly Sin
606
+ 605::Half-Life 2: Update
607
+ 606::A Pixel Story
608
+ 607::Attack of the Labyrinth +
609
+ 608::AdVenture Capitalist
610
+ 609::Bloodsports.TV
611
+ 610::Hare In The Hat
612
+ 611::The Defenders: The Second Wave
613
+ 612::gravilon
614
+ 613::Grass Simulator
615
+ 614::Astronaut Simulator
616
+ 615::Adventures of Bertram Fiddle: Episode 1: A Dreadly Business
617
+ 616::Copa Petrobras de Marcas
618
+ 617::Defend Your Life: TD
619
+ 618::Dead Synchronicity: Tomorrow Comes Today
620
+ 619::Hero Generations
621
+ 620::Love And Order
622
+ 621::The Undying Plague
623
+ 622::Westerado: Double Barreled
624
+ 623::Telepath Tactics
625
+ 624::House of Caravan
626
+ 625::Highlands
627
+ 626::Airport Simulator 2015
628
+ 627::Parcel
629
+ 628::Environmental Station Alpha
630
+ 629::Blue Rose
631
+ 630::Crest - an indirect god sim
632
+ 631::Mind Snares: Alice's Journey
633
+ 632::Crypt of the NecroDancer
634
+ 633::Crypt of the NecroDancer Extended Soundtrack
635
+ 634::Hypt
636
+ 635::The Lost Battalion: All Out Warfare
637
+ 636::Breakout Invaders
638
+ 637::RPG Maker 2003
639
+ 638::Ys VI: The Ark of Napishtim
640
+ 639::The Music Machine
641
+ 640::Action Henk
642
+ 641::Treeker: The Lost Glasses
643
+ 642::Galactic Civilizations III
644
+ 643::Trainz: A New Era
645
+ 644::Luna's Wandering Stars
646
+ 645::Age of Castles: Warlords
647
+ 646::Dustbowl
648
+ 647::NEON STRUCT
649
+ 648::Spaceman Sparkles 2
650
+ 649::Porcunipine
651
+ 650::Teddy Terror
652
+ 651::OBEY
653
+ 652::True Bliss
654
+ 653::The Incredible Adventures of Van Helsing III
655
+ 654::Wyrmsun
656
+ 655::Teddy Floppy Ear - The Race
657
+ 656::A Bastard's Tale
658
+ 657::AKIBA'S TRIP: Undead & Undressed
659
+ 658::Homesick
660
+ 659::Sleep Attack
661
+ 660::Blender Game Asset Creation
662
+ 661::BLADESTORM: Nightmare
663
+ 662::Alter World
664
+ 663::Waste Walkers
665
+ 664::The Silent Age
666
+ 665::Tiamat X
667
+ 666::stratO
668
+ 667::MASSIVE CHALICE
669
+ 668::Sirius Online
670
+ 669::Soccer Rage
671
+ 670::ZombieRun
672
+ 671::You Must Build A Boat
673
+ 672::Duck Game
674
+ 673::Chicken Invaders 3
675
+ 674::Alone in the Dark: Illumination™
676
+ 675::Super Star Path
677
+ 676::Koala Kids
678
+ 677::Nomad
679
+ 678::CDF Ghostship
680
+ 679::Age of Fear: The Undead King
681
+ 680::Lost Lands: Dark Overlord
682
+ 681::Action Alien
683
+ 682::Spy Bugs
684
+ 683::Scarab Tales
685
+ 684::Prismatica
686
+ 685::Farm Frenzy: Hurricane Season
687
+ 686::The Amber Throne
688
+ 687::oO
689
+ 688::Lilly and Sasha: Guardian Angels
690
+ 689::FaceRig
691
+ 690::Once Bitten, Twice Dead!
692
+ 691::Hello Kitty and Sanrio Friends Racing
693
+ 692::City of Fools
694
+ 693::Trap Them
695
+ 694::Girlfriend Rescue
696
+ 695::Chime Sharp
697
+ 696::RC Mini Racers
698
+ 697::Squarelands
699
+ 698::CroNix
700
+ 699::Super Dungeon Run
701
+ 700::Cosmic Rocket Defender
702
+ 701::Lethal RPG: War
703
+ 702::Aberoth
704
+ 703::Super Hipster Lumberjack
705
+ 704::"Glow Ball" - The billiard puzzle game
706
+ 705::Dungeon League
707
+ 706::Microcosmum: survival of cells
708
+ 707::Project Druid - 2D Labyrinth Explorer-
709
+ 708::Siege Wars
710
+ 709::Hydraulic Empire
711
+ 710::MANOS
712
+ 711::Gunjitsu
713
+ 712::Dream Chamber
714
+ 713::SAGA
715
+ 714::Empyrion - Galactic Survival
716
+ 715::Celestian Tales: Old North
717
+ 716::Royal Bounty HD
718
+ 717::Wanderlust Adventures
719
+ 718::Blaster Shooter GunGuy!
720
+ 719::Gravity Error
721
+ 720::Skyrim Script Extender (SKSE)
722
+ 721::Hacknet
723
+ 722::Angry Arrows
724
+ 723::Silver Creek Falls: Chapter 1
725
+ 724::C-Wars
726
+ 725::Three Digits
727
+ 726::Curses 'N Chaos
728
+ 727::Planetary Annihilation: TITANS
729
+ 728::Big Thinkers Kindergarten
730
+ 729::Reign of Bullets
731
+ 730::The Settlers Online
732
+ 731::Race Track Builder
733
+ 732::Pure Hold'em
734
+ 733::Afterlife Empire
735
+ 734::Inevitability
736
+ 735::Teeworlds
737
+ 736::Planet of the Eyes
738
+ 737::ANKI
739
+ 738::Sound Shift
740
+ 739::Kult: Heretic Kingdoms
741
+ 740::Giana Sisters: Dream Runners
742
+ 741::RFLEX
743
+ 742::Fran Bow
744
+ 743::Onikira - Demon Killer
745
+ 744::The Black Watchmen
746
+ 745::Dead In Bermuda
747
+ 746::Playing History 2 - Slave Trade
748
+ 747::Calvino Noir
749
+ 748::One Piece Pirate Warriors 3
750
+ 749::Exile's End
751
+ 750::Mad Max: Fury Road
752
+ 751::Painters Guild
753
+ 752::NOBUNAGA'S AMBITION: Sphere of Influence
754
+ 753::iZBOT
755
+ 754::Gridberd
756
+ 755::Master Spy
757
+ 756::MadOut Ice Storm
758
+ 757::EasyAntiCheat eSports
759
+ 758::Ravenmark: Scourge of Estellion
760
+ 759::Queen's Quest: Tower of Darkness
761
+ 760::Superstatic
762
+ 761::Aerannis
763
+ 762::Arcane Sorcery
764
+ 763::The Archetype
765
+ 764::Three Heroes
766
+ 765::Battle of the Bulge
767
+ 766::Train Simulator
768
+ 767::Star Command Galaxies
769
+ 768::Splendor
770
+ 769::TY the Tasmanian Tiger 4
771
+ 770::Metal Reaper Online
772
+ 771::CodeSpells
773
+ 772::Airport Madness: Time Machine
774
+ 773::Forsaken Fortress Strategy
775
+ 774::Templar Battleforce
776
+ 775::Doodle God
777
+ 776::Lost Lands: A Hidden Object Adventure
778
+ 777::Toto Temple Deluxe
779
+ 778::Armikrog
780
+ 779::A.I. Invasion
781
+ 780::Game Corp DX
782
+ 781::Super Sky Arena
783
+ 782::Hidden Object Bundle 4 in 1
784
+ 783::Slipstream 5000
785
+ 784::Normality
786
+ 785::Zenohell
787
+ 786::Downwell
788
+ 787::Mushroom 11
789
+ 788::War of Beach
790
+ 789::Warhammer 40,000: Deathwatch - Enhanced Edition
791
+ 790::Zombie Grinder
792
+ 791::Lonath Online
793
+ 792::Dungeon Manager ZV
794
+ 793::Overlord: Fellowship of Evil
795
+ 794::Kingdom: Classic
796
+ 795::Noct
797
+ 796::Armor Clash
798
+ 797::Divine Slice of Life
799
+ 798::Outpost 13
800
+ 799::A Wolf in Autumn
801
+ 800::Divinity: Original Sin - Enhanced Edition
802
+ 801::Statues
803
+ 802::Infinite Space III: Sea of Stars
804
+ 803::The Last Crown: Midnight Horror
805
+ 804::Indie Game Battle
806
+ 805::Voidspire Tactics
807
+ 806::The Last NightMary - A Lenda do Cabeça de Cuia
808
+ 807::Anno 2205™
809
+ 808::Space - The Return Of The Pixxelfrazzer
810
+ 809::Your Quest
811
+ 810::Swordbreaker The Game
812
+ 811::The Incredible Adventures of Van Helsing: Final Cut
813
+ 812::Krosmaster Arena
814
+ 813::Garden Rescue: Christmas Edition
815
+ 814::Dispatcher
816
+ 815::Dota 2 Player Profiles
817
+ 816::Showing Tonight: Mindhunters Incident
818
+ 817::Uriel's Chasm 2: את
819
+ 818::True Lover's Knot
820
+ 819::Bitardia
821
+ 820::Osteya
822
+ 821::Gender Bender DNA Twister Extreme
823
+ 822::Jurassic Island: The Dinosaur Zoo
824
+ 823::One More Dungeon
825
+ 824::RC Simulation 2.0
826
+ 825::Strania - The Stella Machina -
827
+ 826::FIM Speedway Grand Prix 15
828
+ 827::Lowglow
829
+ 828::Reveal The Deep
830
+ 829::Wild Season
831
+ 830::GUILTY GEAR Xrd -SIGN-
832
+ 831::MechWarrior Online™
833
+ 832::OH! RPG!
834
+ 833::Gabe Newell Simulator 2.0
835
+ 834::Hatoful Boyfriend: Holiday Star
836
+ 835::Turnover
837
+ 836::Gigachess
838
+ 837::Taimumari
839
+ 838::Super Slam Dunk Touchdown
840
+ 839::Waves 2
841
+ 840::River City Super Sports Challenge ~All Stars Special~
842
+ 841::The Withering
843
+ 842::The fall of gods
844
+ 843::Swapperoo
845
+ 844::Our Nation's Miner
846
+ 845::What's under your blanket !?
847
+ 846::Squirbs
848
+ 847::Chiptune Champion
849
+ 848::That Dragon, Cancer
850
+ 849::Eight Mini Racers
851
+ 850::LoveBeat
852
+ 851::Pro Basketball Manager 2016
853
+ 852::Zombie Wars: Invasion
854
+ 853::BATTLE PIXELS
855
+ 854::Kivi, Toilet and Shotgun
856
+ 855::Dragon's Dogma: Dark Arisen
857
+ 856::Astro Lords: Oort Cloud
858
+ 857::No Turning Back: The Pixel Art Action-Adventure Roguelike
859
+ 858::Scrap Mechanic
860
+ 859::World's Dawn
861
+ 860::Knight Online
862
+ 861::Incredible Dracula: Chasing Love Collector's Edition
863
+ 862::Apocalypse Hotel - The Post-Apocalyptic Hotel Simulator!
864
+ 863::LEGO® MARVEL's Avengers
865
+ 864::Midnight
866
+ 865::Time of Dragons
867
+ 866::Spellweaver
868
+ 867::Dead6hot
869
+ 868::Pocket Rumble
870
+ 869::Tales of Symphonia
871
+ 870::Horror in the Asylum
872
+ 871::Tap Tap Legions - Epic battles within 5 seconds!
873
+ 872::Soccer Manager
874
+ 873::SnakEscape
875
+ 874::Doors
876
+ 875::Defragmented
877
+ 876::Break Chance Memento
878
+ 877::Jumpix Jump
879
+ 878::Super Helmets on Fire DX Ultra Edition Plus Alpha
880
+ 879::My Name is Mayo
881
+ 880::Marmoset Hexels 3
882
+ 881::Army of Tentacles: (Not) A Cthulhu Dating Sim
883
+ 882::We Know the Devil
884
+ 883::Layers of Fear
885
+ 884::Street Fighter V
886
+ 885::Gurgamoth
887
+ 886::Find Out
888
+ 887::Story Of the Survivor
889
+ 888::Lamia's Game Room
890
+ 889::String Theory
891
+ 890::CABAL Online
892
+ 891::No Pineapple Left Behind
893
+ 892::Super Night Riders
894
+ 893::Astro Duel
895
+ 894::NeonXSZ
896
+ 895::OutDrive
897
+ 896::ALONE IN SPACE
898
+ 897::Just Death
899
+ 898::Shadwen
900
+ 899::The Kindred
901
+ 900::Atlantic Fleet
902
+ 901::Cat on a Diet
903
+ 902::Lucy -The Eternity She Wished For-
904
+ 903::Balrum
905
+ 904::Banzai Escape
906
+ 905::Age of Gladiators
907
+ 906::Trigger Runners
908
+ 907::New York Taxi Simulator
909
+ 908::CUPID - A free to play Visual Novel
910
+ 909::Mazement
911
+ 910::Wild Animal Racing
912
+ 911::Tom Clancy’s The Division™
913
+ 912::Mind Zero
914
+ 913::From Earth
915
+ 914::Alekhine's Gun
916
+ 915::Unknown Battle
917
+ 916::Hidden Object 6-in-1 bundle
918
+ 917::101 Ways to Die
919
+ 918::Metal Assault
920
+ 919::Cubicolor
921
+ 920::Forgotten Myths CCG
922
+ 921::Borstal
923
+ 922::Vortex: The Gateway
924
+ 923::Atari Vault
925
+ 924::Paws: A Shelter 2 Game
926
+ 925::Villagers
927
+ 926::SAMOLIOTIK
928
+ 927::At the Mountains of Madness
929
+ 928::Super Robot Jump Jump
930
+ 929::Christmas Adventure: Candy Storm
931
+ 930::Super Arcade Football
932
+ 931::Anna's Quest
933
+ 932::NO THING
934
+ 933::Jet Set Knights
935
+ 934::Automata Empire
936
+ 935::S2ENGINE HD
937
+ 936::Warden: Melody of the Undergrowth
938
+ 937::Judgment: Apocalypse Survival Simulation
939
+ 938::Twilight Struggle
940
+ 939::Welkin Road
941
+ 940::Massive
942
+ 941::METAL SLUG 2
943
+ 942::Pang Adventures
944
+ 943::Imperia Online
945
+ 944::Gahkthun of the Golden Lightning Steam Edition
946
+ 945::Spaceport Hope
947
+ 946::Muddy Heights® 2
948
+ 947::ARCADE GAME SERIES: DIG DUG
949
+ 948::ARCADE GAME SERIES: Ms. PAC-MAN
950
+ 949::Klabi
951
+ 950::Save the Dodos
952
+ 951::Drusilla Dreams
953
+ 952::Colours of Magic: Aqua Teeter
954
+ 953::Ghoul Kid
955
+ 954::Cornerstone: The Song of Tyrim
956
+ 955::Endorlight
957
+ 956::The Panic Room
958
+ 957::Abandoned Knight
959
+ 958::Back in 1995
960
+ 959::N.E.R.O.: Nothing Ever Remains Obscure
961
+ 960::Cubium Dreams
962
+ 961::Mr Nibbles Forever
963
+ 962::Puzzle Galaxies
964
+ 963::Zombillie
965
+ 964::The Pit And The Pendulum
966
+ 965::Aselia the Eternal -The Spirit of Eternity Sword-
967
+ 966::Fragments of Him
968
+ 967::Share
969
+ 968::Shadow Complex Remastered
970
+ 969::Parkitect
971
+ 970::Neon Drive
972
+ 971::Princess Isabella: The Rise of an Heir
973
+ 972::Goliath
974
+ 973::The House in Fata Morgana
975
+ 974::Autumn
976
+ 975::SMASHING THE BATTLE
977
+ 976::Evo Explores
978
+ 977::SHOCK TROOPERS
979
+ 978::Romopolis
980
+ 979::htoL#NiQ: The Firefly Diary / htoL#NiQ-ホタルノニッキ-
981
+ 980::Bitardia Cards: Memes of 2ch
982
+ 981::GROOVY
983
+ 982::Survive in Space
984
+ 983::METAGAL
985
+ 984::Soulcaster: Part I & II
986
+ 985::Grandpa's Table
987
+ 986::The Mahjong Huntress
988
+ 987::Warriors' Wrath
989
+ 988::Hyper Bounce Blast
990
+ 989::KNIGHTS
991
+ 990::The Way of Life Free Edition
992
+ 991::The NADI Project
993
+ 992::Magma Tsunami
994
+ 993::Summer Sale
995
+ 994::Investigator
996
+ 995::Anima Gate of Memories
997
+ 996::Home Behind
998
+ 997::A Healer Only Lives Twice
999
+ 998::Rescue Lucy
1000
+ 999::Impossible Quest
1001
+ 1000::Sudoku Quest
1002
+ 1001::Ghost 1.0
1003
+ 1002::Death Goat
1004
+ 1003::GemBreak
1005
+ 1004::OESE
1006
+ 1005::CrazyCars3D
1007
+ 1006::Hover 2030
1008
+ 1007::Vilmonic
1009
+ 1008::Twisted Worlds
1010
+ 1009::Mahjong Destiny
1011
+ 1010::MAZE LORD
1012
+ 1011::XSplit
1013
+ 1012::Farnham Fables
1014
+ 1013::PAC-MAN 256
1015
+ 1014::ChuSingura46+1 S
1016
+ 1015::Affairs of the Court: Choice of Romance
1017
+ 1016::Chicken Assassin - Master of Humiliation
1018
+ 1017::Earth Space Colonies
1019
+ 1018::Vinyl
1020
+ 1019::Tomoyo After ~It's a Wonderful Life~ English Edition
1021
+ 1020::RWBY: Grimm Eclipse
1022
+ 1021::The Lion's Song: Episode 1 - Silence
1023
+ 1022::Goblins and Grottos
1024
+ 1023::Radical Spectrum: Volume 1
1025
+ 1024::Fossil Echo
1026
+ 1025::Replica
1027
+ 1026::Tomato Jones
1028
+ 1027::UNDER NIGHT IN-BIRTH Exe:Late
1029
+ 1028::Muv-Luv
1030
+ 1029::Alteric
1031
+ 1030::RimWorld
1032
+ 1031::Empty Horizons
1033
+ 1032::Mahjong Deluxe 2: Astral Planes
1034
+ 1033::BrainBread 2
1035
+ 1034::Red Rope: Don't Fall Behind
1036
+ 1035::Fabulous Food Truck
1037
+ 1036::DreamBreak
1038
+ 1037::Intruder Alert: Ixian Operations
1039
+ 1038::Starbound
1040
+ 1039::Inverted
1041
+ 1040::Lovely Planet Arcade
1042
+ 1041::Legends of the Universe - StarCore
1043
+ 1042::Marvel: Ultimate Alliance 2
1044
+ 1043::Strange Night
1045
+ 1044::Ratz Instagib
1046
+ 1045::The Return Home
1047
+ 1046::Totally Unbalanced
1048
+ 1047::Intrude
1049
+ 1048::SurvHive
1050
+ 1049::Snow Horse
1051
+ 1050::Pixel Cup Soccer 17
1052
+ 1051::Mibibli's Quest
1053
+ 1052::The Cat! Porfirio's Adventure
1054
+ 1053::Little King's Story
1055
+ 1054::Aircraft War X
1056
+ 1055::Conclusion
1057
+ 1056::BlackSmith HIT
1058
+ 1057::Street Legal Racing: Redline v2.3.1
1059
+ 1058::Music Maker 2017 Premium Steam Edition
1060
+ 1059::Bear With Me
1061
+ 1060::Existentia
1062
+ 1061::Heart&Slash
1063
+ 1062::Blade Ballet
1064
+ 1063::Winter Novel
1065
+ 1064::FORTIFY
1066
+ 1065::Ray Gigant
1067
+ 1066::One Night Two Crazies
1068
+ 1067::Pixel Puzzles 2: RADical ROACH
1069
+ 1068::Cubway
1070
+ 1069::Typoman
1071
+ 1070::Grow Up
1072
+ 1071::Monsters and Monocles
1073
+ 1072::ACE Academy
1074
+ 1073::Deus Ex: Mankind Divided
1075
+ 1074::AdVenture Communist
1076
+ 1075::Automobilista
1077
+ 1076::SMILE GAME BUILDER
1078
+ 1077::Panzer Warfare
1079
+ 1078::STANDBY
1080
+ 1079::Optika
1081
+ 1080::Tribal Pass
1082
+ 1081::Fright Light
1083
+ 1082::SHINRAI - Broken Beyond Despair
1084
+ 1083::Caesar™ IV
1085
+ 1084::Quest for Glory 1-5
1086
+ 1085::Caesar™ 3
1087
+ 1086::Four Sided Fantasy
1088
+ 1087::Hidden Object - 12 in 1 bundle
1089
+ 1088::Agent Walker: Secret Journey
1090
+ 1089::Gochi-Show! -How To Learn Japanese Cooking Game-
1091
+ 1090::Football Mogul 15
1092
+ 1091::Police Tactics: Imperio
1093
+ 1092::Halcyon 6: Starbase Commander
1094
+ 1093::The Tale of Doris and the Dragon - Episode 1
1095
+ 1094::Flat Heroes
1096
+ 1095::Black Sand Drift
1097
+ 1096::Stronghold Legends: Steam Edition
1098
+ 1097::Trials of Azra
1099
+ 1098::Rustangelo
1100
+ 1099::Paladins®
1101
+ 1100::Woodle Tree 2: Worlds
1102
+ 1101::Minimized
1103
+ 1102::Bad ass babes
1104
+ 1103::Wheels of Aurelia
1105
+ 1104::Zenith
1106
+ 1105::NBA 2K17
1107
+ 1106::Heart's Medicine - Time to Heal
1108
+ 1107::Pixel-Warfare: Pro
1109
+ 1108::Fractured Space
1110
+ 1109::Sorcery! Part 4
1111
+ 1110::Dog Sled Saga
1112
+ 1111::Pavilion
1113
+ 1112::EARTHLOCK: Festival of Magic
1114
+ 1113::Sig.NULL
1115
+ 1114::Clustertruck
1116
+ 1115::Anykey Simulator
1117
+ 1116::Bloody Walls
1118
+ 1117::Hardware Engineering
1119
+ 1118::Particle Fleet: Emergence
1120
+ 1119::NEON Ultra
1121
+ 1120::Recursed
1122
+ 1121::MegaTagmension Blanc + Neptune VS Zombies (Neptunia)
1123
+ 1122::Zup!
1124
+ 1123::Platro
1125
+ 1124::Industry Manager: Future Technologies
1126
+ 1125::Scrap Garden - The Day Before
1127
+ 1126::Major\Minor - Complete Edition
1128
+ 1127::Hidden Dimensions 3
1129
+ 1128::Ballistic Protection
1130
+ 1129::BitMaster
1131
+ 1130::Rubek
1132
+ 1131::Feel The Snow
1133
+ 1132::Stay Close
1134
+ 1133::High Octane Drift
1135
+ 1134::Heroes & Generals
1136
+ 1135::The Whisperer in Darkness
1137
+ 1136::True Fear: Forsaken Souls
1138
+ 1137::The Infinite Black
1139
+ 1138::Russian SuperHero Dead Ivan
1140
+ 1139::Bad Caterpillar
1141
+ 1140::Party Saboteurs
1142
+ 1141::PAYDAY 2: John Wick Weapon Pack
1143
+ 1142::Sid Meier’s Civilization® VI
1144
+ 1143::Drift GEAR Racing Free
1145
+ 1144::Hired Ops
1146
+ 1145::Sky Break
1147
+ 1146::Kim
1148
+ 1147::BossConstructor
1149
+ 1148::Ginger: Beyond the Crystal
1150
+ 1149::Halloween Forever
1151
+ 1150::Slayaway Camp
1152
+ 1151::Grab the Bottle
1153
+ 1152::That's Mahjong!
1154
+ 1153::Equin: The Lantern
1155
+ 1154::PHAT PHROG
1156
+ 1155::The Elder Scrolls V: Skyrim Special Edition
1157
+ 1156::NSFW ~ Not a Simulator For Working
1158
+ 1157::Miniature - The Story Puzzle
1159
+ 1158::Balloon Blowout
1160
+ 1159::GO AWAY, THERE'S KUMIS OVER THERE!
1161
+ 1160::Zombie Exodus: Safe Haven
1162
+ 1161::Memory's Dogma CODE:01
1163
+ 1162::Vanguards
1164
+ 1163::Anime Studio Simulator
1165
+ 1164::Trimmer Tycoon
1166
+ 1165::Bye-Bye, Wacky Planet
1167
+ 1166::Soda Girls
1168
+ 1167::Planet Explorers
1169
+ 1168::Goblin and Coins
1170
+ 1169::World of Tanks Blitz
1171
+ 1170::Sethian
1172
+ 1171::Silence
1173
+ 1172::SPINGUN
1174
+ 1173::Chimpact 1 - Chuck's Adventure
1175
+ 1174::Don't open the doors!
1176
+ 1175::The Sandbox Evolution - Craft a 2D Pixel Universe!
1177
+ 1176::Creeper World 2: Anniversary Edition
1178
+ 1177::ICEY
1179
+ 1178::Zone4
1180
+ 1179::Club Naughty
1181
+ 1180::Shuffle!
1182
+ 1181::Rugby Union Team Manager 2017
1183
+ 1182::InfinitasDM
1184
+ 1183::Secret Santa
1185
+ 1184::Counter Agents
1186
+ 1185::Dungeon Souls
1187
+ 1186::Beastiarium
1188
+ 1187::Drop Alive
1189
+ 1188::GAROU: MARK OF THE WOLVES
1190
+ 1189::ZombieCarz
1191
+ 1190::Root Of Evil: The Tailor
1192
+ 1191::Dream Quest
1193
+ 1192::The Dreamlord
1194
+ 1193::BIOS
1195
+ 1194::Glittermitten Grove
1196
+ 1195::8-Bit Invaders!
1197
+ 1196::Blue Tear
1198
+ 1197::Moekuri: Adorable + Tactical SRPG
1199
+ 1198::Art of War: Red Tides
1200
+ 1199::Stern Pinball Arcade
1201
+ 1200::Polygon Attack
1202
+ 1201::Duke of Alpha Centauri
1203
+ 1202::Trick and Treat - Visual Novel
1204
+ 1203::Lily's Day Off
1205
+ 1204::Last Answer
1206
+ 1205::Power Hover
1207
+ 1206::Bad Dream: Coma
1208
+ 1207::Laraan
1209
+ 1208::Illyriad - Grand Strategy MMO
1210
+ 1209::I Am The Hero
1211
+ 1210::Galaxy Admirals
1212
+ 1211::STARDROP
1213
+ 1212::Commands & Colors: The Great War
1214
+ 1213::Imperium Galactica II
1215
+ 1214::Crab Dub
1216
+ 1215::Sticker Craft
1217
+ 1216::A Crashlands Story: Dev Diary
1218
+ 1217::SpiritSphere
1219
+ 1218::Tiles & Tales
1220
+ 1219::PERFECT ANGLE: The puzzle game based on optical illusions
1221
+ 1220::Felix Jumpman
1222
+ 1221::Guild Quest
1223
+ 1222::Warcube
1224
+ 1223::Nyheim
1225
+ 1224::SQUAKE
1226
+ 1225::Fergus The Fly
1227
+ 1226::A House of Many Doors
1228
+ 1227::MOBIUS FINAL FANTASY™
1229
+ 1228::Off-Road Paradise: Trial 4x4
1230
+ 1229::Rock God Tycoon
1231
+ 1230::BOOR
1232
+ 1231::Battle Islands: Commanders
1233
+ 1232::Fairy Fencer F Advent Dark Force | フェアリーフェンサー エフ ADVENT DARK FORCE | 妖精劍士 F ADVENT DARK FORCE
1234
+ 1233::Copoka
1235
+ 1234::Mall Empire
1236
+ 1235::Golf It!
1237
+ 1236::The Curse Of Yendor
1238
+ 1237::Zup! 4
1239
+ 1238::Spoids
1240
+ 1239::Cavern Escape
1241
+ 1240::bob's game
1242
+ 1241::Night in the Woods
1243
+ 1242::Unexplored
1244
+ 1243::Anarchy Online
1245
+ 1244::Open Sorcery
1246
+ 1245::Shift Happens
1247
+ 1246::Northgard
1248
+ 1247::The Exiled
1249
+ 1248::Seasteader
1250
+ 1249::Blackwake
1251
+ 1250::The Inner Darkness
1252
+ 1251::Furious Angels
1253
+ 1252::For The King
1254
+ 1253::Turn Around
1255
+ 1254::VThree
1256
+ 1255::Ino
1257
+ 1256::Zoop! - Hunter's Grimm
1258
+ 1257::Hurricane
1259
+ 1258::ULTIMATE MARVEL VS. CAPCOM 3
1260
+ 1259::SimAirport
1261
+ 1260::Star Merchant
1262
+ 1261::Disc Jam
1263
+ 1262::Lightspeed Frontier
1264
+ 1263::Atelier Firis: The Alchemist and the Mysterious Journey / フィリスのアトリエ ~不思議な旅の錬金術士~
1265
+ 1264::LEGO® Worlds
1266
+ 1265::Reflex Arena
1267
+ 1266::Maximum Override
1268
+ 1267::Good Archer
1269
+ 1268::Blink
1270
+ 1269::Lifeline
1271
+ 1270::WARTILE
1272
+ 1271::Crisis in the Kremlin
1273
+ 1272::Toukiden 2
1274
+ 1273::Splody
1275
+ 1274::Flagster
1276
+ 1275::Monster Slayers
1277
+ 1276::Infinite Tanks
1278
+ 1277::Microgons
1279
+ 1278::Ballistic Overkill
1280
+ 1279::NIGHTSTAR: Rogue Wings
1281
+ 1280::Free! - Iwatobi Swim Club
1282
+ 1281::Mighty Party
1283
+ 1282::State of Anarchy: Master of Mayhem
1284
+ 1283::Idle Evolution
1285
+ 1284::AFL Evolution
1286
+ 1285::Cannons-Defenders: Steam Edition
1287
+ 1286::GIBZ
1288
+ 1287::Kith - Tales from the Fractured Plateaus
1289
+ 1288::Slime-san
1290
+ 1289::Fausts Alptraum
1291
+ 1290::Happy Campers
1292
+ 1291::The Signal From Tölva
1293
+ 1292::Bayonetta
1294
+ 1293::Aaero
1295
+ 1294::A Rose in the Twilight / ロゼと黄昏の古城
1296
+ 1295::Cosmic Star Heroine
1297
+ 1296::Skyforge
1298
+ 1297::The Wild Eternal
1299
+ 1298::Full Throttle Remastered
1300
+ 1299::Odyssey - The Next Generation Science Game
1301
+ 1300::Pixel Sand
1302
+ 1301::Brawlout
1303
+ 1302::Academagia: The Making of Mages
1304
+ 1303::Storm Riders
1305
+ 1304::Hearthlands
1306
+ 1305::Agatha Knife
1307
+ 1306::Attack Heroes
1308
+ 1307::TumbleSeed
1309
+ 1308::Dawn of Andromeda
1310
+ 1309::Welcome to Moreytown
1311
+ 1310::Fur Fun
1312
+ 1311::Chronicle of Innsmouth
1313
+ 1312::Birthdays the Beginning / バースデイズ・ザ・ビギニング
1314
+ 1313::Dead Cells
1315
+ 1314::Market Tycoon
1316
+ 1315::STEAM HAMMER
1317
+ 1316::Empathy: Path of Whispers
1318
+ 1317::Bounty Train
1319
+ 1318::Rabbit Story
1320
+ 1319::Arcfall
1321
+ 1320::Bokida - Heartfelt Reunion
1322
+ 1321::Mages of Mystralia
1323
+ 1322::Portal Knights
1324
+ 1323::The Infectious Madness of Doctor Dekker
1325
+ 1324::Skylar & Plux: Adventure On Clover Island
1326
+ 1325::A Hole New World
1327
+ 1326::The Superfluous
1328
+ 1327::StarCrawlers
1329
+ 1328::Starpoint Gemini Warlords
1330
+ 1329::50 years
1331
+ 1330::Zombidle : REMONSTERED
1332
+ 1331::Rising Storm 2: Vietnam
1333
+ 1332::Formula Fusion
1334
+ 1333::Shotgun Legend
1335
+ 1334::TEKKEN 7
1336
+ 1335::Blitzkrieg 3
1337
+ 1336::Monolith
1338
+ 1337::Bunker 58
1339
+ 1338::Lines
1340
+ 1339::Flood of Light
1341
+ 1340::Rocket Wars
1342
+ 1341::Pro Cycling Manager 2017
1343
+ 1342::Orake 2D MMORPG
1344
+ 1343::Zafehouse Diaries 2
1345
+ 1344::Nex Machina
1346
+ 1345::Libra of the Vampire Princess
1347
+ 1346::Saga of Tanya the Evil
1348
+ 1347::Ticket to Earth
1349
+ 1348::Micro Machines World Series
1350
+ 1349::Zooicide
1351
+ 1350::Three Twenty One
1352
+ 1351::Monsters' Den: Godfall
1353
+ 1352::Still Not Dead
1354
+ 1353::qop
1355
+ 1354::Antisphere
1356
+ 1355::Crash Force®
1357
+ 1356::Block Survival: Legend of the Lost Islands
1358
+ 1357::Hard Reset
1359
+ 1358::Hand Simulator
1360
+ 1359::Tangledeep
1361
+ 1360::Just Cause™ 3: Multiplayer Mod
1362
+ 1361::PWND
1363
+ 1362::Book Of Potentia 2
1364
+ 1363::Shadow Bug
1365
+ 1364::Fable Fortune
1366
+ 1365::Airmen
1367
+ 1366::Onirim - Solitaire Card Game
1368
+ 1367::Vostok Inc.
1369
+ 1368::Stream Avatars
1370
+ 1369::The Low Road
1371
+ 1370::SUDOKU
1372
+ 1371::Crossout
1373
+ 1372::Sky Knights
1374
+ 1373::Call of Duty®: Modern Warfare® Remastered
1375
+ 1374::SolarGun
1376
+ 1375::Legends of Ellaria
1377
+ 1376::Capitalism 2
1378
+ 1377::Alchemyland
1379
+ 1378::Nash Racing
1380
+ 1379::The Legend of Heroes: Trails of Cold Steel
1381
+ 1380::Saurian
1382
+ 1381::Juanito Arcade Mayhem
1383
+ 1382::THE KING OF FIGHTERS: DESTINY
1384
+ 1383::Cash Crop
1385
+ 1384::Achievement Hunter: Spinner Edition
1386
+ 1385::Draw Puzzle
1387
+ 1386::MINDNIGHT
1388
+ 1387::The Shrouded Isle
1389
+ 1388::CUBOTS The Origins
1390
+ 1389::Cat Quest
1391
+ 1390::Mega Man Legacy Collection 2 / ロックマン クラシックス コレクション 2
1392
+ 1391::Sine Mora EX
1393
+ 1392::Lost Technology
1394
+ 1393::Gangs of Space
1395
+ 1394::Looterkings
1396
+ 1395::Fragmented
1397
+ 1396::Drive Isle
1398
+ 1397::Lumber King
1399
+ 1398::Discord Bot Maker
1400
+ 1399::White Day: A Labyrinth Named School
1401
+ 1400::Yet Another Zombie Defense HD
1402
+ 1401::BROKE PROTOCOL: Online City RPG
1403
+ 1402::Island Dash
1404
+ 1403::Pixel Traffic: Circle Rush
1405
+ 1404::Epic Cards Battle 2-Dragons Rising(TCG)
1406
+ 1405::Greedy Guns
1407
+ 1406::Ways of History
1408
+ 1407::Doodle God Blitz
1409
+ 1408::Find this!
1410
+ 1409::Reaching for Petals
1411
+ 1410::Kitten Rampage
1412
+ 1411::SLI-FI: 2D Planet Platformer
1413
+ 1412::Brawl of Ages
1414
+ 1413::The Ultimatest Battle
1415
+ 1414::Trackless
1416
+ 1415::Hand of the Gods
1417
+ 1416::Epic Tavern
1418
+ 1417::PRO EVOLUTION SOCCER 2018
1419
+ 1418::Oriental Empires
1420
+ 1419::Rover Builder
1421
+ 1420::Jettomero: Hero of the Universe
1422
+ 1421::ARENA an Age of Barbarians story
1423
+ 1422::STELLATUM
1424
+ 1423::World of Castles
1425
+ 1424::ATOMEGA
1426
+ 1425::The Caribbean Sail
1427
+ 1426::Super Fancy Pants Adventure
1428
+ 1427::Arrow Heads
1429
+ 1428::Auto Age: Standoff
1430
+ 1429::Heat Signature
1431
+ 1430::Figment
1432
+ 1431::DwarfCorp
1433
+ 1432::Road Dogs
1434
+ 1433::About Elise
1435
+ 1434::The Painscreek Killings
1436
+ 1435::Killer Instinct
1437
+ 1436::Over The Moonlight
1438
+ 1437::RollerCoaster Tycoon® Classic
1439
+ 1438::Vaporum
1440
+ 1439::Airport CEO
1441
+ 1440::Stick Fight: The Game
1442
+ 1441::JYDGE
1443
+ 1442::SORE
1444
+ 1443::Mushroom Wars 2
1445
+ 1444::Cubic
1446
+ 1445::Wunderdoktor
1447
+ 1446::Gold Rush: The Game
1448
+ 1447::Yorkshire Gubbins
1449
+ 1448::Party Hard Tycoon
1450
+ 1449::Bomber Crew
1451
+ 1450::Tank Force
1452
+ 1451::Debris
1453
+ 1452::Battlevoid: Sector Siege
1454
+ 1453::Scream Collector
1455
+ 1454::Forever Home
1456
+ 1455::JumpSky
1457
+ 1456::Path Out
1458
+ 1457::FURIDASHI: Drift Cyber Sport
1459
+ 1458::Warspear Online
1460
+ 1459::Mini Guns
1461
+ 1460::Star Story: The Horizon Escape
1462
+ 1461::Team Four Star RPG
1463
+ 1462::A Wonder
1464
+ 1463::LEGO® Marvel Super Heroes 2
1465
+ 1464::Deep Sky Derelicts
1466
+ 1465::Door Kickers: Action Squad
1467
+ 1466::CarX Drift Racing Online
1468
+ 1467::Remaya Idle
1469
+ 1468::The Mammoth: A Cave Painting
1470
+ 1469::Red Crucible®: Reloaded
1471
+ 1470::AirMech Wastelands
1472
+ 1471::Dominions 5 - Warriors of the Faith
1473
+ 1472::Ylands
1474
+ 1473::汉匈决战/Gloria Sinica: Han Xiongnu Wars
1475
+ 1474::SpellForce 3
1476
+ 1475::Gunlock
1477
+ 1476::Umineko: Golden Fantasia
1478
+ 1477::I wanna be The Cat
1479
+ 1478::Gorogoa
1480
+ 1479::War Planet Online: Global Conquest
1481
+ 1480::Modern Combat Versus
1482
+ 1481::Golden Hornet
1483
+ 1482::PLAYERUNKN1WN: Friendly Fire
1484
+ 1483::Wooden Battles
1485
+ 1484::Flora
1486
+ 1485::Fruity Smoothie
1487
+ 1486::X-Morph: Defense
1488
+ 1487::Long Live Santa!
1489
+ 1488::Decent Icons
1490
+ 1489::Dinosaurs A Prehistoric Adventure
1491
+ 1490::Gauntlet of IRE
1492
+ 1491::Ashes Cricket
1493
+ 1492::Finding Paradise
1494
+ 1493::Farmer's Dynasty
1495
+ 1494::Rumu
1496
+ 1495::Zup! 7
1497
+ 1496::OKAMI HD / 大神 絶景版
1498
+ 1497::March of Empires
1499
+ 1498::Space Wars: Interstellar Empires
1500
+ 1499::Super Turbo Demon Busters!
1501
+ 1500::Shadowhand
1502
+ 1501::SUPERHOT: MIND CONTROL DELETE
1503
+ 1502::Getting Over It with Bennett Foddy
1504
+ 1503::Cookies vs. Claus
1505
+ 1504::Nocturnal Hunt
1506
+ 1505::Bare Metal
1507
+ 1506::Jay Fighter: Remastered
1508
+ 1507::Cobalt WASD
1509
+ 1508::La Tale - Evolved
1510
+ 1509::Carcassonne - Tiles & Tactics
1511
+ 1510::BattleRush
1512
+ 1511::My Free Zoo
1513
+ 1512::Wolfenstein II: The New Colossus
1514
+ 1513::Base Defense
1515
+ 1514::Battle Chef Brigade
1516
+ 1515::Yu-Gi-Oh! Duel Links
1517
+ 1516::東方天空璋 ~ Hidden Star in Four Seasons.
1518
+ 1517::Star Traders: Frontiers
1519
+ 1518::Tactical Monsters Rumble Arena
1520
+ 1519::Football Manager Touch 2018
1521
+ 1520::Gladiator School
1522
+ 1521::Oik 3
1523
+ 1522::Superflight
1524
+ 1523::Hand of Fate 2
1525
+ 1524::True or False Universe
1526
+ 1525::Need For Drink
1527
+ 1526::One Strike
1528
+ 1527::I Am Overburdened
1529
+ 1528::Hollowed
1530
+ 1529::The Norwood Suite
1531
+ 1530::ШП
1532
+ 1531::Zwei: The Ilvard Insurrection
1533
+ 1532::Lines by Nestor Yavorskyy
1534
+ 1533::Codename CURE
1535
+ 1534::LSD
1536
+ 1535::Dresden Files Cooperative Card Game
1537
+ 1536::Coffee Run
1538
+ 1537::Lost Dimension
1539
+ 1538::Flightless
1540
+ 1539::Black Clover
1541
+ 1540::SIMULACRA
1542
+ 1541::AER Memories of Old
1543
+ 1542::ClickRaid
1544
+ 1543::Capsa
1545
+ 1544::Real Farm
1546
+ 1545::Molten Armor
1547
+ 1546::The Inner World - The Last Wind Monk
1548
+ 1547::Beyond the Void
1549
+ 1548::Warbanners
1550
+ 1549::Freaky Awesome
1551
+ 1550::Rex: Another Island
1552
+ 1551::WWE 2K18
1553
+ 1552::Brawlhalla
1554
+ 1553::South Park™: The Fractured But Whole™
1555
+ 1554::Mercury Fallen
1556
+ 1555::Robot King Part I: Rebooted and Ready
1557
+ 1556::Dungeons 3
1558
+ 1557::Diamo XL
1559
+ 1558::Dragon Glory
1560
+ 1559::Middle-earth™: Shadow of War™
1561
+ 1560::Incline
1562
+ 1561::Rainbow Snake
1563
+ 1562::Press X to Not Die
1564
+ 1563::Battle Chasers: Nightwar
1565
+ 1564::Divided We Fall
1566
+ 1565::Castle Clicker : Building Tycoon
1567
+ 1566::Throne of Lies® The Online Game of Deceit
1568
+ 1567::Bermuda - Lost Survival
1569
+ 1568::Kritika Online
1570
+ 1569::Total War: WARHAMMER II
1571
+ 1570::The Guild 3
1572
+ 1571::Hob
1573
+ 1572::Project Nimbus
1574
+ 1573::Pinball FX3
1575
+ 1574::Niche - a genetics survival game
1576
+ 1575::Holdfast: Nations At War
1577
+ 1576::Ancient Frontier
1578
+ 1577::HumanKind: The Awakening
1579
+ 1578::Age of Gladiators II
1580
+ 1579::openCanvas 7
1581
+ 1580::Tricolour Lovestory
1582
+ 1581::Blast Out
1583
+ 1582::Another Lost Phone: Laura's Story
1584
+ 1583::Super Hydorah
1585
+ 1584::Simulator hipstera 2k17
1586
+ 1585::Lone Warrior
1587
+ 1586::HIVESWAP: Act 1
1588
+ 1587::Tooth and Tail
1589
+ 1588::FPV Drone Simulator
1590
+ 1589::Fjong
1591
+ 1590::Academia : School Simulator
1592
+ 1591::Insidia
1593
+ 1592::Stash
1594
+ 1593::Find You
1595
+ 1594::Darkestville Castle
1596
+ 1595::Card City Nights 2
1597
+ 1596::Madu Maths
1598
+ 1597::Fear For Freedom
1599
+ 1598::On a Roll
1600
+ 1599::1bitHeart
1601
+ 1600::Far Space Halloween edition
1602
+ 1601::Loading
1603
+ 1602::Robocraft
1604
+ 1603::Alphabear: Hardcover Edition
1605
+ 1604::The Escapists 2
1606
+ 1605::Achievement Hunter: Darkness
1607
+ 1606::Blue Horizon
1608
+ 1607::CastleMiner Warfare
1609
+ 1608::Moero Chronicle | 限界凸記 モエロクロニクル | 極限凸記 萌萌編年史
1610
+ 1609::Startup Company
1611
+ 1610::Team Racing League
1612
+ 1611::Halcyon 6: Starbase Commander (LIGHTSPEED EDITION)
1613
+ 1612::Super POTUS Trump
1614
+ 1613::Achievement Hunter: Overdose
1615
+ 1614::Code 7
1616
+ 1615::Spears 'n' Spades
1617
+ 1616::LawBreakers
1618
+ 1617::Hellblade: Senua's Sacrifice
1619
+ 1618::Strategy & Tactics: Dark Ages
1620
+ 1619::Quest Hunter
1621
+ 1620::The Hunting God
1622
+ 1621::Machine World 2
1623
+ 1622::Black Squad
1624
+ 1623::Fighting Fantasy Legends
1625
+ 1624::Tricone Lab
1626
+ 1625::Fate/EXTELLA
1627
+ 1626::Iron Tides
1628
+ 1627::Cursed Treasure 2
1629
+ 1628::Planet Ancyra Chronicles
1630
+ 1629::Solar Settlers
1631
+ 1630::Dreamfall Chapters
1632
+ 1631::Pastry Lovers
1633
+ 1632::Strike Vector EX
1634
+ 1633::Aporia: Beyond The Valley
1635
+ 1634::Kingdoms and Castles
1636
+ 1635::Slash It Ultimate
1637
+ 1636::Dead Purge: Outbreak
1638
+ 1637::Star Trek Timelines
1639
+ 1638::Wild Guns Reloaded
1640
+ 1639::Minecraft: Story Mode - Season Two
1641
+ 1640::Omegaland
1642
+ 1641::Unturned
1643
+ 1642::Visual Novel Engine
1644
+ 1643::√Letter - Root Letter -
1645
+ 1644::Bomb Defense
1646
+ 1645::Cricket Captain 2017
1647
+ 1646::Baobabs Mausoleum Ep.1: Ovnifagos Don´t Eat Flamingos
1648
+ 1647::SAELIG
1649
+ 1648::PLANETS OF WAR
1650
+ 1649::Kimulator 2: The Bottle Flip Master
1651
+ 1650::TAP TOUCH RUN
1652
+ 1651::龙魂时刻
1653
+ 1652::Your Smile Beyond Twilight:黄昏下的月台上
1654
+ 1653::b
1655
+ 1654::welcome to heaven
1656
+ 1655::Miss Kobayashi's Dragon Maid
1657
+ 1656::Gabriel Dropout
1658
+ 1657::Kemono Friends
1659
+ 1658::3..2..1..Grenades!
1660
+ 1659::Astro Boy: Edge of Time
1661
+ 1660::SQUIDS FROM SPACE
1662
+ 1661::Art Of Gravity
1663
+ 1662::TinkerQuarry
1664
+ 1663::Formicide
1665
+ 1664::DiRT 4
1666
+ 1665::Pixel Worlds
1667
+ 1666::Crystal City
1668
+ 1667::Strategic Command WWII: War in Europe
1669
+ 1668::Passpartout: The Starving Artist
1670
+ 1669::Conarium
1671
+ 1670::Battlesloths 2025: The Great Pizza Wars
1672
+ 1671::UltraGoodness
1673
+ 1672::After The End: The Harvest
1674
+ 1673::Might & Mayhem
1675
+ 1674::Lydia
1676
+ 1675::Tokyo 42
1677
+ 1676::Company of Heroes: Eastern Front
1678
+ 1677::Shotgun Farmers
1679
+ 1678::The Land of Pain
1680
+ 1679::XField Paintball 3
1681
+ 1680::Don't cut your hand
1682
+ 1681::First Strike: Final Hour
1683
+ 1682::Balthazar's Dream
1684
+ 1683::VEGAS Movie Studio 14 Steam Edition
1685
+ 1684::Flying Tigers: Shadows Over China
1686
+ 1685::Green Cat
1687
+ 1686::Life is Feudal: Forest Village
1688
+ 1687::Detective Butler: Maiden Voyage Murder
1689
+ 1688::MidBoss
1690
+ 1689::Quarantine
1691
+ 1690::Oafmatch
1692
+ 1691::You Are God
1693
+ 1692::ROKH
1694
+ 1693::Old Man's Journey
1695
+ 1694::Regalia: Of Men and Monarchs
1696
+ 1695::Andarilho
1697
+ 1696::Bombinator
1698
+ 1697::Blockle
1699
+ 1698::Run Away
1700
+ 1699::SPACEPLAN
1701
+ 1700::ATOMINE
1702
+ 1701::Last Dream: World Unknown
1703
+ 1702::Blossoms Bloom Brightest
1704
+ 1703::Cublast HD
1705
+ 1704::NEXT JUMP: Shmup Tactics
1706
+ 1705::Arcane Mapper
1707
+ 1706::Jidousha Shakai
1708
+ 1707::What Remains of Edith Finch
1709
+ 1708::Games of Glory
1710
+ 1709::Ruin of the Reckless
1711
+ 1710::Cuit
1712
+ 1711::Deformers
1713
+ 1712::Everything
1714
+ 1713::Orcs Must Die! Unchained
1715
+ 1714::Animal Rivals
1716
+ 1715::ONRAID
1717
+ 1716::Life Beetle
1718
+ 1717::Kingdom of Loot
1719
+ 1718::My Name is You
1720
+ 1719::iStorm
1721
+ 1720::ZRoll
1722
+ 1721::Tuebor: I Will Defend
1723
+ 1722::Bulletstorm: Full Clip Edition
1724
+ 1723::The Quest for Achievements
1725
+ 1724::Warstone TD
1726
+ 1725::The Franz Kafka Videogame
1727
+ 1726::Golf for Workgroups
1728
+ 1727::The Great Whale Road
1729
+ 1728::Berserk
1730
+ 1729::Mob Psycho 100
1731
+ 1730::Ghost In The Shell: Stand Alone Complex
1732
+ 1731::One Eyed Kutkh
1733
+ 1732::The Eagle's Heir
1734
+ 1733::HOUND
1735
+ 1734::Fallout Shelter
1736
+ 1735::Fences
1737
+ 1736::Life Forge ORPG
1738
+ 1737::News Tycoon
1739
+ 1738::Battle Brothers
1740
+ 1739::Weapon Shop Fantasy
1741
+ 1740::Bush Hockey League
1742
+ 1741::Dungeon Creepster
1743
+ 1742::Molemen Must Die!
1744
+ 1743::Tower Defense - Fantasy Legends Tower Game
1745
+ 1744::Factory Engineer
1746
+ 1745::The Crow's Eye
1747
+ 1746::Running Sausage
1748
+ 1747::The Tenth Line
1749
+ 1748::Macbat 64
1750
+ 1749::Antagonist
1751
+ 1750::Kona
1752
+ 1751::Viktor, a Steampunk Adventure
1753
+ 1752::NieR:Automata™
1754
+ 1753::MyWorld - Action RPG Maker
1755
+ 1754::Cosmic Express
1756
+ 1755::Virtual Rides 3 - Funfair Simulator
1757
+ 1756::The Keep
1758
+ 1757::64.0
1759
+ 1758::Clone Drone in the Danger Zone
1760
+ 1759::Introvert Quest
1761
+ 1760::Future Unfolding
1762
+ 1761::Real Heroes: Firefighter
1763
+ 1762::Tacopocalypse
1764
+ 1763::Monster Monpiece
1765
+ 1764::aMAZE
1766
+ 1765::Nephise
1767
+ 1766::Arma: Cold War Assault Mac/Linux
1768
+ 1767::Chosen 2
1769
+ 1768::A Girls Fabric Face
1770
+ 1769::Another Adventure
1771
+ 1770::Blossom Tales: The Sleeping King
1772
+ 1771::Universe in Fire
1773
+ 1772::MAKE IT as an Artist
1774
+ 1773::Tower!3D Pro
1775
+ 1774::Faeria
1776
+ 1775::Tanki Online
1777
+ 1776::Turret Terminator
1778
+ 1777::Dad Quest
1779
+ 1778::Vive le Roi
1780
+ 1779::Shadows of Adam
1781
+ 1780::Magazime Editor
1782
+ 1781::CropDuster Supreme
1783
+ 1782::Herald: An Interactive Period Drama - Book I & II
1784
+ 1783::8-Bit Armies: Arena (Free)
1785
+ 1784::The Wardrobe
1786
+ 1785::LOGistICAL
1787
+ 1786::9Dragons
1788
+ 1787::Dropzone
1789
+ 1788::Command Ops 2
1790
+ 1789::Oik
1791
+ 1790::Upside Down
1792
+ 1791::Dear Esther: Landmark Edition
1793
+ 1792::Semispheres
1794
+ 1793::MX Nitro
1795
+ 1794::8-Bit Armies: Arena
1796
+ 1795::Heavy Gear Assault
1797
+ 1796::WayOut 2: Hex
1798
+ 1797::Realpolitiks
1799
+ 1798::Cabals: Card Blitz
1800
+ 1799::The Wild Eight
1801
+ 1800::Blood Harvest
1802
+ 1801::OR
1803
+ 1802::Max's Big Bust - A Captain Nekorai Tale
1804
+ 1803::WWE 2K17
1805
+ 1804::Induction
1806
+ 1805::Gunmetal Arcadia
1807
+ 1806::Xenon Valkyrie
1808
+ 1807::Numberline
1809
+ 1808::ANIMALITY
1810
+ 1809::Alwa's Awakening
1811
+ 1810::Midas Gold Plus
1812
+ 1811::BIGFOOT
1813
+ 1812::Itineris
1814
+ 1813::Reptilians Must Die!
1815
+ 1814::HitBox
1816
+ 1815::Disgaea 2 PC / 魔界戦記ディスガイア2 PC
1817
+ 1816::Double Dragon IV
1818
+ 1817::Sakura Agent
1819
+ 1818::Miner Ultra Adventures
1820
+ 1819::A Normal Lost Phone
1821
+ 1820::Witanlore: Dreamtime
1822
+ 1821::Heroes Evolved
1823
+ 1822::Avorion
1824
+ 1823::Hellenica
1825
+ 1824::Once Upon an All Hallow's Eve
1826
+ 1825::Frequent Flyer
1827
+ 1826::Urban Empire
1828
+ 1827::Mortifero Motus
1829
+ 1828::M.E.R.C.
1830
+ 1829::Monumental Failure
1831
+ 1830::Pinkman
1832
+ 1831::Atomic 79
1833
+ 1832::Zup! 3
1834
+ 1833::Tales of Berseria™
1835
+ 1834::Don't Chat With Strangers
1836
+ 1835::Unalive
1837
+ 1836::UNO
1838
+ 1837::Word Killer: Zorgilonian Chronicles
1839
+ 1838::Let's Draw
1840
+ 1839::Score a goal (Physical football)
1841
+ 1840::Blind Love
1842
+ 1841::Behind the Memory
1843
+ 1842::Random Journey
1844
+ 1843::Where are my Internets?
1845
+ 1844::Rumble Fighter: Unleashed
1846
+ 1845::Volleyball Unbound - Pro Beach Volleyball
1847
+ 1846::Xmas Shooting - Scramble!!
1848
+ 1847::Castle Battles
1849
+ 1848::VERSUS: The Elite Trials
1850
+ 1849::Super Blue Boy Planet
1851
+ 1850::Bamboo EP
1852
+ 1851::Endless Fables: The Minotaur's Curse
1853
+ 1852::Defense Zone 3 Ultra HD
1854
+ 1853::The Little Acre
1855
+ 1854::Palinurus
1856
+ 1855::Disney Infinity 3.0: Gold Edition
1857
+ 1856::Prominence Poker
1858
+ 1857::LogicBots
1859
+ 1858::Triennale Game Collection
1860
+ 1859::Campfire: One of Us Is the Killer
1861
+ 1860::Indie Game Sim
1862
+ 1861::Galactic Fighter
1863
+ 1862::Hunger Dungeon
1864
+ 1863::Hade
1865
+ 1864::Coffee Shop Tycoon
1866
+ 1865::Void Pyramid
1867
+ 1866::Minion Masters
1868
+ 1867::Ahnayro: The Dream World
1869
+ 1868::Terminal Hacker
1870
+ 1869::Construct: Escape the System
1871
+ 1870::Charlie's Adventure
1872
+ 1871::Atom Fishing II
1873
+ 1872::Shattered Throne
1874
+ 1873::Quern - Undying Thoughts
1875
+ 1874::Mad Hunter
1876
+ 1875::Turret Architect
1877
+ 1876::Peak Angle: Drift Online
1878
+ 1877::Eon Altar
1879
+ 1878::Front Office Football Eight
1880
+ 1879::Star Trek: Starfleet Command Gold Edition
1881
+ 1880::Germ Wars
1882
+ 1881::Disastr_Blastr
1883
+ 1882::Alicemare
1884
+ 1883::DesertLand 2115
1885
+ 1884::Glass Masquerade
1886
+ 1885::拯救大魔王2 Rescue the Great Demon 2
1887
+ 1886::Book Series - Alice in Wonderland
1888
+ 1887::Kopanito All-Stars Soccer
1889
+ 1888::Dreamcage Escape
1890
+ 1889::Screeps
1891
+ 1890::Drunk On Nectar
1892
+ 1891::ENIGMA:
1893
+ 1892::Ittle Dew 2
1894
+ 1893::CLASH
1895
+ 1894::Nodiatis
1896
+ 1895::Multimirror
1897
+ 1896::Spellstone
1898
+ 1897::Resin
1899
+ 1898::Badiya
1900
+ 1899::One Tower
1901
+ 1900::CRACKHEAD
1902
+ 1901::Kokurase - Episode 1
1903
+ 1902::Fairyland: Fairy Power
1904
+ 1903::Motorsport Manager
1905
+ 1904::Notruf 112 | Emergency Call 112
1906
+ 1905::VOI
1907
+ 1906::One Night Stand
1908
+ 1907::Driver Booster 4 for Steam
1909
+ 1908::WayOut
1910
+ 1909::Dead Age
1911
+ 1910::Steam Dev Days
1912
+ 1911::Call of Duty®: Infinite Warfare
1913
+ 1912::Melon Simulator™
1914
+ 1913::Zero G Arena
1915
+ 1914::Pro Skater 2D
1916
+ 1915::Moto Racer 4
1917
+ 1916::Slap The Fly
1918
+ 1917::CameraBag Photo
1919
+ 1918::Creepy Castle
1920
+ 1919::Sombrero: Spaghetti Western Mayhem
1921
+ 1920::Blight of the Immortals
1922
+ 1921::VERSUS SQUAD
1923
+ 1922::Yomawari: Night Alone / 夜廻
1924
+ 1923::The Puppet Master
1925
+ 1924::Dessert Storm
1926
+ 1925::JumpBall
1927
+ 1926::Murder Mystery Adventure
1928
+ 1927::Cursed Castilla (Maldita Castilla EX)
1929
+ 1928::Astral Heroes
1930
+ 1929::Rusty Lake: Roots
1931
+ 1930::AdventureQuest 3D
1932
+ 1931::Lance A Lot®: Classic Edition
1933
+ 1932::Soulless: Ray Of Hope
1934
+ 1933::9th Dawn II
1935
+ 1934::Crush Crush
1936
+ 1935::NUKED KNIGHT
1937
+ 1936::Physic Monster
1938
+ 1937::Remaining in a dream
1939
+ 1938::CrossWorlds: Escape
1940
+ 1939::Razortron 2000
1941
+ 1940::Near Midnight
1942
+ 1941::Eventide 2: The Sorcerers Mirror
1943
+ 1942::Duke Nukem 3D: 20th Anniversary World Tour
1944
+ 1943::WRC 6 FIA World Rally Championship
1945
+ 1944::Breathing Fear
1946
+ 1945::Mafia III
1947
+ 1946::Doorways: Old Prototype
1948
+ 1947::Aragami
1949
+ 1948::Unclaimed World
1950
+ 1949::The Space Garden
1951
+ 1950::The Sun Will Rise
1952
+ 1951::Zombo Buster Rising
1953
+ 1952::Helmet Heroes
1954
+ 1953::Rogue Operatives Hide and Seek
1955
+ 1954::Masquerada: Songs and Shadows
1956
+ 1955::Rescue Bear Operation
1957
+ 1956::FourChords Guitar Karaoke
1958
+ 1957::Slayer Shock
1959
+ 1958::Merger 3D
1960
+ 1959::Princess Maker 2 Refine
1961
+ 1960::God of Word
1962
+ 1961::Trick & Treat
1963
+ 1962::Make America Great Again: The Trump Presidency
1964
+ 1963::Stellar Tactics
1965
+ 1964::Dream Car Builder
1966
+ 1965::Talewind
1967
+ 1966::DROD: Gunthro and the Epic Blunder
1968
+ 1967::Tentacult!
1969
+ 1968::The Uncertain: Episode 1 - The Last Quiet Day
1970
+ 1969::Toadled
1971
+ 1970::Gun Bombers
1972
+ 1971::Pankapu
1973
+ 1972::Hospitalize
1974
+ 1973::Think To Die
1975
+ 1974::Fall of Civilization
1976
+ 1975::Squeezone
1977
+ 1976::Eclipse: New Dawn for the Galaxy
1978
+ 1977::Diaries of a Spaceport Janitor
1979
+ 1978::Make America Great Again
1980
+ 1979::Solitaire Royale
1981
+ 1980::Soccer Manager 2017
1982
+ 1981::Beautiful Japanese Scenery - Animated Jigsaws
1983
+ 1982::Dots eXtreme
1984
+ 1983::Carrie's Order Up!
1985
+ 1984::PAC-MAN™ CHAMPIONSHIP EDITION 2
1986
+ 1985::Mad Games Tycoon
1987
+ 1986::Monsti
1988
+ 1987::Otherland MMO
1989
+ 1988::Envy the Dead
1990
+ 1989::What The Box?
1991
+ 1990::Chess Knight 2
1992
+ 1991::Zombitatos the end of the Pc master race
1993
+ 1992::Pixel Puzzles Ultimate
1994
+ 1993::Pixelscape: Oceans
1995
+ 1994::STEINS;GATE
1996
+ 1995::Star Crusade CCG
1997
+ 1996::Nanoborg
1998
+ 1997::Dungeon Rushers: Crawler RPG
1999
+ 1998::Revolution 60
2000
+ 1999::The Tale of a Common Man
2001
+ 2000::Tadpole Treble
2002
+ 2001::Windscape
2003
+ 2002::Cashtronauts
2004
+ 2003::The Last Hope
2005
+ 2004::Destiny of Ancient Kingdoms™
2006
+ 2005::Reset 1-1
2007
+ 2006::Thorne - Son of Slaves (Ep.2)
2008
+ 2007::The Hive
2009
+ 2008::Master of Orion
2010
+ 2009::Virginia
2011
+ 2010::Dangerous Relationship
2012
+ 2011::Momento Temporis: Light from the Deep
2013
+ 2012::Picross Touch
2014
+ 2013::Ultimate Arena
2015
+ 2014::Hardware Engineers
2016
+ 2015::J.U.R : Japan Underground Racing
2017
+ 2016::Orbital X
2018
+ 2017::Combat Core
2019
+ 2018::Clean'Em Up
2020
+ 2019::Girl Amazon Survival
2021
+ 2020::No Man's Sky
2022
+ 2021::Siralim 2
2023
+ 2022::8-Bit Hordes
2024
+ 2023::Genius Greedy Mouse
2025
+ 2024::My Butler
2026
+ 2025::Meridian: Squad 22
2027
+ 2026::Kimulator : Fight for your destiny
2028
+ 2027::Through Abandoned 2. The Forest
2029
+ 2028::Handsome Mr. Frog
2030
+ 2029::Tricky Towers
2031
+ 2030::ABZU
2032
+ 2031::Rising Islands
2033
+ 2032::Near Death
2034
+ 2033::MANDAGON
2035
+ 2034::Hacker Evolution IMMERSION
2036
+ 2035::D.N.Age
2037
+ 2036::Blade Arcus from Shining: Battle Arena
2038
+ 2037::Barnyard Mahjong 3
2039
+ 2038::The Amazing Shinsengumi: Heroes in Love
2040
+ 2039::Riptide GP: Renegade
2041
+ 2040::Kingdom Rush Frontiers
2042
+ 2041::Ghost of a Tale
2043
+ 2042::Quadrilateral Cowboy
2044
+ 2043::SolForge
2045
+ 2044::SUPERFIGHT
2046
+ 2045::Zavix Tower
2047
+ 2046::Polarity
2048
+ 2047::Emerland Solitaire: Endless Journey
2049
+ 2048::Mahluk:Dark demon
2050
+ 2049::Mainlining
2051
+ 2050::Domain Defense
2052
+ 2051::Pirates of the Polygon Sea
2053
+ 2052::NECROPOLIS: BRUTAL EDITION
2054
+ 2053::Rot Gut
2055
+ 2054::Killbot
2056
+ 2055::Bot Vice
2057
+ 2056::ESEA
2058
+ 2057::Song of the Deep
2059
+ 2058::Moon Colonization Project
2060
+ 2059::Haven Moon
2061
+ 2060::Jonah's Path
2062
+ 2061::Star Rangers™ XE
2063
+ 2062::Dragonpath
2064
+ 2063::FreeHolder
2065
+ 2064::Megadimension Neptunia VII
2066
+ 2065::Killing Time at Lightspeed: Enhanced Edition
2067
+ 2066::Hero Zero
2068
+ 2067::Rugby Challenge 3
2069
+ 2068::Crush Your Enemies
2070
+ 2069::LUMBERMANCER
2071
+ 2070::Osozaki 遅咲き Late Blooming - First
2072
+ 2071::Potato Thriller
2073
+ 2072::Umbrella Corps™/Biohazard Umbrella Corps™
2074
+ 2073::Asemblance
2075
+ 2074::Ortus Regni
2076
+ 2075::Evil Maze
2077
+ 2076::Urban Pirate
2078
+ 2077::Space Run Galaxy
2079
+ 2078::AWA
2080
+ 2079::Envoy 2
2081
+ 2080::Awareness Rooms
2082
+ 2081::Transmissions: Element 120
2083
+ 2082::Polandball: Can into Space!
2084
+ 2083::Bastard Bonds
2085
+ 2084::Fantasy Kingdom Simulator
2086
+ 2085::Legends of Callasia
2087
+ 2086::The Hat Man: Shadow Ward
2088
+ 2087::One Small Fire At A Time
2089
+ 2088::Vroomist
2090
+ 2089::Choice of Alexandria
2091
+ 2090::WildStar
2092
+ 2091::Tick Tock Bang Bang
2093
+ 2092::Gloria Victis
2094
+ 2093::Emporea: Realms of War and Magic
2095
+ 2094::Legacy of the Elder Star
2096
+ 2095::VirtualHere For Steam Link
2097
+ 2096::WASTED
2098
+ 2097::The Orb Chambers™
2099
+ 2098::Sol Trader
2100
+ 2099::Evolution
2101
+ 2100::Pepe Porcupine
2102
+ 2101::Amulet of Dreams
2103
+ 2102::Dark Days
2104
+ 2103::Wanda - A Beautiful Apocalypse
2105
+ 2104::Hope Lake
2106
+ 2105::Ember Kaboom
2107
+ 2106::Age of Barbarian Extended Cut
2108
+ 2107::BloodGate
2109
+ 2108::Onechanbara Z2: Chaos
2110
+ 2109::Kabitis
2111
+ 2110::Brigador: Up-Armored Edition
2112
+ 2111::Grim Legends 3: The Dark City
2113
+ 2112::Artificial Defense
2114
+ 2113::Descent: Road to Legend
2115
+ 2114::SENRAN KAGURA SHINOVI VERSUS
2116
+ 2115::Beater Spirit
2117
+ 2116::Runeous: Part One
2118
+ 2117::The Concourse
2119
+ 2118::BlackShot: Mercenary Warfare FPS
2120
+ 2119::Dead Island Definitive Edition
2121
+ 2120::Tower!3D
2122
+ 2121::Overhell
2123
+ 2122::Zenodyne R
2124
+ 2123::ABC Coloring Town
2125
+ 2124::Neon Hardcorps
2126
+ 2125::Garlock Online
2127
+ 2126::Iron Impact
2128
+ 2127::Aurora Nights
2129
+ 2128::Seduce Me 2: The Demon War
2130
+ 2129::FleetCOMM
2131
+ 2130::Total War: WARHAMMER
2132
+ 2131::Shoppe Keep
2133
+ 2132::BATTLE PIXEL'S SURVIVAL GROUND
2134
+ 2133::Please Hold
2135
+ 2134::Melody's Escape
2136
+ 2135::ORCS
2137
+ 2136::The Dweller
2138
+ 2137::Linkrealms
2139
+ 2138::NotCoD™
2140
+ 2139::Koihime Enbu
2141
+ 2140::The Mims Beginning
2142
+ 2141::Remnants of a Beautiful Day
2143
+ 2142::The Abbey of Crime Extensum
2144
+ 2143::Space Codex
2145
+ 2144::EPΘCH
2146
+ 2145::Rogue System
2147
+ 2146::Demetrios - The BIG Cynical Adventure
2148
+ 2147::RefRain - prism memories -
2149
+ 2148::DOOM
2150
+ 2149::Murasaki
2151
+ 2150::Trawl
2152
+ 2151::Who's Your Daddy
2153
+ 2152::She Wants Me Dead
2154
+ 2153::Mini Golf Mundo
2155
+ 2154::TASTEE: Lethal Tactics
2156
+ 2155::My Secret Pets!
2157
+ 2156::Super Blue Fighter
2158
+ 2157::Ryzom
2159
+ 2158::Theory Test UK 2016/17 - Driving Test Success
2160
+ 2159::Istrolid
2161
+ 2160::Stellaris
2162
+ 2161::Star Saviors
2163
+ 2162::Worm.is: The Game
2164
+ 2163::Liveza: Death of the Earth
2165
+ 2164::Cyber City 2157: The Visual Novel
2166
+ 2165::I and Me
2167
+ 2166::Gun Rocket
2168
+ 2167::Aurora Dusk: Steam Age
2169
+ 2168::Initia: Elemental Arena
2170
+ 2169::Worlds Adrift Island Creator
2171
+ 2170::Puzzle Box
2172
+ 2171::Oil Enterprise
2173
+ 2172::Bloons TD Battles
2174
+ 2173::Megamagic: Wizards of the Neon Age
2175
+ 2174::Don't Starve Together
2176
+ 2175::Rogue Stormers
2177
+ 2176::Endciv
2178
+ 2177::OPUS: The Day We Found Earth
2179
+ 2178::Western Press
2180
+ 2179::My Night Job
2181
+ 2180::Plastic Playground
2182
+ 2181::Typefighters (Steam Edition)
2183
+ 2182::Tales Across Time
2184
+ 2183::Danganronpa 2: Goodbye Despair
2185
+ 2184::HEX: Shards of Fate
2186
+ 2185::Voxel Warfare Online
2187
+ 2186::IS Defense
2188
+ 2187::The Banner Saga 2
2189
+ 2188::Night Blights
2190
+ 2189::Goetia
2191
+ 2190::Pub Encounter
2192
+ 2191::Club Life
2193
+ 2192::Last Hope - Tower Defense
2194
+ 2193::Grand Designer
2195
+ 2194::DARK SOULS™ III
2196
+ 2195::StarsOne
2197
+ 2196::Tenrow
2198
+ 2197::The Hero Project: Redemption Season
2199
+ 2198::Game Tycoon 2
2200
+ 2199::Virtual Rogue
2201
+ 2200::Tower Unite
2202
+ 2201::Fortify
2203
+ 2202::MXGP2 - The Official Motocross Videogame
2204
+ 2203::Mushroom Wars
2205
+ 2204::A Blind Legend
2206
+ 2205::Axes and Acres
2207
+ 2206::Campus Notes - forget me not.
2208
+ 2207::Odd||Even
2209
+ 2208::Little Walker
2210
+ 2209::Outrage
2211
+ 2210::Project Starship
2212
+ 2211::Sorcery! Part 3
2213
+ 2212::Domino Sky
2214
+ 2213::Metaverse
2215
+ 2214::Until I Have You
2216
+ 2215::Womb Room
2217
+ 2216::Daughter of Shadows: An SCP Breach Event
2218
+ 2217::Epsilon corp.
2219
+ 2218::CHKN
2220
+ 2219::Run Rabbit Run
2221
+ 2220::Blue Sheep
2222
+ 2221::Trackday Manager
2223
+ 2222::Tokyo Babel
2224
+ 2223::Catch a Falling Star
2225
+ 2224::[the Sequence]
2226
+ 2225::Bunker Punks
2227
+ 2226::Epistory - Typing Chronicles
2228
+ 2227::Trial by Viking
2229
+ 2228::XenoShyft
2230
+ 2229::The Last Door: Season 2 - Collector's Edition
2231
+ 2230::R.B.I. Baseball 16
2232
+ 2231::Angry Video Game Nerd II: ASSimilation
2233
+ 2232::MiniGolf Mania
2234
+ 2233::Forestry 2017 - The Simulation
2235
+ 2234::I Am Caligula
2236
+ 2235::Holodrive
2237
+ 2236::Slain: Back from Hell
2238
+ 2237::A Lenda do Herói - O Herói desta Canção
2239
+ 2238::Airstrike HD
2240
+ 2239::NOBUNAGA'S AMBITION: Sphere of Influence - Ascension / 信長の野望・創造 戦国立志伝
2241
+ 2240::LostWinds
2242
+ 2241::Wolcen: Lords of Mayhem
2243
+ 2242::Disposable Heroes
2244
+ 2243::Heroes of The West
2245
+ 2244::Day of the Tentacle Remastered
2246
+ 2245::The Next Door
2247
+ 2246::Spaceman Sparkles 3
2248
+ 2247::Congo
2249
+ 2248::Helen's Mysterious Castle
2250
+ 2249::War Birds: WW2 Air strike 1942
2251
+ 2250::Square's Route
2252
+ 2251::Hyper Box
2253
+ 2252::Victory and Glory: Napoleon
2254
+ 2253::Hustle Cat
2255
+ 2254::World's Fastest Pizza
2256
+ 2255::Mars 2030
2257
+ 2256::Out There Somewhere
2258
+ 2257::SPATIAL SOUND CARD
2259
+ 2258::There's Poop In My Soup
2260
+ 2259::Beat Da Beat
2261
+ 2260::The Secret Order 3: Ancient Times
2262
+ 2261::Moon Hunters
2263
+ 2262::Gremlins, Inc.
2264
+ 2263::Rabiez: Epidemic
2265
+ 2264::Mind Games
2266
+ 2265::Paper Train Traffic
2267
+ 2266::Space Pilgrim Episode IV: Sol
2268
+ 2267::Mystica: The Ninth Society
2269
+ 2268::GTGD S3 How To Make A Game
2270
+ 2269::LiEat
2271
+ 2270::Perfect Universe - Play with Gravity
2272
+ 2271::UnReal World
2273
+ 2272::Stardew Valley
2274
+ 2273::Trulon: The Shadow Engine
2275
+ 2274::STAR WARS™ Galactic Battlegrounds Saga
2276
+ 2275::Deponia Doomsday
2277
+ 2276::STAR WARS™ Rebellion
2278
+ 2277::Space Impossible
2279
+ 2278::XBlaze Code: Embryo
2280
+ 2279::Collider
2281
+ 2280::Where's My Mommy?
2282
+ 2281::Bus Simulator 16
2283
+ 2282::Karaski: What Goes Up...
2284
+ 2283::Acorn Assault: Rodent Revolution
2285
+ 2284::Rock Paper Scissors Champion
2286
+ 2285::Party Jousting
2287
+ 2286::Master of Orion 2
2288
+ 2287::Heaven's Hope - Special Edition
2289
+ 2288::And So It Was
2290
+ 2289::Gnomoria
2291
+ 2290::Governor of Poker 3
2292
+ 2291::Steamroll
2293
+ 2292::Broken Dreams
2294
+ 2293::The Quest
2295
+ 2294::Magdalena
2296
+ 2295::ChromaGun
2297
+ 2296::Kindred Spirits on the Roof
2298
+ 2297::SHOWTIME 2073
2299
+ 2298::NARUTO SHIPPUDEN: Ultimate Ninja STORM 4
2300
+ 2299::Mystic Destinies: Serendipity of Aeons
2301
+ 2300::Wayward Terran Frontier: Zero Falls
2302
+ 2301::Lucent Heart
2303
+ 2302::XCOM® 2
2304
+ 2303::Moonlight
2305
+ 2304::The Political Machine 2016
2306
+ 2305::Curvatron
2307
+ 2306::Rusty Lake Hotel
2308
+ 2307::Sunny Hillride
2309
+ 2308::CALENDULA
2310
+ 2309::Bombshell
2311
+ 2310::DarkMaus
2312
+ 2311::Aozora Meikyuu
2313
+ 2312::Mystic Saga
2314
+ 2313::Narcissu 10th Anniversary Anthology Project
2315
+ 2314::Super Mustache
2316
+ 2315::PulseCharge
2317
+ 2316::AI: Rampage
2318
+ 2317::FullBlast
2319
+ 2318::Eventide: Slavic Fable
2320
+ 2319::Rabi-Ribi
2321
+ 2320::Swiftly
2322
+ 2321::Crashlands
2323
+ 2322::Lif
2324
+ 2323::Let's Sing 2016
2325
+ 2324::Move or Die
2326
+ 2325::Naval Action
2327
+ 2326::Close Order
2328
+ 2327::Subterrain
2329
+ 2328::Pythagoria
2330
+ 2329::Warlords Battlecry III
2331
+ 2330::Between Me and The Night
2332
+ 2331::O3DX
2333
+ 2332::Rivalry
2334
+ 2333::Slybots: Frantic Zone
2335
+ 2334::Cyber Team Manager
2336
+ 2335::The Bug Butcher
2337
+ 2336::Resident Evil 0 / biohazard 0 HD REMASTER
2338
+ 2337::Darkest Dungeon®
2339
+ 2338::Protoshift
2340
+ 2339::Echoes of Aetheria
2341
+ 2340::Highrise Heroes: Word Challenge
2342
+ 2341::Evertown
2343
+ 2342::Fairy Tale Mysteries: The Puppet Thief
2344
+ 2343::Blood Code
2345
+ 2344::sZone-Online
2346
+ 2345::Linea, the Game
2347
+ 2346::A Wild Catgirl Appears!
2348
+ 2347::The Hurricane of the Varstray -Collateral hazard-
2349
+ 2348::Turok
2350
+ 2349::Lightbender
2351
+ 2350::Moonshot
2352
+ 2351::Puzzle Strike
2353
+ 2352::Dance of Death
2354
+ 2353::Gnomes Garden
2355
+ 2354::Swords and Sorcery - Underworld - Definitive Edition
2356
+ 2355::Monsterland
2357
+ 2356::Star Nomad 2
2358
+ 2357::Squad
2359
+ 2358::Project Pulsation
2360
+ 2359::Metal War Online: Retribution
2361
+ 2360::Stories of Bethem: Full Moon
2362
+ 2361::PewDiePie: Legend of the Brofist
2363
+ 2362::LIGHTNING RETURNS™: FINAL FANTASY® XIII
2364
+ 2363::Vehicle Simulator
2365
+ 2364::Romance of the Three Kingdoms Maker / 三国志ツクール
2366
+ 2365::Comic Book Hero: The Greatest Cape
2367
+ 2366::The Mean Greens - Plastic Warfare
2368
+ 2367::Manyland
2369
+ 2368::Nuclear Throne
2370
+ 2369::SquareCells
2371
+ 2370::Our Love Will Grow
2372
+ 2371::Dr. Langeskov, The Tiger, and The Terribly Cursed Emerald: A Whirlwind Heist
2373
+ 2372::Vampire Legends: The True Story of Kisilova
2374
+ 2373::Militia
2375
+ 2374::Legacy of Dorn: Herald of Oblivion
2376
+ 2375::Ruzar - The Life Stone
2377
+ 2376::Corgi Warlock
2378
+ 2377::The Last Dream: Developer's Edition
2379
+ 2378::Hit Tank PRO
2380
+ 2379::Mayjasmine Episode01 - What is God?
2381
+ 2380::Handball 16
2382
+ 2381::Yet Another World
2383
+ 2382::Might &amp; Magic Heroes Online
2384
+ 2383::Dead Acres
2385
+ 2384::Mighty Switch Force! Academy
2386
+ 2385::Idle Civilization
2387
+ 2386::Zero Punctuation: Hatfall - Hatters Gonna Hat Edition
2388
+ 2387::Switch Galaxy Ultra
2389
+ 2388::Vendetta - Curse of Raven's Cry
2390
+ 2389::Terraformer Expedition to Mars
2391
+ 2390::Nusakana
2392
+ 2391::The Consuming Shadow
2393
+ 2392::Hard West
2394
+ 2393::Rescue Team 5
2395
+ 2394::Dungeon Kingdom: Sign of the Moon
2396
+ 2395::Cross Set
2397
+ 2396::GOCCO OF WAR
2398
+ 2397::Back to Dinosaur Island
2399
+ 2398::Survivor Squad: Gauntlets
2400
+ 2399::Shadow Ninja: Apocalypse
2401
+ 2400::Knight Squad
2402
+ 2401::Trouble In The Manor
2403
+ 2402::Affected Zone Tactics
2404
+ 2403::World War II GI
2405
+ 2404::Last Heroes
2406
+ 2405::Steel Ocean
2407
+ 2406::STEEL STRIDER
2408
+ 2407::rFactor 2
2409
+ 2408::Legend (1994)
2410
+ 2409::Piercing Blow
2411
+ 2410::Impossible Creatures Steam Edition
2412
+ 2411::Fallout 4
2413
+ 2412::Steam Controller
2414
+ 2413::Defend The Highlands
2415
+ 2414::The Odyssey: Winds of Athena
2416
+ 2415::Without Within 2
2417
+ 2416::Diabolical
2418
+ 2417::HotLead
2419
+ 2418::Ironclads 2: American Civil War
2420
+ 2419::Conflicks - Revolutionary Space Battles
2421
+ 2420::The Secret Order 2: Masked Intent
2422
+ 2421::ePic Character Generator
2423
+ 2422::Darksiders II Deathinitive Edition
2424
+ 2423::Wave of Darkness
2425
+ 2424::Zero Reflex : Black Eye Edition
2426
+ 2425::Battle Battalions
2427
+ 2426::Dragon Fin Soup
2428
+ 2427::PONCHO
2429
+ 2428::Mirrored - Chapter 1
2430
+ 2429::Fallout: A Post Nuclear Role Playing Game
2431
+ 2430::Sonic Lost World
2432
+ 2431::Bit Shifter
2433
+ 2432::The Extinction
2434
+ 2433::The Seven Years War (1756-1763)
2435
+ 2434::Pray For Diamonds
2436
+ 2435::Devil's Bluff
2437
+ 2436::Morphine
2438
+ 2437::Deathless: The City's Thirst
2439
+ 2438::ALPAGES : THE FIVE BOOKS
2440
+ 2439::Warhammer: End Times - Vermintide
2441
+ 2440::RPG Maker MV
2442
+ 2441::200% Mixed Juice!
2443
+ 2442::Flight of the Paladin
2444
+ 2443::Heavy Fire: Shattered Spear
2445
+ 2444::Mu Complex
2446
+ 2445::Chaos Reborn
2447
+ 2446::System Shock: Enhanced Edition
2448
+ 2447::Wurm Unlimited
2449
+ 2448::PixelJunk™ Shooter Ultimate
2450
+ 2449::Eternal Step
2451
+ 2450::Rogue State
2452
+ 2451::The Jackbox Party Pack 2
2453
+ 2452::Minecraft: Story Mode - A Telltale Games Series
2454
+ 2453::Deus Ex: Revision
2455
+ 2454::Bedlam
2456
+ 2455::Beyond Sol
2457
+ 2456::Soccer Manager 2016
2458
+ 2457::March of Industry: Very Capitalist Factory Simulator Entertainments
2459
+ 2458::There Was A Caveman
2460
+ 2459::2064: Read Only Memories
2461
+ 2460::SKYHILL
2462
+ 2461::Korwin The Game
2463
+ 2462::Meridian: Age of Invention
2464
+ 2463::Arcana Heart 3 LOVE MAX!!!!!
2465
+ 2464::Stigmat
2466
+ 2465::Ninja Pizza Girl
2467
+ 2466::Lumber Island - That Special Place
2468
+ 2467::Puzzle Ball
2469
+ 2468::ARM PLANETARY PROSPECTORS Asteroid Resource Mining
2470
+ 2469::Black & White Bushido
2471
+ 2470::Hylics
2472
+ 2471::Ratings War
2473
+ 2472::Silver Creek Falls: Chapter 2
2474
+ 2473::The Escapists: The Walking Dead
2475
+ 2474::NBA 2K16
2476
+ 2475::Blowy Fish
2477
+ 2476::Shoppy Mart: Steam Edition
2478
+ 2477::Shapes of Gray
2479
+ 2478::Kanji Training Game
2480
+ 2479::Golden Rush
2481
+ 2480::A Kiss For The Petals - Remembering How We Met
2482
+ 2481::Stairs
2483
+ 2482::Tic-Toc-Tower
2484
+ 2483::Orion: A Sci-Fi Visual Novel
2485
+ 2484::Keen Dreams
2486
+ 2485::Tango Fiesta
2487
+ 2486::Explosionade
2488
+ 2487::Invasion
2489
+ 2488::TankZone Battle
2490
+ 2489::Destiny Warriors RPG
2491
+ 2490::The Juicer
2492
+ 2491::Dungeon Nightmares II : The Memory
2493
+ 2492::Future Farmer
2494
+ 2493::Fate Tectonics
2495
+ 2494::Alien Robot Monsters
2496
+ 2495::Warhammer 40,000: Regicide
2497
+ 2496::Shower With Your Dad Simulator 2015: Do You Still Shower With Your Dad
2498
+ 2497::Act of Aggression - Reboot Edition
2499
+ 2498::Streamline
2500
+ 2499::Kitchen Simulator 2015
2501
+ 2500::Raiden IV: OverKill
2502
+ 2501::Cute Things Dying Violently
2503
+ 2502::Snik
2504
+ 2503::A Wise Use of Time
2505
+ 2504::Rampage Knights
2506
+ 2505::Zoo Empire
2507
+ 2506::Wars and Warriors: Joan of Arc
2508
+ 2507::Almightree: The Last Dreamer
2509
+ 2508::Rememoried
2510
+ 2509::Tinboy
2511
+ 2510::One More Line
2512
+ 2511::Port of Call
2513
+ 2512::Proto Raider
2514
+ 2513::Redemption: Eternal Quest
2515
+ 2514::Clown House (Palyaço Evi)
2516
+ 2515::I Shall Remain
2517
+ 2516::Grandia® II Anniversary Edition
2518
+ 2517::Flywrench
2519
+ 2518::Pretty Girls Mahjong Solitaire
2520
+ 2519::Owys
2521
+ 2520::Evoland 2
2522
+ 2521::Party Hard
2523
+ 2522::Hegemony III: Clash of the Ancients
2524
+ 2523::Crookz - The Big Heist
2525
+ 2524::WARMODE
2526
+ 2525::Age of Survival
2527
+ 2526::Pump-Action Captain
2528
+ 2527::Curse of the Crescent Isle DX
2529
+ 2528::In Between
2530
+ 2529::Forget Me Not: My Organic Garden
2531
+ 2530::ORBITOR
2532
+ 2531::Super Mega Baseball: Extra Innings
2533
+ 2532::VERSUS: The Lost Ones
2534
+ 2533::Zanzarah: The Hidden Portal
2535
+ 2534::ABD: A Beautiful Day
2536
+ 2535::Volume
2537
+ 2536::RPG MO
2538
+ 2537::Fingered
2539
+ 2538::Illuminascii
2540
+ 2539::Nightside
2541
+ 2540::Fitz the Fox
2542
+ 2541::Spider: Rite of the Shrouded Moon
2543
+ 2542::Shu's Garden
2544
+ 2543::Shadow of Kingdoms
2545
+ 2544::12 Labours of Hercules III: Girl Power
2546
+ 2545::Putrefaction
2547
+ 2546::Hidden: On the trail of the Ancients
2548
+ 2547::Dream
2549
+ 2548::Job the Leprechaun
2550
+ 2549::Go Home - Rage incoming
2551
+ 2550::The Deletion
2552
+ 2551::Stick 'Em Up 2: Paper Adventures
2553
+ 2552::Legend of Kay Anniversary
2554
+ 2553::Hogs of War
2555
+ 2554::Chronicon
2556
+ 2555::Spooky's Jump Scare Mansion
2557
+ 2556::Another Star
2558
+ 2557::Inferno 2
2559
+ 2558::The Viceroy
2560
+ 2559::Airport Madness 4
2561
+ 2560::Borderless Gaming
2562
+ 2561::Way of the Samurai 4
2563
+ 2562::FEIST
2564
+ 2563::Zenzizenzic
2565
+ 2564::Terra Nova: Strike Force Centauri
2566
+ 2565::Time Clickers
2567
+ 2566::RETSNOM
2568
+ 2567::Energy Balance
2569
+ 2568::Stranded In Time
2570
+ 2569::Niko: Through The Dream
2571
+ 2570::Executive Assault
2572
+ 2571::RPG Maker 2000
2573
+ 2572::001 Game Creator
2574
+ 2573::Fighties
2575
+ 2574::Lost Lands: The Four Horsemen
2576
+ 2575::Steve Chong Finds Out That Suicide is a Bad Idea
2577
+ 2576::MechaNika
2578
+ 2577::SEEP Universe
2579
+ 2578::TeraBlaster
2580
+ 2579::How to Take Off Your Mask
2581
+ 2580::Scraps: Modular Vehicle Combat
2582
+ 2581::Cricket Captain 2015
2583
+ 2582::Super Cyborg
2584
+ 2583::Clandestinity of Elsie
2585
+ 2584::Gary Grigsby's War in the East
2586
+ 2585::Cosmophony
2587
+ 2586::Trove
2588
+ 2587::The Red Solstice
2589
+ 2588::Weapons Genius
2590
+ 2589::Crash Dive
2591
+ 2590::Austin High
2592
+ 2591::Gaming In Color
2593
+ 2592::Corrosion: Cold Winter Waiting [Enhanced Edition]
2594
+ 2593::Quiplash
2595
+ 2594::RONIN
2596
+ 2595::Vector Thrust
2597
+ 2596::Unpossible
2598
+ 2597::Vapour
2599
+ 2598::Devilry
2600
+ 2599::Yargis - Space Melee
2601
+ 2600::Sumo Revise
2602
+ 2601::Kings of Kung Fu
2603
+ 2602::Super 3-D Noah's Ark
2604
+ 2603::Centauri Sector
2605
+ 2604::Medieval: Total War™ - Collection
2606
+ 2605::SHOGUN: Total War™ - Collection
2607
+ 2606::Interstellar Rift
2608
+ 2607::Devil May Cry® 4 Special Edition
2609
+ 2608::Masterspace
2610
+ 2609::Shot In The Dark
2611
+ 2610::Monster Jam Battlegrounds
2612
+ 2611::Sherlock Holmes Consulting Detective: The Case of the Mummy's Curse
2613
+ 2612::TransPlan
2614
+ 2613::Out of Reach
2615
+ 2614::Tap Heroes
2616
+ 2615::Wimp: Who Stole My Pants?
2617
+ 2616::12 Labours of Hercules II: The Cretan Bull
2618
+ 2617::XIIZEAL
2619
+ 2618::16bit Trader
2620
+ 2619::Butsbal
2621
+ 2620::SimpleRockets
2622
+ 2621::New kind of adventure
2623
+ 2622::Cave Coaster
2624
+ 2623::Organ Biker
2625
+ 2624::Flix The Flea
2626
+ 2625::Avenging Angel
2627
+ 2626::3D MiniGolf
2628
+ 2627::Horizon Shift
2629
+ 2628::Star Horizon
2630
+ 2629::Subject 13
2631
+ 2630::Locoland
2632
+ 2631::Chip's Challenge 1
2633
+ 2632::Chip's Challenge 2
2634
+ 2633::Portal of Evil: Stolen Runes Collector's Edition
2635
+ 2634::Spaceforce Rogue Universe HD
2636
+ 2635::GUILTY GEAR XX ACCENT CORE PLUS R
2637
+ 2636::Stones of Sorrow
2638
+ 2637::Magnetic: Cage Closed
2639
+ 2638::Quadrant
2640
+ 2639::Zombie Zoeds
2641
+ 2640::Dev Guy
2642
+ 2641::Crossfire: Dungeons
2643
+ 2642::Sunset
2644
+ 2643::Bunker - The Underground Game
2645
+ 2644::GameGuru
2646
+ 2645::Making History: The Calm and the Storm Gold Edition
2647
+ 2646::S.K.I.L.L. - Special Force 2 (Shooter)
2648
+ 2647::D Series OFF ROAD Driving Simulation
2649
+ 2648::Breezeblox
2650
+ 2649::CrossCode
2651
+ 2650::Broadsword : Age of Chivalry
2652
+ 2651::Grey Cubes
2653
+ 2652::Higurashi When They Cry Hou - Ch.1 Onikakushi
2654
+ 2653::Batla
2655
+ 2654::Close Combat - Panthers in the Fog
2656
+ 2655::Quell Memento
2657
+ 2656::Invisible, Inc.
2658
+ 2657::LOST ORBIT
2659
+ 2658::Despair
2660
+ 2659::Airport Firefighters - The Simulation
2661
+ 2660::Why Am I Dead At Sea
2662
+ 2661::FINAL FANTASY IV: THE AFTER YEARS
2663
+ 2662::Toren
2664
+ 2663::Shrooms
2665
+ 2664::Selfie : Sisters of the Amniotic Lens
2666
+ 2665::Yomi
2667
+ 2666::Star Trek™ : 25th Anniversary
2668
+ 2667::Snakebird
2669
+ 2668::Controller Companion
2670
+ 2669::Alexia Crow and the Cave of Heroes
2671
+ 2670::Egg Returns Home
2672
+ 2671::GemCraft - Chasing Shadows
2673
+ 2672::Exanima
2674
+ 2673::The Weaponographist
2675
+ 2674::Lux Delux
2676
+ 2675::Order of Battle: World War II
2677
+ 2676::Solarix
2678
+ 2677::Abyss Raiders: Uncharted
2679
+ 2678::Soul Locus
2680
+ 2679::R.B.I. Baseball 15
2681
+ 2680::Jump/Boxer
2682
+ 2681::Earthtongue
2683
+ 2682::The Little Crane That Could
2684
+ 2683::Kerbal Space Program
2685
+ 2684::Son of Nor
2686
+ 2685::Angry Video Game Nerd: The Movie
2687
+ 2686::STAR WARS™ - X-Wing Special Edition
2688
+ 2687::STAR WARS™ X-Wing vs TIE Fighter - Balance of Power Campaigns™
2689
+ 2688::Uncanny Valley
2690
+ 2689::Kaiju-A-GoGo
2691
+ 2690::Assassin’s Creed® Chronicles: China
2692
+ 2691::GameLoading: Rise of the Indies
2693
+ 2692::MX vs. ATV Unleashed
2694
+ 2693::Asguaard
2695
+ 2694::Grand Theft Auto V
2696
+ 2695::Mortal Kombat X
2697
+ 2696::BoxesWithGuns
2698
+ 2697::Escape Machines
2699
+ 2698::The Emptiness Deluxe Edition
2700
+ 2699::SWR JST DX Selective Memory Erase Effect
2701
+ 2700::Finders
2702
+ 2701::I am Bread
2703
+ 2702::Square Heroes
2704
+ 2703::Primal Carnage: Extinction
2705
+ 2704::Solar System Conflict
2706
+ 2705::VoidExpanse
2707
+ 2706::Chronicles of Teddy
2708
+ 2707::Titan Souls
2709
+ 2708::Out There: Ω Edition
2710
+ 2709::Motivational Growth
2711
+ 2710::Pillars of Eternity
2712
+ 2711::Cultures - Northland
2713
+ 2712::Please, Don’t Touch Anything
2714
+ 2713::Cyberpunk 3776
2715
+ 2714::It came from space, and ate our brains
2716
+ 2715::Othello
2717
+ 2716::World of Mixed Martial Arts 3
2718
+ 2717::VRC PRO
2719
+ 2718::An Alternative Reality: The Football Manager Documentary
2720
+ 2719::Immune - True Survival
2721
+ 2720::Top Trumps Turbo
2722
+ 2721::Vox Populi Vox Dei 2
2723
+ 2722::Tennis Elbow 2013
2724
+ 2723::Chicken Invaders 5
2725
+ 2724::Ghost Encounters: Deadwood - Collector's Edition
2726
+ 2725::Hektor
2727
+ 2726::Dodge
2728
+ 2727::Soccertron
2729
+ 2728::Dreaming Sarah
2730
+ 2729::Hotline Miami 2: Wrong Number
2731
+ 2730::Ori and the Blind Forest
2732
+ 2731::ShellShock Live
2733
+ 2732::Dreamscapes: Nightmare's Heir - Premium Edition
2734
+ 2733::After Reset RPG
2735
+ 2734::ASA: A Space Adventure - Remastered Edition
2736
+ 2735::Ubinota
2737
+ 2736::Pregnancy
2738
+ 2737::rFactor
2739
+ 2738::Savage Lands
2740
+ 2739::Synonymy
2741
+ 2740::Dustoff Heli Rescue
2742
+ 2741::Stay Alight
2743
+ 2742::Runestone Keeper
2744
+ 2743::Pneuma: Breath of Life
2745
+ 2744::The Dark Stone from Mebara
2746
+ 2745::Bard to the Future
2747
+ 2746::Tales of the Orient: The Rising Sun
2748
+ 2747::Belladonna
2749
+ 2748::Cubicle Quest
2750
+ 2749::A Druid's Duel
2751
+ 2750::Republique
2752
+ 2751::Disney•Pixar Finding Nemo
2753
+ 2752::Mystery of Neuschwanstein
2754
+ 2753::HassleHeart
2755
+ 2754::BlastZone 2
2756
+ 2755::Camera Obscura
2757
+ 2756::You Are Not A Banana: Better Edition
2758
+ 2757::The Book of Unwritten Tales 2
2759
+ 2758::Raptor: Call of The Shadows - 2015 Edition
2760
+ 2759::The Escapists
2761
+ 2760::Reload
2762
+ 2761::Plush
2763
+ 2762::Wooden Floor
2764
+ 2763::Total War: ATTILA
2765
+ 2764::Stock Car Extreme
2766
+ 2765::TerraTech
2767
+ 2766::Goats on a Bridge
2768
+ 2767::Terra Lander
2769
+ 2768::Mystery Masters: Psycho Train Deluxe Edition
2770
+ 2769::Just Get Through
2771
+ 2770::SickBrick
2772
+ 2771::Steam Heroes
2773
+ 2772::eden*
2774
+ 2773::Exodus Wars: Fractured Empire
2775
+ 2774::Deathtrap
2776
+ 2775::Front Office Football Seven
2777
+ 2776::Seven Kingdoms 2 HD
2778
+ 2777::Natural Soccer
2779
+ 2778::Infect and Destroy
2780
+ 2779::Supreme League of Patriots
2781
+ 2780::Life is Strange - Episode 1
2782
+ 2781::Drive to Hell
2783
+ 2782::Stardust Vanguards
2784
+ 2783::Streets of Chaos
2785
+ 2784::Heroes® of Might & Magic® III - HD Edition
2786
+ 2785::Gravity Ghost
2787
+ 2786::The Uninvited: MacVenture Series
2788
+ 2787::The Old Tree
2789
+ 2788::Shadowgate: MacVenture Series
2790
+ 2789::868-HACK
2791
+ 2790::8BitMMO
2792
+ 2791::Pahelika: Secret Legends
2793
+ 2792::Phantom Breaker: Battle Grounds
2794
+ 2793::Dark Gates
2795
+ 2794::Lost in a Forest
2796
+ 2795::Kingdoms CCG
2797
+ 2796::Decisive Campaigns: The Blitzkrieg from Warsaw to Paris
2798
+ 2797::Mimpi
2799
+ 2798::Combat Monsters
2800
+ 2799::Citizens of Earth
2801
+ 2800::Disillusions Manga Horror
2802
+ 2801::MotorSport Revolution
2803
+ 2802::Double Dragon Trilogy
2804
+ 2803::HyperRogue
2805
+ 2804::Cahors Sunset
2806
+ 2805::Unhack
2807
+ 2806::BattleSpace
2808
+ 2807::Sky Gamblers: Storm Raiders
2809
+ 2808::Cubot
2810
+ 2809::dUpLicity ~Beyond the Lies~
2811
+ 2810::Towers of Altrac - Epic Defense Battles
2812
+ 2811::Ninja Guy
2813
+ 2812::Solar War
2814
+ 2813::One Late Night: Deadline
2815
+ 2814::Dwarf Tower
2816
+ 2815::Nameless ~The one thing you must recall~
2817
+ 2816::Xsyon - Prelude
2818
+ 2817::Rooster Teeth vs. Zombiens
2819
+ 2818::Sportsfriends
2820
+ 2819::Chronicles of a Dark Lord: Episode 1 Tides of Fate Complete
2821
+ 2820::The Repopulation
2822
+ 2821::Outcast 1.1
2823
+ 2822::Microsoft Flight Simulator X: Steam Edition
2824
+ 2823::Rime Berta
2825
+ 2824::Marvin's Mittens
2826
+ 2825::THE KING OF FIGHTERS '98 ULTIMATE MATCH FINAL EDITION
2827
+ 2826::Sky Mercenaries
2828
+ 2827::Last Inua
2829
+ 2828::MODO indie
2830
+ 2829::It's A Wipe!
2831
+ 2830::BlazBlue: Continuum Shift Extend
2832
+ 2831::About Love, Hate and the other ones
2833
+ 2832::Words for Evil
2834
+ 2833::ALLTYNEX Second
2835
+ 2834::Cargo 3
2836
+ 2835::Carmageddon TDR 2000
2837
+ 2836::Club Manager 2015
2838
+ 2837::Bombing Bastards
2839
+ 2838::fault - milestone one
2840
+ 2839::Magnifico
2841
+ 2840::1Quest
2842
+ 2841::Potatoman Seeks the Troof
2843
+ 2842::The Old City: Leviathan
2844
+ 2843::Rising World
2845
+ 2844::Tales from the Borderlands
2846
+ 2845::Feel-A-Maze
2847
+ 2846::Ilamentia
2848
+ 2847::Akaneiro: Demon Hunters
2849
+ 2848::The Jackbox Party Pack
2850
+ 2849::Eternal Winter
2851
+ 2850::Rollers of the Realm
2852
+ 2851::Cinemaware Anthology: 1986-1991
2853
+ 2852::The Sun and Moon
2854
+ 2853::Bloons TD 5
2855
+ 2854::Basketball Pro Management 2015
2856
+ 2855::Cherry Tree High I! My! Girls!
2857
+ 2856::Robotex
2858
+ 2857::Spriter Pro
2859
+ 2858::Assassin's Creed® Unity
2860
+ 2859::World of Subways 1 – The Path
2861
+ 2860::Koya Rift
2862
+ 2861::The Detail
2863
+ 2862::Lords Of The Fallen™
2864
+ 2863::Majestic Nights
2865
+ 2864::Buzz Aldrin's Space Program Manager
2866
+ 2865::NS2: Combat
2867
+ 2866::Depth
2868
+ 2867::Skara - The Blade Remains
2869
+ 2868::Hardland
2870
+ 2869::Squishy the Suicidal Pig
2871
+ 2870::Pajama Sam: Games to Play on Any Day
2872
+ 2871::Fatty Bear's Birthday Surprise
2873
+ 2872::Slave Zero
2874
+ 2873::Ziggurat
2875
+ 2874::Data Hacker: Corruption
2876
+ 2875::Door Kickers
2877
+ 2876::DarkEnd
2878
+ 2877::Supreme Ruler Ultimate
2879
+ 2878::May’s Mysteries: The Secret of Dragonville
2880
+ 2879::Borderlands: The Pre-Sequel
2881
+ 2880::Last Knight: Rogue Rider Edition
2882
+ 2881::Disney Epic Mickey 2: The Power of Two
2883
+ 2882::Disney•Pixar Cars 2: The Video Game
2884
+ 2883::Disney•Pixar Brave: The Video Game
2885
+ 2884::A Golden Wake
2886
+ 2885::Sid Meier's Colonization (Classic)
2887
+ 2886::Schein
2888
+ 2887::Boot Hill Heroes
2889
+ 2888::ShaderTool
2890
+ 2889::Split/Second
2891
+ 2890::Disney TRON: Evolution
2892
+ 2891::Gauntlet™ Slayer Edition
2893
+ 2892::Schrödinger’s Cat And The Raiders Of The Lost Quark
2894
+ 2893::TransOcean: The Shipping Company
2895
+ 2894::911: First Responders®
2896
+ 2895::Roadside Assistance Simulator
2897
+ 2896::Kraven Manor
2898
+ 2897::Dandelion - Wishes brought to you -
2899
+ 2898::Command: Modern Air / Naval Operations WOTY
2900
+ 2899::Outland
2901
+ 2900::Shadow Puppeteer
2902
+ 2901::The Stalin Subway
2903
+ 2902::openCanvas 6
2904
+ 2903::Boson X
2905
+ 2904::Season Match
2906
+ 2905::Bridge Constructor Medieval
2907
+ 2906::Voyage: Journey to the Moon
2908
+ 2907::Runers
2909
+ 2908::RECYCLE
2910
+ 2909::Train Fever
2911
+ 2910::Anarchy Arcade
2912
+ 2911::Dead Rising 3 Apocalypse Edition
2913
+ 2912::Ford Street Racing
2914
+ 2913::Ford Racing 3
2915
+ 2914::Ford Racing Off Road
2916
+ 2915::Hack 'n' Slash
2917
+ 2916::The Guild Gold Edition
2918
+ 2917::Transport Giant
2919
+ 2918::Toast Time
2920
+ 2919::FATE: The Traitor Soul
2921
+ 2920::The Great War 1918
2922
+ 2921::Bravada
2923
+ 2922::Sentris
2924
+ 2923::Mountain
2925
+ 2924::Haunted
2926
+ 2925::ACE - Arena: Cyber Evolution
2927
+ 2926::Dungeons: The Eye of Draconus
2928
+ 2927::Risen 3 - Titan Lords
2929
+ 2928::Chess 2: The Sequel
2930
+ 2929::Depth Hunter 2: Deep Dive
2931
+ 2930::Cubic Castles
2932
+ 2931::Ultra Street Fighter® IV
2933
+ 2932::Heroes of a Broken Land
2934
+ 2933::From the Depths
2935
+ 2934::Rex Rocket
2936
+ 2935::Space Hack
2937
+ 2936::Back to Bed
2938
+ 2937::Pure Pool
2939
+ 2938::Blackbay Asylum
2940
+ 2939::Guns and Robots
2941
+ 2940::Industry Empire
2942
+ 2941::Colin McRae Rally
2943
+ 2942::Spud's Quest
2944
+ 2943::Unrest
2945
+ 2944::Gods Will Be Watching
2946
+ 2945::Frayed Knights: The Skull of S'makh-Daon
2947
+ 2946::Terrorhedron Tower Defense
2948
+ 2947::UnEpic
2949
+ 2948::Fall of the New Age Premium Edition
2950
+ 2949::Murder Miners
2951
+ 2950::Total Pro Golf 3
2952
+ 2951::Small Town Terrors Pilgrim's Hook Collector's Edition
2953
+ 2952::Blood: One Unit Whole Blood
2954
+ 2953::Blood II: The Chosen + Expansion
2955
+ 2954::Quest for Infamy
2956
+ 2955::Stained
2957
+ 2956::Cyto
2958
+ 2957::Isomer
2959
+ 2958::Robot Rescue Revolution
2960
+ 2959::Alpha Zylon
2961
+ 2960::Wayward Manor
2962
+ 2961::Aperture Tag: The Paint Gun Testing Initiative
2963
+ 2962::Ground Pounders
2964
+ 2963::Shantae: Risky's Revenge - Director's Cut
2965
+ 2964::Wild Warfare
2966
+ 2965::Whispering Willows
2967
+ 2966::AutoTileGen
2968
+ 2967::Ship Simulator: Maritime Search and Rescue
2969
+ 2968::The House
2970
+ 2969::Railroad Pioneer
2971
+ 2970::Bridge Constructor Playground
2972
+ 2971::Soldiers: Heroes of World War II
2973
+ 2972::Shattered Planet
2974
+ 2973::School Bus Fun
2975
+ 2974::Aeon Command
2976
+ 2975::Asteria
2977
+ 2976::Aura Kingdom
2978
+ 2977::Battleplan: American Civil War
2979
+ 2978::Z
2980
+ 2979::Sniper Elite 3
2981
+ 2980::Heroes Rise: The Prodigy
2982
+ 2981::Pulstar
2983
+ 2982::Crimsonland
2984
+ 2983::Heileen 2: The Hands Of Fate
2985
+ 2984::Ichi
2986
+ 2985::Tex Murphy: Overseer
2987
+ 2986::SPINTIRES™
2988
+ 2987::Space Run
2989
+ 2988::The Incredible Adventures of Van Helsing II
2990
+ 2989::Dungeon of Elements
2991
+ 2990::Probability 0
2992
+ 2991::The Entente Gold
2993
+ 2992::Floating Point
2994
+ 2993::The Lost Crown
2995
+ 2994::Haunt the House: Terrortown
2996
+ 2995::Racer 8
2997
+ 2996::Putt-Putt®: Pep's Birthday Surprise
2998
+ 2997::Pajama Sam's Sock Works
2999
+ 2998::Pajama Sam's Lost & Found
3000
+ 2999::Ionball 2: Ionstorm
3001
+ 3000::Freespace 2
3002
+ 3001::Crimzon Clover WORLD IGNITION
3003
+ 3002::Dead Bits
3004
+ 3003::Pandora: First Contact
3005
+ 3004::Manhunter
3006
+ 3005::Astebreed: Definitive Edition
3007
+ 3006::Pixel Boy and the Ever Expanding Dungeon
3008
+ 3007::Salammbô: Battle for Carthage
3009
+ 3008::Lost Marbles
3010
+ 3009::Battlepaths
3011
+ 3010::Legends of Persia
3012
+ 3011::1001 Spikes
3013
+ 3012::Defiance
3014
+ 3013::Dark Raid
3015
+ 3014::Noir Syndrome
3016
+ 3015::Metal Planet
3017
+ 3016::Splatter - Zombie Apocalypse
3018
+ 3017::Meltdown
3019
+ 3018::Pajama Sam 4: Life Is Rough When You Lose Your Stuff!
3020
+ 3019::Putt-Putt® Enters the Race
3021
+ 3020::Game Character Hub
3022
+ 3021::ibb & obb
3023
+ 3022::FINAL FANTASY III
3024
+ 3023::Antisquad
3025
+ 3024::Super Game Jam
3026
+ 3025::Panzer Tactics HD
3027
+ 3026::Ballad of Solar
3028
+ 3027::Monochroma
3029
+ 3028::G-Ball
3030
+ 3029::Arcadecraft
3031
+ 3030::Wolfenstein: The New Order
3032
+ 3031::Q.U.B.E: Director's Cut
3033
+ 3032::World of Guns: Gun Disassembly
3034
+ 3033::Firefighters 2014
3035
+ 3034::Always Sometimes Monsters
3036
+ 3035::Panzer Elite Action Gold Edition
3037
+ 3036::Jumpdrive
3038
+ 3037::DYNASTY WARRIORS 8: Xtreme Legends Complete Edition / 真・三國無双7 with 猛将伝
3039
+ 3038::Ascendant
3040
+ 3039::Empress Of The Deep
3041
+ 3040::Memento Mori 2
3042
+ 3041::Iron Storm
3043
+ 3042::Empress Of The Deep 2: Song Of The Blue Whale
3044
+ 3043::RUNNING WITH RIFLES
3045
+ 3044::1849
3046
+ 3045::Terminal Velocity
3047
+ 3046::Secret Agent
3048
+ 3047::Action! - Gameplay Recording and Streaming
3049
+ 3048::Tesla Effect: A Tex Murphy Adventure
3050
+ 3049::The Impossible Game
3051
+ 3050::Malevolence: The Sword of Ahkranox
3052
+ 3051::8BitBoy™
3053
+ 3052::GunZ 2: The Second Duel
3054
+ 3053::Haegemonia: Legions of Iron
3055
+ 3054::Putt-Putt® and Pep's Dog on a Stick
3056
+ 3055::Spy Fox 2 "Some Assembly Required"
3057
+ 3056::Pajama Sam 2: Thunder and Lightning Aren't So Frightening
3058
+ 3057::Colonies Online
3059
+ 3058::Farming World
3060
+ 3059::The Dungeoning
3061
+ 3060::Eurofighter Typhoon
3062
+ 3061::Death Rally (Classic)
3063
+ 3062::Millennium - A New Hope
3064
+ 3063::Battlepillars Gold Edition
3065
+ 3064::Slip
3066
+ 3065::Escape The Museum
3067
+ 3066::Evopollution
3068
+ 3067::Always Remember Me
3069
+ 3068::Millie
3070
+ 3069::The Last Federation
3071
+ 3070::Thinking with Time Machine
3072
+ 3071::Sentinel
3073
+ 3072::FRACT OSC
3074
+ 3073::Starlight Inception™™
3075
+ 3074::Life Goes On: Done to Death
3076
+ 3075::Wargame: Red Dragon
3077
+ 3076::NEStalgia
3078
+ 3077::Dark Lore Mysteries: The Hunt For Truth
3079
+ 3078::Avoid - Sensory Overload
3080
+ 3079::Strike Suit Zero: Director's Cut
3081
+ 3080::LEGO® The Hobbit™
3082
+ 3081::Aggression: Europe Under Fire
3083
+ 3082::Haunted Past: Realm of Ghosts
3084
+ 3083::Mirror Mysteries 2
3085
+ 3084::Warlock 2: The Exiled
3086
+ 3085::Princess Isabella - Return of the Curse
3087
+ 3086::ReignMaker
3088
+ 3087::White Haven Mysteries
3089
+ 3088::Spy Fox in "Dry Cereal"
3090
+ 3089::Praetorians
3091
+ 3090::Adventure Time: Finn and Jake's Epic Quest
3092
+ 3091::Collapse
3093
+ 3092::Castlevania: Lords of Shadow – Mirror of Fate HD
3094
+ 3093::Creeper World 3: Arc Eternal
3095
+ 3094::TUG
3096
+ 3095::Rogue's Tale
3097
+ 3096::Real Horror Stories Ultimate Edition
3098
+ 3097::Batman™: Arkham Origins Blackgate - Deluxe Edition
3099
+ 3098::Gray Matter
3100
+ 3099::3 Stars of Destiny
3101
+ 3100::Hive
3102
+ 3101::Strategic War in Europe
3103
+ 3102::YAIBA: NINJA GAIDEN Z
3104
+ 3103::Ubersoldier II
3105
+ 3104::Explodemon
3106
+ 3105::Call of Duty®: Ghosts
3107
+ 3106::Desert Gunner
3108
+ 3107::Desert Thunder
3109
+ 3108::US and THEM
3110
+ 3109::Revolution Ace
3111
+ 3110::10 Second Ninja
3112
+ 3111::Incoming Forces
3113
+ 3112::City of Steam: Arkadia
3114
+ 3113::Return to Mysterious Island 2
3115
+ 3114::Infested Planet
3116
+ 3115::Corporate Lifestyle Simulator
3117
+ 3116::Dragons and Titans
3118
+ 3117::Soulbringer
3119
+ 3118::The Book of Legends
3120
+ 3119::Post Master
3121
+ 3120::Riptide GP2
3122
+ 3121::Race To Mars
3123
+ 3122::Mitsurugi Kamui Hikae
3124
+ 3123::Procyon
3125
+ 3124::Where Angels Cry
3126
+ 3125::Iron Soul
3127
+ 3126::Chuck's Challenge 3D
3128
+ 3127::One Finger Death Punch
3129
+ 3128::Descent 2
3130
+ 3129::Humanity Asset
3131
+ 3130::Holy Avatar vs. Maidens of the Dead
3132
+ 3131::Ninja Cats vs Samurai Dogs
3133
+ 3132::Rambo The Video Game: Baker Team
3134
+ 3133::Go! Go! Nippon! ~My First Trip to Japan~
3135
+ 3134::Major Mayhem
3136
+ 3135::Journal
3137
+ 3136::WazHack
3138
+ 3137::Drox Operative
3139
+ 3138::Masters of the World - Geopolitical Simulator 3
3140
+ 3139::Super Killer Hornet: Resurrection
3141
+ 3140::Dead Man's Draw
3142
+ 3141::The LEGO® Movie - Videogame
3143
+ 3142::Aveyond 3-1: Lord of Twilight
3144
+ 3143::Graviteam Tactics: Operation Star
3145
+ 3144:://N.P.P.D. RUSH//- The milk of Ultraviolet
3146
+ 3145::Let's Sing
3147
+ 3146::MorphVOX Pro - Voice Changer
3148
+ 3147::Loadout
3149
+ 3148::Glacier 3: The Meltdown
3150
+ 3149::Might & Magic X - Legacy
3151
+ 3150::KAMI
3152
+ 3151::Original War
3153
+ 3152::Strike Vector
3154
+ 3153::Broken Age
3155
+ 3154::KickBeat Steam Edition
3156
+ 3155::Hitman: Contracts
3157
+ 3156::Blackguards
3158
+ 3157::Steam Bandits: Outpost
3159
+ 3158::Loren The Amazon Princess
3160
+ 3159::METAL GEAR RISING: REVENGEANCE
3161
+ 3160::Realms of Arkania 1 - Blade of Destiny Classic
3162
+ 3161::3089 -- Futuristic Action RPG
3163
+ 3162::Kingdom Rush
3164
+ 3163::Unearthed: Trail of Ibn Battuta - Episode 1 - Gold Edition
3165
+ 3164::Grimm
3166
+ 3165::Dementium II HD
3167
+ 3166::The Walking Dead: Season 2
3168
+ 3167::Zigfrak
3169
+ 3168::RPG Maker XP
3170
+ 3169::Stick it to The Man!
3171
+ 3170::Blockland
3172
+ 3171::Just Cause 2: Multiplayer Mod
3173
+ 3172::Samurai Gunn
3174
+ 3173::Small World 2
3175
+ 3174::OMSI 2: Steam Edition
3176
+ 3175::MURI
3177
+ 3176::Nimble Quest
3178
+ 3177::The Novelist
3179
+ 3178::Speedball 2 HD
3180
+ 3179::FINAL FANTASY VIII
3181
+ 3180::Draw a Stickman: EPIC
3182
+ 3181::Postmortem: One Must Die (Extended Cut)
3183
+ 3182::Vector
3184
+ 3183::Half-Life: Before
3185
+ 3184::Tank Operations: European Campaign
3186
+ 3185::Gas Guzzlers Extreme
3187
+ 3186::Blood Knights
3188
+ 3187::Earth 2140
3189
+ 3188::Injustice: Gods Among Us Ultimate Edition
3190
+ 3189::Intake
3191
+ 3190::Type:Rider
3192
+ 3191::Anomaly Warzone Earth Mobile Campaign
3193
+ 3192::Valdis Story: Abyssal City
3194
+ 3193::Magicka: Wizards of the Square Tablet
3195
+ 3194::Sniper Elite: Nazi Zombie Army 2
3196
+ 3195::Heli Heroes
3197
+ 3196::Journey of a Roach
3198
+ 3197::The 7th Guest
3199
+ 3198::Stronghold HD
3200
+ 3199::FORCED: Slightly Better Edition
3201
+ 3200::Dragon's Lair 2: Time Warp
3202
+ 3201::Demonicon
3203
+ 3202::NARUTO SHIPPUDEN: Ultimate Ninja STORM 3 Full Burst HD
3204
+ 3203::WRC 4 FIA World Rally Championship
3205
+ 3204::Democracy 3
3206
+ 3205::Talisman: Prologue
3207
+ 3206::Urban Chaos
3208
+ 3207::Megabyte Punch
3209
+ 3208::Day One : Garry's Incident
3210
+ 3209::Dysfunctional Systems: Learning to Manage Chaos
3211
+ 3210::Giana Sisters: Twisted Dreams - Rise of the Owlverlord
3212
+ 3211::Gorky 17
3213
+ 3212::KnightShift
3214
+ 3213::Legends of Aethereus
3215
+ 3214::Daikatana
3216
+ 3215::Salvation Prophecy
3217
+ 3216::Air Conflicts: Vietnam
3218
+ 3217::Silent Storm Gold Edition
3219
+ 3218::PlayClaw 5 - Game Recording and Streaming
3220
+ 3219::Artemis Spaceship Bridge Simulator
3221
+ 3220::Memoria
3222
+ 3221::Brothers - A Tale of Two Sons
3223
+ 3222::Hate Plus
3224
+ 3223::Tom Clancy’s Splinter Cell Blacklist
3225
+ 3224::Skullgirls
3226
+ 3225::Spelunky
3227
+ 3226::Guncraft
3228
+ 3227::Leisure Suit Larry in the Land of the Lounge Lizards: Reloaded
3229
+ 3228::Storm
3230
+ 3229::The Apogee Throwback Pack
3231
+ 3230::Interstellar Marines
3232
+ 3231::Risk
3233
+ 3232::The Night of the Rabbit
3234
+ 3233::Cubetractor
3235
+ 3234::The Swapper
3236
+ 3235::The Incredible Adventures of Van Helsing
3237
+ 3236::Dust: An Elysian Tail
3238
+ 3237::Super Puzzle Platformer Deluxe
3239
+ 3238::Gunpoint
3240
+ 3239::Super Sanctum TD
3241
+ 3240::Anomaly 2
3242
+ 3241::Jack Lumber
3243
+ 3242::Far Cry 3 - Blood Dragon
3244
+ 3243::Dyad
3245
+ 3244::Darkfall Unholy Wars
3246
+ 3245::ORION: Prelude
3247
+ 3246::Sacred Citadel
3248
+ 3247::Age of Empires II HD
3249
+ 3248::Dungeon Hearts
3250
+ 3249::DisplayFusion
3251
+ 3250::Cities in Motion 2
3252
+ 3251::DLC Quest
3253
+ 3252::Shattered Haven
3254
+ 3253::Angry Birds Space
3255
+ 3254::Go Home Dinosaurs!
3256
+ 3255::The Showdown Effect
3257
+ 3256::Arma II: DayZ Mod
3258
+ 3257::The Banner Saga: Factions
3259
+ 3258::Brutal Legend
3260
+ 3259::Impire
3261
+ 3260::Construct 2
3262
+ 3261::Proteus
3263
+ 3262::Sonic & All-Stars Racing Transformed
3264
+ 3263::Omerta - City of Gangsters
3265
+ 3264::10,000,000
3266
+ 3265::BIT.TRIP VOID
3267
+ 3266::Ace of Spades: Battle Builder
3268
+ 3267::GTR 2 FIA GT Racing Game
3269
+ 3268::Party of Sin
3270
+ 3269::Far Cry 3
3271
+ 3270::LEGO The Lord of the Rings
3272
+ 3271::Tomb Raider V: Chronicles
3273
+ 3272::Little Inferno
3274
+ 3273::Sonic Adventure 2
3275
+ 3274::Scribblenauts Unlimited
3276
+ 3275::Cherry Tree High Comedy Club
3277
+ 3276::Natural Selection 2
3278
+ 3277::Pid
3279
+ 3278::iBomber Attack
3280
+ 3279::Conquest of Elysium 3
3281
+ 3280::Deadlight
3282
+ 3281::Viking: Battle for Asgard
3283
+ 3282::Zombie Driver HD
3284
+ 3283::Aerofly FS 1 Flight Simulator
3285
+ 3284::Mark of the Ninja
3286
+ 3285::Of Orcs And Men
3287
+ 3286::R.A.W. Realms of Ancient War
3288
+ 3287::Blood Bowl: Chaos Edition
3289
+ 3288::McPixel
3290
+ 3289::Planets Under Attack
3291
+ 3290::Call of Duty: Black Ops - Mac Edition
3292
+ 3291::Torchlight II
3293
+ 3292::Air Conflicts: Pacific Carriers
3294
+ 3293::Continent of the Ninth Seal
3295
+ 3294::Tryst
3296
+ 3295::The Basement Collection
3297
+ 3296::Spirits
3298
+ 3297::I Am Alive
3299
+ 3298::DARK SOULS™: Prepare To Die™ Edition
3300
+ 3299::Great Big War Game
3301
+ 3300::Super Crate Box
3302
+ 3301::Galactic Civilizations® I: Ultimate Edition
3303
+ 3302::Cladun X2 / クラシックダンジョンX2
3304
+ 3303::Check vs Mate
3305
+ 3304::Thirty Flights of Loving
3306
+ 3305::Symphony
3307
+ 3306::Deponia
3308
+ 3307::Unmechanical
3309
+ 3308::eXceed - Gun Bullet Children
3310
+ 3309::Inversion™
3311
+ 3310::Wanderlust: Rebirth
3312
+ 3311::Orcs Must Die! 2
3313
+ 3312::Prototype 2
3314
+ 3313::Dungeonbowl - Knockout Edition
3315
+ 3314::Endless Space® - Collection
3316
+ 3315::Adventures of Shuggy
3317
+ 3316::Fray: Reloaded Edition
3318
+ 3317::Game of Thrones
3319
+ 3318::Iron Front: Digital War Edition
3320
+ 3319::Conflict Desert Storm™
3321
+ 3320::Magical Diary: Horse Hall
3322
+ 3321::Thief™ Gold
3323
+ 3322::Botanicula
3324
+ 3323::Avernum 6
3325
+ 3324::Avernum 4
3326
+ 3325::Blades of Time
3327
+ 3326::Sherlock Holmes and The Hound of The Baskervilles
3328
+ 3327::Lone Survivor: The Director's Cut
3329
+ 3328::The Walking Dead
3330
+ 3329::A Valley Without Wind
3331
+ 3330::Warlock - Master of the Arcane
3332
+ 3331::Avernum: Escape From the Pit
3333
+ 3332::Superbrothers: Sword & Sworcery EP
3334
+ 3333::The Witcher 2: Assassins of Kings Enhanced Edition
3335
+ 3334::Deep Black: Reloaded
3336
+ 3335::Super MNC
3337
+ 3336::All Zombies Must Die!: Scorepocalypse
3338
+ 3337::Stronghold Kingdoms
3339
+ 3338::Blackwell Deception
3340
+ 3339::Blackwell Convergence
3341
+ 3340::Kingdoms of Amalur: Reckoning™
3342
+ 3341::Sonic CD
3343
+ 3342::Unstoppable Gorg
3344
+ 3343::Crusader Kings Complete
3345
+ 3344::Insane 2
3346
+ 3345::SOL: Exodus
3347
+ 3346::Age of Empires® III: Complete Collection
3348
+ 3347::LEGO Harry Potter: Years 5-7
3349
+ 3348::Choplifter HD
3350
+ 3349::Sideway™ New York
3351
+ 3350::Renegade Ops
3352
+ 3351::Worms Pinball
3353
+ 3352::Worms Blast
3354
+ 3353::Steel Storm: Burning Retribution
3355
+ 3354::Might & Magic: Clash of Heroes
3356
+ 3355::Dungeons - The Dark Lord
3357
+ 3356::Hitogata Happa
3358
+ 3357::The Clockwork Man: The Hidden World
3359
+ 3358::A Game of Thrones - Genesis
3360
+ 3359::F1 2011
3361
+ 3360::Bastion
3362
+ 3361::From Dust
3363
+ 3362::Avadon: The Black Fortress
3364
+ 3363::Legend of Fae
3365
+ 3364::8-Bit Commando
3366
+ 3365::Chantelise - A Tale of Two Sisters
3367
+ 3366::E.Y.E: Divine Cybermancy
3368
+ 3367::Kohan: Ahriman's Gift
3369
+ 3368::Cossacks: European Wars
3370
+ 3369::American Conquest: Fight Back
3371
+ 3370::American Conquest
3372
+ 3371::Cossacks II: Napoleonic Wars
3373
+ 3372::Cossacks II: Battle for Europe
3374
+ 3373::Achron
3375
+ 3374::Gatling Gears
3376
+ 3375::Demolition Company Gold Edition
3377
+ 3376::Trauma
3378
+ 3377::ARMA: Gold Edition
3379
+ 3378::Trapped Dead
3380
+ 3379::Operation Flashpoint: Red River
3381
+ 3380::Vertex Dispenser
3382
+ 3381::Elizabeth Find M.D. - Diagnosis Mystery - Season 2
3383
+ 3382::Zombie Pirates
3384
+ 3383::The Next BIG Thing
3385
+ 3384::Cargo! The Quest for Gravity
3386
+ 3385::Hydrophobia: Prophecy
3387
+ 3386::Portal 2 - The Final Hours
3388
+ 3387::Syberia
3389
+ 3388::NightSky
3390
+ 3389::SEGA Bass Fishing
3391
+ 3390::Atom Zombie Smasher
3392
+ 3391::Post Apocalyptic Mayhem
3393
+ 3392::Revenge of the Titans
3394
+ 3393::LEGO® Star Wars™ III - The Clone Wars™
3395
+ 3394::Shift 2 Unleashed
3396
+ 3395::Dungeon Siege
3397
+ 3396::Agricultural Simulator 2011: Extended Edition
3398
+ 3397::Men of War: Assault Squad
3399
+ 3398::Painkiller Redemption
3400
+ 3399::Magicka
3401
+ 3400::Blue Toad Murder Files™: The Mysteries of Little Riddle
3402
+ 3401::EVE Online
3403
+ 3402::Dive to the Titanic
3404
+ 3403::The UnderGarden
3405
+ 3404::Alien Breed 3: Descent
3406
+ 3405::MX vs. ATV Reflex
3407
+ 3406::Blood Bowl® Legendary Edition
3408
+ 3407::Winter Voices
3409
+ 3408::NyxQuest: Kindred Spirits
3410
+ 3409::Lost Horizon
3411
+ 3410::Aura: Fate of the Ages
3412
+ 3411::Lugaru HD
3413
+ 3412::Diamond Dan
3414
+ 3413::Power of Defense
3415
+ 3414::The Guild II
3416
+ 3415::Theatre of War
3417
+ 3416::Theatre of War 2: Kursk 1943
3418
+ 3417::Day of Defeat: Source
3419
+ 3418::Blacklight: Tango Down
3420
+ 3419::Future Wars
3421
+ 3420::Chaser
3422
+ 3421::Singularity™
3423
+ 3422::F.E.A.R.
3424
+ 3423::Gridrunner Revolution
3425
+ 3424::Dawn of Discovery™: Venice
3426
+ 3425::Arsenal of Democracy: A Hearts of Iron Game
3427
+ 3426::Silent Hunter 5®: Battle of the Atlantic
3428
+ 3427::Shatter
3429
+ 3428::Dragon Age™: Origins Awakening
3430
+ 3429::Command & Conquer 4: Tiberian Twilight
3431
+ 3430::Dark Fall: Lost Souls
3432
+ 3431::Hazen: The Dark Whispers
3433
+ 3432::Ironclads: American Civil War
3434
+ 3433::Booster Trooper
3435
+ 3434::Nancy Drew®: Warnings at Waverly Academy
3436
+ 3435::Aliens versus Predator Classic 2000
3437
+ 3436::Aliens vs. Predator™
3438
+ 3437::Gyromancer
3439
+ 3438::Rogue Warrior
3440
+ 3439::King's Bounty: Armored Princess
3441
+ 3440::Broken Sword 3 - the Sleeping Dragon
3442
+ 3441::Broken Sword 2 - the Smoking Mirror: Remastered
3443
+ 3442::Altitude
3444
+ 3443::9th Company: Roots Of Terror
3445
+ 3444::Samorost 2
3446
+ 3445::Nancy Drew®: The Haunted Carousel
3447
+ 3446::Sacraboar
3448
+ 3447::Nancy Drew®: Danger on Deception Island
3449
+ 3448::Borderlands
3450
+ 3449::Big Brain Wolf
3451
+ 3450::Lucidity™
3452
+ 3451::Red Faction Guerrilla Steam Edition
3453
+ 3452::Zuma's Revenge!
3454
+ 3453::STAR WARS™ - Dark Forces
3455
+ 3454::STAR WARS™ Jedi Knight - Mysteries of the Sith™
3456
+ 3455::Twin Sector
3457
+ 3456::MDK
3458
+ 3457::AaAaAA!!! - A Reckless Disregard for Gravity
3459
+ 3458::MDK 2
3460
+ 3459::Nation Red
3461
+ 3460::Red Faction
3462
+ 3461::Red Faction II
3463
+ 3462::Batman: Arkham Asylum Game of the Year Edition
3464
+ 3463::Cooking Dash®
3465
+ 3464::Chocolatier®: Decadence by Design™
3466
+ 3465::East India Company
3467
+ 3466::Sniper Elite
3468
+ 3467::Bad Rats: the Rats' Revenge
3469
+ 3468::The Dig®
3470
+ 3469::Supreme Ruler 2020 Gold
3471
+ 3470::Delta Force: Task Force Dagger
3472
+ 3471::Tachyon: The Fringe
3473
+ 3472::Delta Force
3474
+ 3473::Delta Force — Black Hawk Down: Team Sabre
3475
+ 3474::Arma 2
3476
+ 3475::Trine Enchanted Edition
3477
+ 3476::Overlord II
3478
+ 3477::Lunnye Devitsy
3479
+ 3478::F-16 Multirole Fighter
3480
+ 3479::Crayon Physics Deluxe
3481
+ 3480::Alien Shooter: Revisited
3482
+ 3481::Zombie Shooter
3483
+ 3482::Restaurant Empire II
3484
+ 3483::Mightier
3485
+ 3484::Dark Sector
3486
+ 3485::The Path
3487
+ 3486::COIL
3488
+ 3487::Caster
3489
+ 3488::Eternal Silence
3490
+ 3489::Eschalon: Book I
3491
+ 3490::Spectromancer
3492
+ 3491::Hospital Tycoon
3493
+ 3492::Tom Clancy's Splinter Cell Double Agent®
3494
+ 3493::Driver® Parallel Lines
3495
+ 3494::Warhammer 40,000: Dawn of War II
3496
+ 3495::Puzzle Quest: Galactrix
3497
+ 3496::SlamIt Pinball Big Score
3498
+ 3497::Tom Clancy's EndWar™
3499
+ 3498::Grand Ages: Rome
3500
+ 3499::Monster Trucks Nitro
3501
+ 3500::Europa Universalis III Complete
3502
+ 3501::Exodus from the Earth
3503
+ 3502::Starscape
3504
+ 3503::Precipice of Darkness, Episode Two
3505
+ 3504::7 Wonders: Treasures of Seven
3506
+ 3505::Left 4 Dead
3507
+ 3506::Prince of Persia: Warrior Within™
3508
+ 3507::Prince of Persia®: The Sands of Time
3509
+ 3508::Prince of Persia®
3510
+ 3509::Need for Speed Undercover
3511
+ 3510::Mass Effect
3512
+ 3511::SPORE™
3513
+ 3512::Imperium Romanum Gold Edition
3514
+ 3513::INSURGENCY: Modern Infantry Combat
3515
+ 3514::Age of Chivalry
3516
+ 3515::D.I.P.R.I.P. Warm Up
3517
+ 3516::Zombie Panic! Source
3518
+ 3517::Insecticide Part 1
3519
+ 3518::Sacred Gold
3520
+ 3519::Tom Clancy's Ghost Recon® Desert Siege™
3521
+ 3520::Tom Clancy's Ghost Recon®
3522
+ 3521::Silverfall: Earth Awakening
3523
+ 3522::Assassin's Creed™: Director's Cut Edition
3524
+ 3523::Trackmania United Forever Star Edition
3525
+ 3524::Virtual Villagers: A New Home
3526
+ 3525::Cold Fear™
3527
+ 3526::Tom Clancy's Rainbow Six Lockdown™
3528
+ 3527::Precipice of Darkness, Episode One
3529
+ 3528::Warhammer® 40,000: Dawn of War® - Soulstorm
3530
+ 3529::Safecracker: The Ultimate Puzzle Adventure
3531
+ 3530::Full Spectrum Warrior: Ten Hammers
3532
+ 3531::ThreadSpace: Hyperbol
3533
+ 3532::Full Spectrum Warrior
3534
+ 3533::DOOM 3
3535
+ 3534::Quake III Arena
3536
+ 3535::QUAKE II Mission Pack: Ground Zero
3537
+ 3536::HeXen: Deathkings of the Dark Citadel
3538
+ 3537::QUAKE III: Team Arena
3539
+ 3538::Painkiller Overdose
3540
+ 3539::Call of Juarez™
3541
+ 3540::Call of Duty® 4: Modern Warfare®
3542
+ 3541::Amazing Adventures The Lost Tomb™
3543
+ 3542::Heretic: Shadow of the Serpent Riders
3544
+ 3543::Sam & Max 105: Reality 2.0
3545
+ 3544::Geometry Wars: Retro Evolved
3546
+ 3545::Just Cause
3547
+ 3546::Hitman: Codename 47
3548
+ 3547::Commandos 2: Men of Courage
3549
+ 3548::Thief: Deadly Shadows
3550
+ 3549::Deus Ex: Game of the Year Edition
3551
+ 3550::Jade Empire™: Special Edition
3552
+ 3551::Peggle Deluxe
3553
+ 3552::Silverfall
3554
+ 3553::Runaway, The Dream of The Turtle
3555
+ 3554::GUN™
3556
+ 3555::Call of Duty: United Offensive
3557
+ 3556::Sid Meier's Civilization® III Complete
3558
+ 3557::Shattered Union
3559
+ 3558::Medieval II: Total War™
3560
+ 3559::Heroes of Annihilated Empires
3561
+ 3560::Global Adventures
3562
+ 3561::Six Days of Snow
3563
+ 3562::Bombernauts
3564
+ 3563::Bloodworks
3565
+ 3564::Sid Meier's Pirates!
3566
+ 3565::Half-Life 2: Lost Coast
3567
+ 3566::Red Orchestra: Ostfront 41-45
3568
+ 3567::Space Empires IV Deluxe
3569
+ 3568::Earth 2160
3570
+ 3569::Half-Life Deathmatch: Source
3571
+ 3570::SiN Episodes: Emergence
3572
+ 3571::Typer Shark! Deluxe
3573
+ 3572::Bejeweled Deluxe
3574
+ 3573::Dynomite Deluxe
3575
+ 3574::AstroPop Deluxe
3576
+ 3575::Zuma Deluxe
3577
+ 3576::Insaniquarium Deluxe
3578
+ 3577::PLAYERUNKN4WN: Zombie
3579
+ 3578::Gothic 1
3580
+ 3579::Geneforge 1
3581
+ 3580::Counter-Strike: Condition Zero
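The `id2name.txt` entries above use a plain `item_id::title` line format (the last id is 3580; the padding id 3581 used by `data/steam_data.py` has no entry). A minimal parsing sketch following that convention; the path and helper name here are illustrative, not part of the repo:

```python
import os.path as op

def load_id2name(data_dir="data/ref/steam"):
    """Parse id2name.txt ("item_id::title" per line) into a {int: str} dict."""
    id2name = {}
    with open(op.join(data_dir, "id2name.txt"), "r") as f:
        for line in f:
            item_id, title = line.rstrip("\n").split("::", 1)
            id2name[int(item_id)] = title.strip()
    return id2name

# e.g. load_id2name()[3580] -> "Counter-Strike: Condition Zero"
```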
data/ref/steam/test.ipynb ADDED
@@ -0,0 +1,190 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stdout",
10
+ "output_type": "stream",
11
+ "text": [
12
+ " seq next len_seq \\\n",
13
+ "0 [3442, 3448, 3451, 97, 180, 78, 3300, 152, 340... 3462 10 \n",
14
+ "1 [3448, 3451, 97, 180, 78, 3300, 152, 3401, 185... 3354 10 \n",
15
+ "2 [3451, 97, 180, 78, 3300, 152, 3401, 185, 3462... 274 10 \n",
16
+ "3 [97, 180, 78, 3300, 152, 3401, 185, 3462, 3354... 3276 10 \n",
17
+ "4 [180, 78, 3300, 152, 3401, 185, 3462, 3354, 27... 3225 10 \n",
18
+ "... ... ... ... \n",
19
+ "151051 [129, 2411, 1033, 204, 498, 3242, 3505, 3224, ... 1734 10 \n",
20
+ "151052 [2411, 1033, 204, 498, 3242, 3505, 3224, 2524,... 1945 10 \n",
21
+ "151053 [1033, 204, 498, 3242, 3505, 3224, 2524, 1099,... 1134 10 \n",
22
+ "151054 [3431, 2808, 3214, 3284, 3001, 800, 2636, 3581... 2636 7 \n",
23
+ "151055 [14, 58, 3510, 15, 68, 86, 3238, 2411, 1134, 2... 1073 10 \n",
24
+ "\n",
25
+ " review \\\n",
26
+ "0 [Maybe a niche thing, but it has solid gamepla... \n",
27
+ "1 [Borderlands at its core is spectacular: the g... \n",
28
+ "2 [You get to be the hero of the people WHILE bl... \n",
29
+ "3 [The game is far from perfect, I could point o... \n",
30
+ "4 [Very instructive', \"There's a ton of stuff wr... \n",
31
+ "... ... \n",
32
+ "151051 [This game is the best when I was bored playin... \n",
33
+ "151052 [I don't even know where to begin on how good ... \n",
34
+ "151053 [BrainBread 2 has really fun single player bec... \n",
35
+ "151054 [Starting the game, you will go into an area w... \n",
36
+ "151055 [Think the original Doom but with more awesome... \n",
37
+ "\n",
38
+ " next_review \n",
39
+ "0 I played to the end, I feel obligated to recom... \n",
40
+ "1 I was pretty shocked by how much I liked this ... \n",
41
+ "2 words cant explain how good this game is GET I... \n",
42
+ "3 best game i ever bought a rts with actual play... \n",
43
+ "4 Extremely punishing game, but every death is p... \n",
44
+ "... ... \n",
45
+ "151051 I had this game since it first came out \n",
46
+ "151052 give me a reason to actually hate white people. \n",
47
+ "151053 Heroes and Generals is a really fun game but r... \n",
48
+ "151054 The game is fun it's a cool concept, and I lik... \n",
49
+ "151055 It's Wasted Potential but still a pretty good ... \n",
50
+ "\n",
51
+ "[151056 rows x 5 columns]\n"
52
+ ]
53
+ }
54
+ ],
55
+ "source": [
56
+ "import pandas as pd\n",
57
+ "\n",
58
+ "# Read the pickle file\n",
59
+ "df = pd.read_pickle(\"./train_data.df\")\n",
60
+ "\n",
61
+ "# Print the dataframe\n",
62
+ "print(df)"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": 3,
68
+ "metadata": {},
69
+ "outputs": [
70
+ {
71
+ "name": "stdout",
72
+ "output_type": "stream",
73
+ "text": [
74
+ " seq next len_seq \\\n",
75
+ "0 [1221, 899, 490, 1985, 255, 3058, 3119, 2930, ... 1417 10 \n",
76
+ "1 [3448, 855, 2710, 6, 2694, 3581, 3581, 3581, 3... 2694 5 \n",
77
+ "2 [3255, 3399, 3330, 3500, 2729, 1335, 68, 1866,... 642 10 \n",
78
+ "3 [273, 3462, 2656, 2033, 3333, 3284, 2110, 470,... 470 8 \n",
79
+ "4 [3284, 1134, 2992, 727, 3442, 831, 6, 76, 2899... 2899 9 \n",
80
+ "... ... ... ... \n",
81
+ "1189 [6, 2986, 2683, 495, 3580, 3171, 234, 164, 2, ... 2853 10 \n",
82
+ "1190 [2262, 2694, 1358, 883, 2174, 6, 2683, 1604, 4... 1478 10 \n",
83
+ "1191 [150, 1099, 3488, 3580, 2725, 3171, 535, 1622,... 206 10 \n",
84
+ "1192 [206, 6, 52, 3272, 204, 926, 498, 2712, 2496, ... 3202 10 \n",
85
+ "1193 [3251, 3300, 1822, 150, 2160, 2046, 6, 1038, 2... 1602 10 \n",
86
+ "\n",
87
+ " review \\\n",
88
+ "0 [Really nice game, Would recommend!, meh, This... \n",
89
+ "1 [finished it, its good but after i finished it... \n",
90
+ "2 [Paradox Interactive\\tdouble the price for alm... \n",
91
+ "3 [Nice, Combat system can be better, All the fu... \n",
92
+ "4 [9/10, 10/10, 9/10, 10/10, -99999/10, 9/10, id... \n",
93
+ "... ... \n",
94
+ "1189 [RDM simulator, 192 cubic tonnes of mud simula... \n",
95
+ "1190 [do I have a brain?, 18+, relationship of Park... \n",
96
+ "1191 [For DC, Любителям Аниме., 123, Ну такое., Hor... \n",
97
+ "1192 [Peed all over my dog then I started the game.... \n",
98
+ "1193 [Great for a few hours until you beat it., Fun... \n",
99
+ "\n",
100
+ " next_review \n",
101
+ "0 HUGE improvements on graphics and gameplay and... \n",
102
+ "1 ok. \n",
103
+ "2 Unstable \n",
104
+ "3 You've probably already heard that this game i... \n",
105
+ "4 10/10 \n",
106
+ "... ... \n",
107
+ "1189 best game ever made simulator \n",
108
+ "1190 new vision, new way \n",
109
+ "1191 Кратко: мужик живёт с мамой в маленьком фургоне \n",
110
+ "1192 Story mode kinda sucks but the rest of the gam... \n",
111
+ "1193 While being free-to-play, Robocraft seems very... \n",
112
+ "\n",
113
+ "[1194 rows x 5 columns]\n"
114
+ ]
115
+ }
116
+ ],
117
+ "source": [
118
+ "# Read the pickle file\n",
119
+ "df = pd.read_pickle(\"./Test_data.df\")\n",
120
+ "\n",
121
+ "# Print the dataframe\n",
122
+ "print(df)"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": 3,
128
+ "metadata": {},
129
+ "outputs": [],
130
+ "source": [
131
+ "interaction_pairs = []\n",
132
+ "\n",
133
+ "for idx, row in df.iterrows():\n",
134
+ " user_id = idx # if you have an explicit user ID, use that here instead of the index\n",
135
+ " items = row['seq'] + [row['next']]\n",
136
+ " for item in items:\n",
137
+ " interaction_pairs.append((user_id, item))\n",
138
+ "\n",
139
+ "# Convert to a DataFrame\n",
140
+ "interaction_df = pd.DataFrame(interaction_pairs, columns=['user_id', 'item_id'])"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 4,
146
+ "metadata": {},
147
+ "outputs": [
148
+ {
149
+ "name": "stdout",
150
+ "output_type": "stream",
151
+ "text": [
152
+ "Interaction Density: 0.0031\n"
153
+ ]
154
+ }
155
+ ],
156
+ "source": [
157
+ "# Build the user-item matrix\n",
158
+ "user_item_matrix = pd.crosstab(interaction_df['user_id'], interaction_df['item_id'])\n",
159
+ "\n",
160
+ "# Compute the fraction of non-zero entries to get the density\n",
161
+ "non_zero_count = user_item_matrix.astype(bool).sum().sum() # total number of non-zero entries\n",
162
+ "total_elements = user_item_matrix.size # total number of entries in the matrix\n",
163
+ "\n",
164
+ "interaction_density = non_zero_count / total_elements\n",
165
+ "print(f\"Interaction Density: {interaction_density:.4f}\")"
166
+ ]
167
+ }
168
+ ],
169
+ "metadata": {
170
+ "kernelspec": {
171
+ "display_name": "MOE4REC",
172
+ "language": "python",
173
+ "name": "python3"
174
+ },
175
+ "language_info": {
176
+ "codemirror_mode": {
177
+ "name": "ipython",
178
+ "version": 3
179
+ },
180
+ "file_extension": ".py",
181
+ "mimetype": "text/x-python",
182
+ "name": "python",
183
+ "nbconvert_exporter": "python",
184
+ "pygments_lexer": "ipython3",
185
+ "version": "3.9.19"
186
+ }
187
+ },
188
+ "nbformat": 4,
189
+ "nbformat_minor": 2
190
+ }
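The notebook above loads the pickled dataframes and estimates interaction density by building a dense `pd.crosstab` user-item matrix. A rough standalone sketch that counts unique (user, item) pairs instead, assuming the same `seq`/`next` columns and, like the notebook, using the row index as a stand-in for the user id:

```python
import pandas as pd

df = pd.read_pickle("./train_data.df")

pairs, items = set(), set()
for user_id, row in df.iterrows():            # row index stands in for an explicit user id
    for item in list(row["seq"]) + [row["next"]]:
        pairs.add((user_id, item))
        items.add(item)

# density = observed (user, item) pairs / (num_users * num_observed_items)
density = len(pairs) / (len(df) * len(items))
print(f"Interaction Density: {density:.4f}")
```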
data/ref/steam/train_data.df ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90f8e2ec827427d271cf0b03db45ed5ac3231840fbd022ac57cfae6061ed5b63
3
+ size 7322362
data/steam_data.py ADDED
@@ -0,0 +1,103 @@
1
+ import torch
2
+ import os.path as op
3
+ import numpy as np
4
+ import pickle as pkl
5
+ import torch.utils.data as data
6
+
7
+ import pandas as pd
8
+ import random
9
+
10
+ class SteamData(data.Dataset):
11
+ def __init__(self, data_dir=r'data/ref/steam',
12
+ stage=None,
13
+ cans_num=10,
14
+ sep=", ",
15
+ no_augment=True):
16
+ self.__dict__.update(locals())
17
+ self.aug = (stage=='train') and not no_augment
18
+ self.padding_item_id=3581
19
+ self.check_files()
20
+
21
+ # Return the length of session_data['seq']
22
+ def __len__(self):
23
+ return len(self.session_data['seq'])
24
+
25
+ # Get the sample at index i
26
+ def __getitem__(self, i):
27
+ temp = self.session_data.iloc[i]
28
+ candidates = self.negative_sampling(temp['seq_unpad'],temp['next'])
29
+ cans_name=[self.item_id2name[can] for can in candidates]
30
+ sample = {
31
+ 'seq': temp['seq'],
32
+ 'seq_name': temp['seq_title'],
33
+ 'len_seq': temp['len_seq'],
34
+ 'seq_str': self.sep.join(temp['seq_title']),
35
+ 'cans': candidates,
36
+ 'cans_name': cans_name,
37
+ 'cans_str': self.sep.join(cans_name),
38
+ 'len_cans': self.cans_num,
39
+ 'item_id': temp['next'],
40
+ 'item_name': temp['next_item_name'],
41
+ 'correct_answer': temp['next_item_name']
42
+ }
43
+ return sample
44
+
45
+ # Negative sampling: return a list of candidate item IDs
46
+ def negative_sampling(self,seq_unpad,next_item):
47
+ # canset: all game ids that are neither in seq_unpad nor equal to next_item
48
+ canset=[i for i in list(self.item_id2name.keys()) if i not in seq_unpad and i!=next_item]
49
+ # randomly sample cans_num-1 game ids, then add next_item
50
+ candidates=random.sample(canset, self.cans_num-1)+[next_item]
51
+ random.shuffle(candidates)
52
+ return candidates
53
+
54
+ # Check for and load the data files
55
+ def check_files(self):
56
+ self.item_id2name=self.get_game_id2name()
57
+ if self.stage=='train':
58
+ filename="train_data.df"
59
+ elif self.stage=='val':
60
+ filename="Val_data.df"
61
+ elif self.stage=='test':
62
+ filename="Test_data.df"
63
+ data_path=op.join(self.data_dir, filename)
64
+ # load the data from data_path using the id2name dictionary
65
+ self.session_data = self.session_data4frame(data_path, self.item_id2name)
66
+
67
+ # Build the game-id to game-name mapping, returned as a dict
68
+ def get_game_id2name(self):
69
+ game_id2name = dict()
70
+ item_path=op.join(self.data_dir, 'id2name.txt')
71
+ with open(item_path, 'r') as f:
72
+ for l in f.readlines():
73
+ ll = l.strip('\n').split('::')
74
+ game_id2name[int(ll[0])] = ll[1].strip()
75
+ return game_id2name
76
+
77
+ # Preprocess the data
78
+ def session_data4frame(self, datapath, game_id2name):
79
+ # read the pandas dataframe from datapath
80
+ train_data = pd.read_pickle(datapath)
81
+ train_data = train_data[train_data['len_seq'] >= 3]
82
+ # remove padding items from a sequence
83
+ def remove_padding(xx):
84
+ x = xx[:]
85
+ for i in range(10):
86
+ try:
87
+ x.remove(self.padding_item_id)
88
+ except:
89
+ break
90
+ return x
91
+ # unpadded train_data sequences -> train_data['seq_unpad']
92
+ train_data['seq_unpad'] = train_data['seq'].apply(remove_padding)
93
+ # item ids -> game names
94
+ def seq_to_title(x):
95
+ return [game_id2name[x_i] for x_i in x]
96
+ # convert train_data id sequences to game-name sequences -> train_data['seq_title']
97
+ train_data['seq_title'] = train_data['seq_unpad'].apply(seq_to_title)
98
+ # single item id -> game name
99
+ def next_item_title(x):
100
+ return game_id2name[x]
101
+ # convert train_data['next'] ids to game names -> train_data['next_item_name']
102
+ train_data['next_item_name'] = train_data['next'].apply(next_item_title)
103
+ return train_data
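A short usage sketch for the `SteamData` dataset defined above. The batching actually used for training is handled elsewhere in the repo (presumably `data/data_interface.py`), so the `DataLoader` call here is only illustrative:

```python
from torch.utils.data import DataLoader
from data.steam_data import SteamData

dataset = SteamData(data_dir="data/ref/steam", stage="train", cans_num=10)
sample = dataset[0]
print(sample["seq_str"])         # interaction history as game titles
print(sample["cans_str"])        # 10 shuffled candidates, one of which is the target
print(sample["correct_answer"])  # title of the ground-truth next game

# Samples mix ints, lists and strings, so a list-of-dicts collate is the simplest default here.
loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=lambda batch: batch)
```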
debug/modeling_llama.py ADDED
@@ -0,0 +1,886 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch LLaMA model."""
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
33
+ from .configuration_llama import LlamaConfig
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ _CONFIG_FOR_DOC = "LlamaConfig"
39
+
40
+
41
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
42
+ def _make_causal_mask(
43
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
44
+ ):
45
+ """
46
+ Make causal mask used for bi-directional self-attention.
47
+ """
48
+ bsz, tgt_len = input_ids_shape
49
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
50
+ mask_cond = torch.arange(mask.size(-1), device=device)
51
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
52
+ mask = mask.to(dtype)
53
+
54
+ if past_key_values_length > 0:
55
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
56
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
57
+
58
+
59
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
60
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
61
+ """
62
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
63
+ """
64
+ bsz, src_len = mask.size()
65
+ tgt_len = tgt_len if tgt_len is not None else src_len
66
+
67
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
68
+
69
+ inverted_mask = 1.0 - expanded_mask
70
+
71
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
72
+
73
+
74
+ class LlamaRMSNorm(nn.Module):
75
+ def __init__(self, hidden_size, eps=1e-6):
76
+ """
77
+ LlamaRMSNorm is equivalent to T5LayerNorm
78
+ """
79
+ super().__init__()
80
+ self.weight = nn.Parameter(torch.ones(hidden_size))
81
+ self.variance_epsilon = eps
82
+
83
+ def forward(self, hidden_states):
84
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
85
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
86
+
87
+ # convert into half-precision if necessary
88
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
89
+ hidden_states = hidden_states.to(self.weight.dtype)
90
+
91
+ return self.weight * hidden_states
92
+
93
+
94
+ class LlamaRotaryEmbedding(torch.nn.Module):
95
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
96
+ super().__init__()
97
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
98
+ self.register_buffer("inv_freq", inv_freq)
99
+
100
+ # Build here to make `torch.jit.trace` work.
101
+ self.max_seq_len_cached = max_position_embeddings
102
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
103
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
104
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
105
+ emb = torch.cat((freqs, freqs), dim=-1)
106
+ self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
107
+ self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
108
+
109
+ def forward(self, x, seq_len=None):
110
+ # x: [bs, num_attention_heads, seq_len, head_size]
111
+ # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
112
+ if seq_len > self.max_seq_len_cached:
113
+ self.max_seq_len_cached = seq_len
114
+ t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
115
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
116
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
117
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
118
+ self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
119
+ self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
120
+ return (
121
+ self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
122
+ self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
123
+ )
124
+
125
+
126
+ def rotate_half(x):
127
+ """Rotates half the hidden dims of the input."""
128
+ x1 = x[..., : x.shape[-1] // 2]
129
+ x2 = x[..., x.shape[-1] // 2 :]
130
+ return torch.cat((-x2, x1), dim=-1)
131
+
132
+
133
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
134
+ gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
135
+ gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
136
+ cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
137
+ sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
138
+ q_embed = (q * cos) + (rotate_half(q) * sin)
139
+ k_embed = (k * cos) + (rotate_half(k) * sin)
140
+ return q_embed, k_embed
141
+
142
+
143
+ class LlamaMLP(nn.Module):
144
+ def __init__(
145
+ self,
146
+ hidden_size: int,
147
+ intermediate_size: int,
148
+ hidden_act: str,
149
+ ):
150
+ super().__init__()
151
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
152
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
153
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
154
+ self.act_fn = ACT2FN[hidden_act]
155
+
156
+ def forward(self, x):
157
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
158
+
159
+
160
+ class LlamaAttention(nn.Module):
161
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
162
+
163
+ def __init__(self, config: LlamaConfig):
164
+ super().__init__()
165
+ self.config = config
166
+ self.hidden_size = config.hidden_size
167
+ self.num_heads = config.num_attention_heads
168
+ self.head_dim = self.hidden_size // self.num_heads
169
+ self.max_position_embeddings = config.max_position_embeddings
170
+
171
+ if (self.head_dim * self.num_heads) != self.hidden_size:
172
+ raise ValueError(
173
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
174
+ f" and `num_heads`: {self.num_heads})."
175
+ )
176
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
177
+ self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
178
+ self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
179
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
180
+ self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
181
+
182
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
183
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
184
+
185
+ def forward(
186
+ self,
187
+ hidden_states: torch.Tensor,
188
+ attention_mask: Optional[torch.Tensor] = None,
189
+ position_ids: Optional[torch.LongTensor] = None,
190
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
191
+ output_attentions: bool = False,
192
+ use_cache: bool = False,
193
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
194
+ bsz, q_len, _ = hidden_states.size()
195
+
196
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
197
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
198
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
199
+
200
+ kv_seq_len = key_states.shape[-2]
201
+ if past_key_value is not None:
202
+ kv_seq_len += past_key_value[0].shape[-2]
203
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
204
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
205
+ # [bsz, nh, t, hd]
206
+
207
+ if past_key_value is not None:
208
+ # reuse k, v, self_attention
209
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
210
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
211
+
212
+ past_key_value = (key_states, value_states) if use_cache else None
213
+
214
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
215
+
216
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
217
+ raise ValueError(
218
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
219
+ f" {attn_weights.size()}"
220
+ )
221
+
222
+ if attention_mask is not None:
223
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
224
+ raise ValueError(
225
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
226
+ )
227
+ attn_weights = attn_weights + attention_mask
228
+ attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
229
+
230
+ # upcast attention to fp32
231
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
232
+ attn_output = torch.matmul(attn_weights, value_states)
233
+
234
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
235
+ raise ValueError(
236
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
237
+ f" {attn_output.size()}"
238
+ )
239
+
240
+ attn_output = attn_output.transpose(1, 2)
241
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
242
+
243
+ attn_output = self.o_proj(attn_output)
244
+
245
+ if not output_attentions:
246
+ attn_weights = None
247
+
248
+ return attn_output, attn_weights, past_key_value
249
+
250
+
251
+ class LlamaDecoderLayer(nn.Module):
252
+ def __init__(self, config: LlamaConfig):
253
+ super().__init__()
254
+ self.hidden_size = config.hidden_size
255
+ self.self_attn = LlamaAttention(config=config)
256
+ self.mlp = LlamaMLP(
257
+ hidden_size=self.hidden_size,
258
+ intermediate_size=config.intermediate_size,
259
+ hidden_act=config.hidden_act,
260
+ )
261
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
262
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
263
+
264
+ def forward(
265
+ self,
266
+ hidden_states: torch.Tensor,
267
+ attention_mask: Optional[torch.Tensor] = None,
268
+ position_ids: Optional[torch.LongTensor] = None,
269
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
270
+ output_attentions: Optional[bool] = False,
271
+ use_cache: Optional[bool] = False,
272
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
273
+ """
274
+ Args:
275
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
276
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
277
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
278
+ output_attentions (`bool`, *optional*):
279
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
280
+ returned tensors for more detail.
281
+ use_cache (`bool`, *optional*):
282
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
283
+ (see `past_key_values`).
284
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
285
+ """
286
+
287
+ residual = hidden_states
288
+
289
+ hidden_states = self.input_layernorm(hidden_states)
290
+
291
+ # Self Attention
292
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
293
+ hidden_states=hidden_states,
294
+ attention_mask=attention_mask,
295
+ position_ids=position_ids,
296
+ past_key_value=past_key_value,
297
+ output_attentions=output_attentions,
298
+ use_cache=use_cache,
299
+ )
300
+ hidden_states = residual + hidden_states
301
+
302
+ # Fully Connected
303
+ residual = hidden_states
304
+ hidden_states = self.post_attention_layernorm(hidden_states)
305
+ hidden_states = self.mlp(hidden_states)
306
+ hidden_states = residual + hidden_states
307
+
308
+ outputs = (hidden_states,)
309
+
310
+ if output_attentions:
311
+ outputs += (self_attn_weights,)
312
+
313
+ if use_cache:
314
+ outputs += (present_key_value,)
315
+
316
+ return outputs
317
+
318
+
319
+ LLAMA_START_DOCSTRING = r"""
320
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
321
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
322
+ etc.)
323
+
324
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
325
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
326
+ and behavior.
327
+
328
+ Parameters:
329
+ config ([`LlamaConfig`]):
330
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
331
+ load the weights associated with the model, only the configuration. Check out the
332
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
333
+ """
334
+
335
+
336
+ @add_start_docstrings(
337
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
338
+ LLAMA_START_DOCSTRING,
339
+ )
340
+ class LlamaPreTrainedModel(PreTrainedModel):
341
+ config_class = LlamaConfig
342
+ base_model_prefix = "model"
343
+ supports_gradient_checkpointing = True
344
+ _no_split_modules = ["LlamaDecoderLayer"]
345
+ _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
346
+
347
+ def _init_weights(self, module):
348
+ std = self.config.initializer_range
349
+ if isinstance(module, nn.Linear):
350
+ module.weight.data.normal_(mean=0.0, std=std)
351
+ if module.bias is not None:
352
+ module.bias.data.zero_()
353
+ elif isinstance(module, nn.Embedding):
354
+ module.weight.data.normal_(mean=0.0, std=std)
355
+ if module.padding_idx is not None:
356
+ module.weight.data[module.padding_idx].zero_()
357
+
358
+ def _set_gradient_checkpointing(self, module, value=False):
359
+ if isinstance(module, LlamaModel):
360
+ module.gradient_checkpointing = value
361
+
362
+
363
+ LLAMA_INPUTS_DOCSTRING = r"""
364
+ Args:
365
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
366
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
367
+ it.
368
+
369
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
370
+ [`PreTrainedTokenizer.__call__`] for details.
371
+
372
+ [What are input IDs?](../glossary#input-ids)
373
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
374
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
375
+
376
+ - 1 for tokens that are **not masked**,
377
+ - 0 for tokens that are **masked**.
378
+
379
+ [What are attention masks?](../glossary#attention-mask)
380
+
381
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
382
+ [`PreTrainedTokenizer.__call__`] for details.
383
+
384
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
385
+ `past_key_values`).
386
+
387
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
388
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
389
+ information on the default strategy.
390
+
391
+ - 1 indicates the head is **not masked**,
392
+ - 0 indicates the head is **masked**.
393
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
394
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
395
+ config.n_positions - 1]`.
396
+
397
+ [What are position IDs?](../glossary#position-ids)
398
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
399
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
400
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
401
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
402
+
403
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
404
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
405
+
406
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
407
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
408
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
409
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
410
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
411
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
412
+ model's internal embedding lookup matrix.
413
+ use_cache (`bool`, *optional*):
414
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
415
+ `past_key_values`).
416
+ output_attentions (`bool`, *optional*):
417
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
418
+ tensors for more detail.
419
+ output_hidden_states (`bool`, *optional*):
420
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
421
+ more detail.
422
+ return_dict (`bool`, *optional*):
423
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
424
+ """
425
+
426
+
427
+ @add_start_docstrings(
428
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
429
+ LLAMA_START_DOCSTRING,
430
+ )
431
+ class LlamaModel(LlamaPreTrainedModel):
432
+ """
433
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
434
+
435
+ Args:
436
+ config: LlamaConfig
437
+ """
438
+
439
+ def __init__(self, config: LlamaConfig):
440
+ super().__init__(config)
441
+ self.padding_idx = config.pad_token_id
442
+ self.vocab_size = config.vocab_size
443
+
444
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
445
+ self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
446
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
447
+
448
+ self.gradient_checkpointing = False
449
+ # Initialize weights and apply final processing
450
+ self.post_init()
451
+
452
+ def get_input_embeddings(self):
453
+ return self.embed_tokens
454
+
455
+ def set_input_embeddings(self, value):
456
+ self.embed_tokens = value
457
+
458
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
459
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
460
+ # create causal mask
461
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
462
+ combined_attention_mask = None
463
+ if input_shape[-1] > 1:
464
+ combined_attention_mask = _make_causal_mask(
465
+ input_shape,
466
+ inputs_embeds.dtype,
467
+ device=inputs_embeds.device,
468
+ past_key_values_length=past_key_values_length,
469
+ )
470
+
471
+ if attention_mask is not None:
472
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
473
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
474
+ inputs_embeds.device
475
+ )
476
+ combined_attention_mask = (
477
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
478
+ )
479
+
480
+ return combined_attention_mask
481
+
482
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
483
+ def forward(
484
+ self,
485
+ input_ids: torch.LongTensor = None,
486
+ attention_mask: Optional[torch.Tensor] = None,
487
+ position_ids: Optional[torch.LongTensor] = None,
488
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
489
+ inputs_embeds: Optional[torch.FloatTensor] = None,
490
+ use_cache: Optional[bool] = None,
491
+ output_attentions: Optional[bool] = None,
492
+ output_hidden_states: Optional[bool] = None,
493
+ return_dict: Optional[bool] = None,
494
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
495
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
496
+ output_hidden_states = (
497
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
498
+ )
499
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
500
+
501
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
502
+
503
+ # retrieve input_ids and inputs_embeds
504
+ if input_ids is not None and inputs_embeds is not None:
505
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
506
+ elif input_ids is not None:
507
+ batch_size, seq_length = input_ids.shape
508
+ elif inputs_embeds is not None:
509
+ batch_size, seq_length, _ = inputs_embeds.shape
510
+ else:
511
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
512
+
513
+ seq_length_with_past = seq_length
514
+ past_key_values_length = 0
515
+
516
+ if past_key_values is not None:
517
+ past_key_values_length = past_key_values[0][0].shape[2]
518
+ seq_length_with_past = seq_length_with_past + past_key_values_length
519
+
520
+ if position_ids is None:
521
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
522
+ position_ids = torch.arange(
523
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
524
+ )
525
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
526
+ else:
527
+ position_ids = position_ids.view(-1, seq_length).long()
528
+
529
+ if inputs_embeds is None:
530
+ inputs_embeds = self.embed_tokens(input_ids)
531
+ # embed positions
532
+ if attention_mask is None:
533
+ attention_mask = torch.ones(
534
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
535
+ )
536
+ attention_mask = self._prepare_decoder_attention_mask(
537
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
538
+ )
539
+
540
+ hidden_states = inputs_embeds
541
+
542
+ if self.gradient_checkpointing and self.training:
543
+ if use_cache:
544
+ logger.warning_once(
545
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
546
+ )
547
+ use_cache = False
548
+
549
+ # decoder layers
550
+ all_hidden_states = () if output_hidden_states else None
551
+ all_self_attns = () if output_attentions else None
552
+ next_decoder_cache = () if use_cache else None
553
+
554
+ for idx, decoder_layer in enumerate(self.layers):
555
+ if output_hidden_states:
556
+ all_hidden_states += (hidden_states,)
557
+
558
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
559
+
560
+ if self.gradient_checkpointing and self.training:
561
+
562
+ def create_custom_forward(module):
563
+ def custom_forward(*inputs):
564
+ # None for past_key_value
565
+ return module(*inputs, output_attentions, None)
566
+
567
+ return custom_forward
568
+
569
+ layer_outputs = torch.utils.checkpoint.checkpoint(
570
+ create_custom_forward(decoder_layer),
571
+ hidden_states,
572
+ attention_mask,
573
+ position_ids,
574
+ None,
575
+ )
576
+ else:
577
+ layer_outputs = decoder_layer(
578
+ hidden_states,
579
+ attention_mask=attention_mask,
580
+ position_ids=position_ids,
581
+ past_key_value=past_key_value,
582
+ output_attentions=output_attentions,
583
+ use_cache=use_cache,
584
+ )
585
+
586
+ hidden_states = layer_outputs[0]
587
+
588
+ if use_cache:
589
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
590
+
591
+ if output_attentions:
592
+ all_self_attns += (layer_outputs[1],)
593
+
594
+ hidden_states = self.norm(hidden_states)
595
+
596
+ # add hidden states from the last decoder layer
597
+ if output_hidden_states:
598
+ all_hidden_states += (hidden_states,)
599
+
600
+ next_cache = next_decoder_cache if use_cache else None
601
+ if not return_dict:
602
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
603
+ return BaseModelOutputWithPast(
604
+ last_hidden_state=hidden_states,
605
+ past_key_values=next_cache,
606
+ hidden_states=all_hidden_states,
607
+ attentions=all_self_attns,
608
+ )
609
+
610
+
611
+ class LlamaForCausalLM(LlamaPreTrainedModel):
612
+ def __init__(self, config):
613
+ super().__init__(config)
614
+ self.model = LlamaModel(config)
615
+
616
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
617
+
618
+ # Initialize weights and apply final processing
619
+ self.post_init()
620
+
621
+ def get_input_embeddings(self):
622
+ return self.model.embed_tokens
623
+
624
+ def set_input_embeddings(self, value):
625
+ self.model.embed_tokens = value
626
+
627
+ def get_output_embeddings(self):
628
+ return self.lm_head
629
+
630
+ def set_output_embeddings(self, new_embeddings):
631
+ self.lm_head = new_embeddings
632
+
633
+ def set_decoder(self, decoder):
634
+ self.model = decoder
635
+
636
+ def get_decoder(self):
637
+ return self.model
638
+
639
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
640
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
641
+ def forward(
642
+ self,
643
+ input_ids: torch.LongTensor = None,
644
+ attention_mask: Optional[torch.Tensor] = None,
645
+ position_ids: Optional[torch.LongTensor] = None,
646
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
647
+ inputs_embeds: Optional[torch.FloatTensor] = None,
648
+ labels: Optional[torch.LongTensor] = None,
649
+ use_cache: Optional[bool] = None,
650
+ output_attentions: Optional[bool] = None,
651
+ output_hidden_states: Optional[bool] = None,
652
+ return_dict: Optional[bool] = None,
653
+ # !!!
654
+ **kwargs
655
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
656
+ r"""
657
+ Args:
658
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
659
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
660
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
661
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
662
+
663
+ Returns:
664
+
665
+ Example:
666
+
667
+ ```python
668
+ >>> from transformers import AutoTokenizer, LlamaForCausalLM
669
+
670
+ >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
671
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
672
+
673
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
674
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
675
+
676
+ >>> # Generate
677
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
678
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
679
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
680
+ ```"""
681
+
682
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
683
+ output_hidden_states = (
684
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
685
+ )
686
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
687
+
688
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
689
+ outputs = self.model(
690
+ input_ids=input_ids,
691
+ attention_mask=attention_mask,
692
+ position_ids=position_ids,
693
+ past_key_values=past_key_values,
694
+ inputs_embeds=inputs_embeds,
695
+ use_cache=use_cache,
696
+ output_attentions=output_attentions,
697
+ output_hidden_states=output_hidden_states,
698
+ return_dict=return_dict,
699
+ )
700
+
701
+ hidden_states = outputs[0]
702
+ logits = self.lm_head(hidden_states)
703
+
704
+ loss = None
705
+ if labels is not None:
706
+ # Shift so that tokens < n predict n
707
+ shift_logits = logits[..., :-1, :].contiguous()
708
+ shift_labels = labels[..., 1:].contiguous()
709
+ # Flatten the tokens
710
+ loss_fct = CrossEntropyLoss()
711
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
712
+ shift_labels = shift_labels.view(-1)
713
+ # Enable model parallelism
714
+ shift_labels = shift_labels.to(shift_logits.device)
715
+ loss = loss_fct(shift_logits, shift_labels)
716
+
717
+ if not return_dict:
718
+ output = (logits,) + outputs[1:]
719
+ return (loss,) + output if loss is not None else output
720
+
721
+ return CausalLMOutputWithPast(
722
+ loss=loss,
723
+ logits=logits,
724
+ past_key_values=outputs.past_key_values,
725
+ hidden_states=outputs.hidden_states,
726
+ attentions=outputs.attentions,
727
+ )
728
+
729
+ def prepare_inputs_for_generation(
730
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
731
+ ):
732
+ if past_key_values:
733
+ input_ids = input_ids[:, -1:]
734
+
735
+ position_ids = kwargs.get("position_ids", None)
736
+ if attention_mask is not None and position_ids is None:
737
+ # create position_ids on the fly for batch generation
738
+ position_ids = attention_mask.long().cumsum(-1) - 1
739
+ position_ids.masked_fill_(attention_mask == 0, 1)
740
+ if past_key_values:
741
+ position_ids = position_ids[:, -1].unsqueeze(-1)
742
+
743
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
744
+ if inputs_embeds is not None and past_key_values is None:
745
+ model_inputs = {"inputs_embeds": inputs_embeds}
746
+ else:
747
+ model_inputs = {"input_ids": input_ids}
748
+
749
+ model_inputs.update(
750
+ {
751
+ "position_ids": position_ids,
752
+ "past_key_values": past_key_values,
753
+ "use_cache": kwargs.get("use_cache"),
754
+ "attention_mask": attention_mask,
755
+ }
756
+ )
757
+ return model_inputs
758
+
759
+ @staticmethod
760
+ def _reorder_cache(past_key_values, beam_idx):
761
+ reordered_past = ()
762
+ for layer_past in past_key_values:
763
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
764
+ return reordered_past
765
+
766
+
767
+ @add_start_docstrings(
768
+ """
769
+ The LLaMa Model transformer with a sequence classification head on top (linear layer).
770
+
771
+ [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
772
+ (e.g. GPT-2) do.
773
+
774
+ Since it does classification on the last token, it requires to know the position of the last token. If a
775
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
776
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
777
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
778
+ each row of the batch).
779
+ """,
780
+ LLAMA_START_DOCSTRING,
781
+ )
782
+ class LlamaForSequenceClassification(LlamaPreTrainedModel):
783
+ _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
784
+
785
+ def __init__(self, config):
786
+ super().__init__(config)
787
+ self.num_labels = config.num_labels
788
+ self.model = LlamaModel(config)
789
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
790
+
791
+ # Initialize weights and apply final processing
792
+ self.post_init()
793
+
794
+ def get_input_embeddings(self):
795
+ return self.model.embed_tokens
796
+
797
+ def set_input_embeddings(self, value):
798
+ self.model.embed_tokens = value
799
+
800
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
801
+ def forward(
802
+ self,
803
+ input_ids: torch.LongTensor = None,
804
+ attention_mask: Optional[torch.Tensor] = None,
805
+ position_ids: Optional[torch.LongTensor] = None,
806
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
807
+ inputs_embeds: Optional[torch.FloatTensor] = None,
808
+ labels: Optional[torch.LongTensor] = None,
809
+ use_cache: Optional[bool] = None,
810
+ output_attentions: Optional[bool] = None,
811
+ output_hidden_states: Optional[bool] = None,
812
+ return_dict: Optional[bool] = None,
813
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
814
+ r"""
815
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
816
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
817
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
818
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
819
+ """
820
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
821
+
822
+ transformer_outputs = self.model(
823
+ input_ids,
824
+ attention_mask=attention_mask,
825
+ position_ids=position_ids,
826
+ past_key_values=past_key_values,
827
+ inputs_embeds=inputs_embeds,
828
+ use_cache=use_cache,
829
+ output_attentions=output_attentions,
830
+ output_hidden_states=output_hidden_states,
831
+ return_dict=return_dict,
832
+ )
833
+ hidden_states = transformer_outputs[0]
834
+ logits = self.score(hidden_states)
835
+
836
+ if input_ids is not None:
837
+ batch_size = input_ids.shape[0]
838
+ else:
839
+ batch_size = inputs_embeds.shape[0]
840
+
841
+ if self.config.pad_token_id is None and batch_size != 1:
842
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
843
+ if self.config.pad_token_id is None:
844
+ sequence_lengths = -1
845
+ else:
846
+ if input_ids is not None:
847
+ sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
848
+ else:
849
+ sequence_lengths = -1
850
+
851
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
852
+
853
+ loss = None
854
+ if labels is not None:
855
+ labels = labels.to(logits.device)
856
+ if self.config.problem_type is None:
857
+ if self.num_labels == 1:
858
+ self.config.problem_type = "regression"
859
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
860
+ self.config.problem_type = "single_label_classification"
861
+ else:
862
+ self.config.problem_type = "multi_label_classification"
863
+
864
+ if self.config.problem_type == "regression":
865
+ loss_fct = MSELoss()
866
+ if self.num_labels == 1:
867
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
868
+ else:
869
+ loss = loss_fct(pooled_logits, labels)
870
+ elif self.config.problem_type == "single_label_classification":
871
+ loss_fct = CrossEntropyLoss()
872
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
873
+ elif self.config.problem_type == "multi_label_classification":
874
+ loss_fct = BCEWithLogitsLoss()
875
+ loss = loss_fct(pooled_logits, labels)
876
+ if not return_dict:
877
+ output = (pooled_logits,) + transformer_outputs[1:]
878
+ return ((loss,) + output) if loss is not None else output
879
+
880
+ return SequenceClassifierOutputWithPast(
881
+ loss=loss,
882
+ logits=pooled_logits,
883
+ past_key_values=transformer_outputs.past_key_values,
884
+ hidden_states=transformer_outputs.hidden_states,
885
+ attentions=transformer_outputs.attentions,
886
+ )
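This debug copy follows the stock transformers implementation; the piece most worth internalizing is the additive causal mask built by `_make_causal_mask`. A standalone sketch of the same idea (plain PyTorch, no transformers imports):

```python
import torch

def causal_mask(tgt_len: int, dtype=torch.float32) -> torch.Tensor:
    # Start with every position blocked (large negative additive bias) ...
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
    cols = torch.arange(tgt_len)
    # ... then unblock the lower triangle: a token may attend to itself and the past.
    mask.masked_fill_(cols < (cols + 1).view(tgt_len, 1), 0.0)
    return mask  # [tgt_len, tgt_len], added to attention scores before softmax

print(causal_mask(4))
# Row i has zeros in columns 0..i and torch.finfo(dtype).min in the future columns.
```

The full helper above additionally left-pads the mask with zeros to cover cached `past_key_values` and expands it to `[bsz, 1, tgt_len, src_len]`.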
debug/utils.py ADDED
The diff for this file is too large to render. See raw diff
 
main.py ADDED
@@ -0,0 +1,149 @@
1
+ import os
2
+ import pytorch_lightning as pl
3
+ from argparse import ArgumentParser
4
+ from pytorch_lightning import Trainer
5
+ import pytorch_lightning.callbacks as plc
6
+ from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
7
+
8
+ from model.model_interface import MInterface
9
+ from data.data_interface import DInterface
10
+ from recommender.A_SASRec_final_bce_llm import SASRec, Caser, GRU
11
+ from SASRecModules_ori import *
12
+ import torch # torch.load and torch.multiprocessing are used directly below
+ from transformers import LlamaForCausalLM, LlamaTokenizer
13
+
14
+ def load_callbacks(args):
15
+ callbacks = []
16
+ callbacks.append(plc.EarlyStopping(
17
+ monitor='metric',
18
+ mode='max',
19
+ patience=10,
20
+ min_delta=0.001
21
+ ))
22
+
23
+ callbacks.append(plc.ModelCheckpoint(
24
+ monitor='metric',
25
+ dirpath=args.ckpt_dir,
26
+ filename='{epoch:02d}-{metric:.3f}',
27
+ save_top_k=-1,
28
+ mode='max',
29
+ save_last=True,
30
+ #train_time_interval=args.val_check_interval
31
+ every_n_epochs=1
32
+ ))
33
+
34
+ if args.lr_scheduler:
35
+ callbacks.append(plc.LearningRateMonitor(
36
+ logging_interval='step'))
37
+ return callbacks
38
+
39
+ def main(args):
40
+ pl.seed_everything(args.seed)
41
+ model = MInterface(**vars(args))
42
+ if args.ckpt_path:
43
+ ckpt = torch.load(args.ckpt_path, map_location='cpu')
44
+
45
+ model.load_state_dict(ckpt['state_dict'], strict=False)
46
+ print("load checkpoints from {}".format(args.ckpt_path))
47
+
48
+ data_module = DInterface(llm_tokenizer=model.llama_tokenizer,**vars(args))
49
+
50
+ args.max_steps=len(data_module.trainset) * args.max_epochs // (args.accumulate_grad_batches * args.batch_size)
51
+ logger = TensorBoardLogger(save_dir='./log/', name=args.log_dir)
52
+ args.callbacks = load_callbacks(args)
53
+ args.logger = logger
54
+ if not os.path.exists(args.ckpt_dir):
55
+ os.makedirs(args.ckpt_dir)
56
+
57
+ trainer = Trainer.from_argparse_args(args)
58
+
59
+ if args.auto_lr_find:
60
+ lr_finder=trainer.tuner.lr_find(model=model, datamodule=data_module, min_lr=1e-10, max_lr=1e-3, num_training=100)
61
+ fig=lr_finder.plot(suggest=True)
62
+ fig_path="lr_finder.png"
63
+ fig.savefig(fig_path)
64
+ print("Saving to {}".format(fig_path))
65
+ model.hparams.lr=lr_finder.suggestion()
66
+
67
+ if args.mode == 'train':
68
+ trainer.fit(model=model, datamodule=data_module)
69
+ else:
70
+ trainer.test(model=model, datamodule=data_module)
71
+
72
+
73
+ if __name__ == '__main__':
74
+ torch.multiprocessing.set_start_method('spawn')
75
+ parser = ArgumentParser()
76
+
77
+ parser.add_argument('--accelerator', default='gpu', type=str)
78
+ parser.add_argument('--devices', default=-1, type=list)
79
+ parser.add_argument('--precision', default=16, type=int)
80
+ parser.add_argument('--amp_backend', default="native", type=str)
81
+
82
+ parser.add_argument('--batch_size', default=4, type=int)
83
+ parser.add_argument('--num_workers', default=8, type=int)
84
+ parser.add_argument('--seed', default=1234, type=int)
85
+ parser.add_argument('--lr', default=1e-4, type=float)
86
+ parser.add_argument('--accumulate_grad_batches', default=32, type=int)
87
+ parser.add_argument('--check_val_every_n_epoch', default=1, type=int)
88
+
89
+ parser.add_argument('--lr_scheduler', default='cosine', choices=['cosine'], type=str)
90
+ parser.add_argument('--lr_decay_min_lr', default=1e-6, type=float)
91
+ parser.add_argument('--lr_warmup_start_lr', default=1e-6, type=float)
92
+
93
+ parser.add_argument('--load_best', action='store_true')
94
+ parser.add_argument('--load_dir', default=None, type=str)
95
+ parser.add_argument('--load_ver', default=None, type=str)
96
+ parser.add_argument('--load_v_num', default=None, type=int)
97
+
98
+ parser.add_argument('--dataset', default='steam_data', type=str)
99
+ parser.add_argument('--data_dir', default='LLaRA_MOE/data/ref/steam', type=str)
100
+ parser.add_argument('--model_name', default='mlp_projector', type=str)
101
+ parser.add_argument('--loss', default='lm', type=str)
102
+ parser.add_argument('--weight_decay', default=1e-5, type=float)
103
+ parser.add_argument('--no_augment', action='store_true')
104
+ parser.add_argument('--ckpt_dir', default='LLaRA_MOE/checkpoints/steam/', type=str)
105
+ parser.add_argument('--log_dir', default='steam_logs', type=str)
106
+
107
+ parser.add_argument('--rec_size', default=64, type=int)
108
+ parser.add_argument('--padding_item_id', default=3581, type=int)
109
+ parser.add_argument('--llm_path', default='meta-llama/Llama-2-7b-hf', type=str)
110
+ parser.add_argument('--rec_model_path', default='LLaRA_MOE/rec_model/SASRec_steam.pt', type=str)
111
+ parser.add_argument('--prompt_path', default='LLaRA_MOE/prompt/game.txt', type=str)
112
+ parser.add_argument('--output_dir', default='LLaRA_MOE/output/steam_moe/', type=str)
113
+ parser.add_argument('--ckpt_path', type=str)
114
+ parser.add_argument('--rec_embed', default="SASRec", choices=['SASRec', 'Caser','GRU'], type=str)
115
+
116
+ parser.add_argument('--aug_prob', default=0.5, type=float)
117
+ parser.add_argument('--mode', default='test', choices=['train', 'test'], type=str)
118
+ parser.add_argument('--auto_lr_find', default=False, action='store_true')
119
+ parser.add_argument('--metric', default='hr', choices=['hr'], type=str)
120
+ parser.add_argument('--max_epochs', default=5, type=int)
121
+ parser.add_argument('--save', default='part', choices=['part', 'all'], type=str)
122
+ parser.add_argument('--cans_num', default=20, type=int)
123
+
124
+ # Finetuning
125
+ parser.add_argument('--llm_tuning', default='moelora', choices=['lora', 'freeze','freeze_lora', 'moelora'], type=str)
126
+ parser.add_argument('--peft_dir', default=None, type=str)
127
+ parser.add_argument('--peft_config', default=None, type=str)
128
+ parser.add_argument('--lora_r', default=8, type=float)
129
+ parser.add_argument('--lora_alpha', default=32, type=float)
130
+ parser.add_argument('--lora_dropout', default=0.1, type=float)
131
+ parser.add_argument('--num_moe', default=4, type=int)
132
+ parser.add_argument('--gating', default='Dense', type=str)
133
+
134
+ parser.add_argument('--local_rank', default=3, type=int)
135
+
136
+ parser.add_argument('--if_rand', default=False, type=bool)
137
+
138
+ parser.add_argument('--router', default='unshare', choices=['share', 'unshare'], type=str)
139
+
140
+ args = parser.parse_args()
141
+
142
+ if 'movielens' in args.data_dir:
143
+ args.padding_item_id = 1682
144
+ elif 'steam' in args.data_dir:
145
+ args.padding_item_id = 3581
146
+ elif 'lastfm' in args.data_dir:
147
+ args.padding_item_id = 4606
148
+
149
+ main(args)
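One detail that is easy to miss in `main()` is that `args.max_steps` is derived from the dataset size before the trainer is built, so the LR scheduler sees the true number of optimizer steps. A quick sketch of that arithmetic with made-up sizes:

```python
# Hypothetical sizes, only to illustrate the max_steps computation in main().
trainset_size = 11_000        # stands in for len(data_module.trainset)
max_epochs = 5
accumulate_grad_batches = 32
batch_size = 4

# One optimizer step consumes accumulate_grad_batches * batch_size samples.
max_steps = trainset_size * max_epochs // (accumulate_grad_batches * batch_size)
print(max_steps)  # 429 optimizer steps for the whole run
```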
model/__init__.py ADDED
File without changes
model/mlp_projector.py ADDED
@@ -0,0 +1,14 @@
1
+ from torch import nn
2
+
3
+ class MlpProjector(nn.Module):
4
+ def __init__(self, rec_size=64, llm_size=4096):
5
+ super().__init__()
6
+ self.mlp_proj = nn.Sequential(
7
+ nn.Linear(rec_size, llm_size),
8
+ nn.GELU(),
9
+ nn.Linear(llm_size, llm_size)
10
+ )
11
+
12
+ def forward(self, x):
13
+ x = self.mlp_proj(x)
14
+ return x
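A minimal usage sketch for the projector above, assuming the repository root is on `PYTHONPATH` and the defaults of `rec_size=64` (the SASRec embedding size) and `llm_size=4096` (the Llama-2-7B hidden size):

```python
import torch
from model.mlp_projector import MlpProjector

proj = MlpProjector(rec_size=64, llm_size=4096)
item_embs = torch.randn(2, 10, 64)   # [batch, seq_len, rec_size] from the recommender
llm_space = proj(item_embs)          # projected into the LLM embedding space
print(llm_space.shape)               # torch.Size([2, 10, 4096])
```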
model/model_interface.py ADDED
@@ -0,0 +1,492 @@
1
+ import inspect
2
+ import torch
3
+ import importlib
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+ import torch.optim.lr_scheduler as lrs
7
+
8
+ import pytorch_lightning as pl
9
+
10
+ from transformers import LlamaForCausalLM, LlamaTokenizer
11
+ import random
12
+ from pandas.core.frame import DataFrame
13
+ import os.path as op
14
+ import os
15
+ from optims import LinearWarmupCosineLRScheduler
16
+ import numpy as np
17
+ from .peft import get_peft_config, get_peft_model, get_peft_model_state_dict, LoraConfig, TaskType, PeftModel, MoeLoraConfig, MoeLoraModel
18
+ import pickle
19
+ from .router.nlpr import LambdaLayer, ResidualBlock, GateFunction, NLPRecommendationRouter, build_router
20
+
21
+
22
+ # from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, LoraConfig, TaskType, PeftModel
23
+ class MInterface(pl.LightningModule):
24
+ def __init__(self,
25
+ **kargs):
26
+ super().__init__()
27
+ self.save_hyperparameters()
28
+ self.load_llm(self.hparams.llm_path)
29
+
30
+ if self.hparams.router == 'share':
31
+ self.router = build_router()
32
+
33
+ self.load_rec_model(self.hparams.rec_model_path)
34
+ self.load_projector()
35
+ self.gradient_storage = {}
36
+
37
+ def forward(self, batch):
38
+ targets = batch["tokens"].input_ids.masked_fill(
39
+ batch["tokens"].input_ids == self.llama_tokenizer.pad_token_id, -100
40
+ ) # [batch_size, max_len]
41
+
42
+ targets = targets.masked_fill((batch["tokens"].token_type_ids == 0)[:,1:], -100)
43
+ # targets = targets.masked_fill((batch["tokens"].token_type_ids == 0)[:,:], -100)
44
+
45
+ input_embeds, user_embeds = self.wrap_emb(batch)
46
+
47
+ if self.hparams.router == 'share':
48
+ gate_weights = self.router(user_embeds)
49
+ outputs = self.llama_model(
50
+ inputs_embeds=input_embeds,
51
+ attention_mask=batch["tokens"].attention_mask,
52
+ return_dict=True,
53
+ labels=targets,
54
+ use_cache=False,
55
+ user_embeds=user_embeds,
56
+ gate_weights=gate_weights
57
+ )
58
+ return outputs
59
+
60
+ outputs = self.llama_model(
61
+ inputs_embeds=input_embeds,
62
+ attention_mask=batch["tokens"].attention_mask,
63
+ return_dict=True,
64
+ labels=targets,
65
+ use_cache=False,
66
+ user_embeds=user_embeds
67
+ )
68
+ return outputs
69
+
70
+ def generate(self, batch,temperature=0.8,do_sample=False,num_beams=1,max_gen_length=64,min_gen_length=1,repetition_penalty=1.0,length_penalty=1.0, num_return_sequences=1):
71
+ input_embeds, user_embeds = self.wrap_emb(batch)
72
+ if self.hparams.router == 'share':
73
+ gate_weights = self.router(user_embeds)
74
+ generate_ids = self.llama_model.generate(
75
+ inputs_embeds=input_embeds,
76
+ attention_mask=batch["tokens"].attention_mask,
77
+ temperature=temperature,
78
+ do_sample=do_sample,
79
+ num_beams=num_beams,
80
+ max_new_tokens=max_gen_length,
81
+ min_new_tokens=min_gen_length,
82
+ pad_token_id=self.llama_tokenizer.pad_token_id,
83
+ repetition_penalty=repetition_penalty,
84
+ length_penalty=length_penalty,
85
+ num_return_sequences=num_return_sequences,
86
+ user_embeds=user_embeds,
87
+ gate_weights = gate_weights
88
+ )
89
+ output_text=self.llama_tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
90
+ outputs=[text.strip() for text in output_text]
91
+ return outputs
92
+
93
+ gate_weights = self.router(user_embeds)
94
+
95
+ generate_ids = self.llama_model.generate(
96
+ inputs_embeds=input_embeds,
97
+ attention_mask=batch["tokens"].attention_mask,
98
+ temperature=temperature,
99
+ do_sample=do_sample,
100
+ num_beams=num_beams,
101
+ max_new_tokens=max_gen_length,
102
+ min_new_tokens=min_gen_length,
103
+ pad_token_id=self.llama_tokenizer.pad_token_id,
104
+ repetition_penalty=repetition_penalty,
105
+ length_penalty=length_penalty,
106
+ num_return_sequences=num_return_sequences,
107
+
108
+ user_embeds=user_embeds,
109
+ gate_weights = gate_weights
110
+ )
111
+ output_text=self.llama_tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
112
+ outputs=[text.strip() for text in output_text]
113
+ return outputs
114
+
115
+ def capture_and_store_gradients(self):
116
+ for name, param in self.llama_model.named_parameters():
117
+ if "lora" in name and param.grad is not None:
118
+ if name not in self.gradient_storage:
119
+ self.gradient_storage[name] = []
120
+ self.gradient_storage[name].append(param.grad.clone().detach())
121
+
122
+ if self.trainer.global_step % 10 == 0:
123
+ self.save_gradients_to_file()
124
+
125
+ def save_gradients_to_file(self):
126
+ directory = self.hparams.capture_dir
127
+ if not os.path.exists(directory):
128
+ os.makedirs(directory)
129
+ file_path = os.path.join(directory, f'gradients_step_{self.trainer.global_step}.pkl')
130
+ with open(file_path, 'wb') as f:
131
+ pickle.dump(self.gradient_storage, f)
132
+ self.gradient_storage = {}
133
+
134
+ def training_step(self, batch, batch_idx):
135
+ if self.scheduler:
136
+ self.scheduler.step(self.trainer.global_step, self.current_epoch, self.trainer.max_steps)
137
+ if batch["flag"]:
138
+ for name, param in self.projector.named_parameters():
139
+ param.requires_grad = False
140
+ else:
141
+ for name, param in self.projector.named_parameters():
142
+ param.requires_grad = True
143
+ out = self(batch)
144
+ loss = self.configure_loss(out)
145
+ self.log('loss', loss, on_step=True, on_epoch=True, prog_bar=True)
146
+ self.log('lr', self.scheduler.optimizer.param_groups[0]['lr'], on_step=True, on_epoch=True, prog_bar=True)
147
+ self.log('global_step_num', self.trainer.global_step, on_step=True, on_epoch=True, prog_bar=True)
148
+
149
+ return loss
150
+
151
+ def on_validation_epoch_start(self):
152
+ self.val_content={
153
+ "generate":[],
154
+ "real":[],
155
+ "cans":[],
156
+ }
157
+
158
+ @torch.no_grad()
159
+ def validation_step(self, batch, batch_idx):
160
+ generate_output = self.generate(batch)
161
+ output=[]
162
+ for i,generate in enumerate(generate_output):
163
+ real=batch['correct_answer'][i]
164
+ cans=batch['cans_name'][i]
165
+ generate=generate.strip().split("\n")[0]
166
+ output.append((generate,real,cans))
167
+ return output
168
+
169
+ def on_validation_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
170
+ for generate,real,cans in outputs:
171
+ self.val_content["generate"].append(generate)
172
+ self.val_content["real"].append(real)
173
+ self.val_content["cans"].append(cans)
174
+
175
+ def on_validation_epoch_end(self):
176
+ df=DataFrame(self.val_content)
177
+ if not os.path.exists(self.hparams.output_dir):
178
+ os.makedirs(self.hparams.output_dir)
179
+ df.to_csv(op.join(self.hparams.output_dir, 'valid.csv'))
180
+ prediction_valid_ratio,hr=self.calculate_hr1(self.val_content)
181
+ metric=hr*prediction_valid_ratio
182
+ self.log('val_prediction_valid', prediction_valid_ratio, on_step=False, on_epoch=True, prog_bar=True)
183
+ self.log('val_hr', hr, on_step=False, on_epoch=True, prog_bar=True)
184
+ self.log('metric', metric, on_step=False, on_epoch=True, prog_bar=True)
185
+
186
+ def on_test_epoch_start(self):
187
+ self.test_content={
188
+ "generate":[],
189
+ "real":[],
190
+ "cans":[],
191
+ }
192
+
193
+ @torch.no_grad()
194
+ def test_step(self, batch, batch_idx):
195
+ generate_output = self.generate(batch)
196
+ output=[]
197
+ for i,generate in enumerate(generate_output):
198
+ real=batch['correct_answer'][i]
199
+ cans=batch['cans_name'][i]
200
+ generate=generate.strip().split("\n")[0]
201
+ output.append((generate,real,cans))
202
+ return output
203
+
204
+ def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
205
+ for generate,real,cans in outputs:
206
+ self.test_content["generate"].append(generate)
207
+ self.test_content["real"].append(real)
208
+ self.test_content["cans"].append(cans)
209
+
210
+ def on_test_epoch_end(self):
211
+ df=DataFrame(self.test_content)
212
+ if not os.path.exists(self.hparams.output_dir):
213
+ os.makedirs(self.hparams.output_dir)
214
+ df.to_csv(op.join(self.hparams.output_dir, 'test.csv'))
215
+ prediction_valid_ratio,hr=self.calculate_hr1(self.test_content)
216
+ metric=hr*prediction_valid_ratio
217
+
218
+ self.log('test_prediction_valid', prediction_valid_ratio, on_step=False, on_epoch=True, prog_bar=True)
219
+ self.log('test_hr', hr, on_step=False, on_epoch=True, prog_bar=True)
220
+ self.log('metric', metric, on_step=False, on_epoch=True, prog_bar=True)
221
+
222
+ def configure_optimizers(self):
223
+ if hasattr(self.hparams, 'weight_decay'):
224
+ weight_decay = self.hparams.weight_decay
225
+ else:
226
+ weight_decay = 0
227
+ optimizer = torch.optim.SGD([
228
+ {'params': self.projector.parameters(), 'lr': self.hparams.lr, 'weight_decay':weight_decay},
229
+
230
+ {'params': self.router.parameters(), 'lr': self.hparams.lr * 0.3, 'weight_decay':weight_decay},
231
+
232
+ {'params': [p for n, p in self.llama_model.named_parameters() if "gating" not in n], 'lr': self.hparams.lr},
233
+ # {'params': [p for n, p in self.llama_model.named_parameters() if "gating" in n], 'lr': self.hparams.lr * 1, 'weight_decay':weight_decay}
234
+
235
+ # {'params': self.llama_model.parameters(), 'lr': self.hparams.lr},
236
+ ])
237
+
238
+
239
+ for i, param_group in enumerate(optimizer.param_groups):
240
+ print(f"Initial LR for group {i}: {param_group['lr']}")
241
+ total_params = sum(p.numel() for p in param_group['params'])
242
+ print(f"Parameter Group {i}: {total_params} parameters")
243
+
244
+ if self.hparams.lr_scheduler is None:
245
+ return optimizer
246
+ else:
247
+ max_step = self.trainer.max_steps
248
+ warmup_steps = max_step // 20
249
+ print(f'max_step: {max_step}')
250
+ print(f'warmup_steps: {warmup_steps}')
251
+ if self.hparams.lr_scheduler == 'cosine':
252
+
253
+ init_lr_list = [
254
+ self.hparams.lr,
255
+ self.hparams.lr * 0.3,
256
+ self.hparams.lr * 1
257
+ ]
258
+ min_lr_list = [
259
+ self.hparams.lr_decay_min_lr,
260
+ self.hparams.lr_decay_min_lr * 0.3,
261
+ self.hparams.lr_decay_min_lr * 1
262
+ ]
263
+ warmup_start_lr_list = [
264
+ self.hparams.lr_warmup_start_lr,
265
+ self.hparams.lr_warmup_start_lr * 0.3,
266
+ self.hparams.lr_warmup_start_lr * 1
267
+ ]
268
+ self.scheduler = LinearWarmupCosineLRScheduler(
269
+ optimizer=optimizer,
270
+ max_step=max_step,
271
+ min_lr_list=min_lr_list,
272
+ init_lr_list=init_lr_list,
273
+ warmup_steps=warmup_steps,
274
+ warmup_start_lr_list=warmup_start_lr_list
275
+ )
276
+
277
+
278
+ for i, param_group in enumerate(optimizer.param_groups):
279
+ print(f"Initial LR for group {i}: {param_group['lr']}")
280
+ total_params = sum(p.numel() for p in param_group['params'])
281
+ print(f"Parameter Group {i}: {total_params} parameters")
282
+
283
+
284
+ else:
285
+ self.scheduler = None
286
+ raise ValueError('Invalid lr_scheduler type!')
287
+ return optimizer
288
+
289
+ def configure_loss(self, out, labels=None):
290
+ loss = self.hparams.loss.lower()
291
+ if loss == 'lm':
292
+ return out.loss
293
+ else:
294
+ raise ValueError("Invalid Loss Type!")
295
+
296
+ def on_save_checkpoint(self, checkpoint):
297
+ if self.hparams.save == 'part':
298
+ checkpoint.pop('optimizer_states')
299
+ to_be_removed = []
300
+ for key, value in checkpoint['state_dict'].items():
301
+ try:
302
+ if not self.get_parameter(key).requires_grad:
303
+ to_be_removed.append(key)
304
+ except AttributeError:
305
+ to_be_removed.append(key)
306
+ for key in to_be_removed:
307
+ checkpoint['state_dict'].pop(key)
308
+ elif self.hparams.save == 'all':
309
+ pass
310
+
311
+ def load_llm(self, llm_path):
312
+ print('Loading LLAMA')
313
+ self.llama_tokenizer = LlamaTokenizer.from_pretrained(llm_path, use_fast=False)
314
+ self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
315
+ self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
316
+ self.llama_tokenizer.padding_side = "right"
317
+ self.llama_tokenizer.add_special_tokens({'additional_special_tokens': ['[PH]','[HistoryEmb]','[CansEmb]','[ItemEmb]']})
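+ # placeholder tokens: the [HistoryEmb]/[CansEmb]/[ItemEmb] positions are overwritten with projected recommender embeddings in wrap_emb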
318
+ self.llama_model = LlamaForCausalLM.from_pretrained(llm_path, device_map="auto",load_in_8bit=True)
319
+ self.llama_model.resize_token_embeddings(len(self.llama_tokenizer))
320
+ if self.hparams.llm_tuning == 'lora':
321
+ if self.hparams.peft_dir:
322
+ self.llama_model = PeftModel.from_pretrained(self.llama_model, self.hparams.peft_dir, is_trainable=True)
323
+ else:
324
+ if self.hparams.peft_config:
325
+ peft_config = LoraConfig(**LoraConfig.from_json_file(self.hparams.peft_config))
326
+ else:
327
+ peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM,
328
+ inference_mode=False,
329
+ r=self.hparams.lora_r,
330
+ lora_alpha=self.hparams.lora_alpha,
331
+ lora_dropout=self.hparams.lora_dropout,
332
+ target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'])
333
+ self.peft_config = peft_config
334
+ self.llama_model = get_peft_model(self.llama_model, peft_config)
335
+ self.llama_model.print_trainable_parameters()
336
+ elif self.hparams.llm_tuning == 'freeze':
337
+ for name, param in self.llama_model.named_parameters():
338
+ param.requires_grad = False
339
+ elif self.hparams.llm_tuning == 'freeze_lora':
340
+ if self.hparams.peft_dir:
341
+ self.llama_model = PeftModel.from_pretrained(self.llama_model, self.hparams.peft_dir, is_trainable=True)
342
+ else:
343
+ if self.hparams.peft_config:
344
+ peft_config = LoraConfig(**LoraConfig.from_json_file(self.hparams.peft_config))
345
+ else:
346
+ peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM,
347
+ inference_mode=False,
348
+ r=self.hparams.lora_r,
349
+ lora_alpha=self.hparams.lora_alpha,
350
+ lora_dropout=self.hparams.lora_dropout,
351
+ target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'])
352
+ self.peft_config = peft_config
353
+ self.llama_model = get_peft_model(self.llama_model, peft_config)
354
+ for name, param in self.llama_model.named_parameters():
355
+ param.requires_grad = False
356
+ self.llama_model.print_trainable_parameters()
357
+ elif self.hparams.llm_tuning == 'moelora':
358
+ if self.hparams.peft_dir:
359
+ self.llama_model = PeftModel.from_pretrained(self.llama_model, self.hparams.peft_dir, is_trainable=True)
360
+ else:
361
+ if self.hparams.peft_config:
362
+ peft_config = MoeLoraConfig(**MoeLoraConfig.from_json_file(self.hparams.peft_config))
363
+ else:
364
+ peft_config = MoeLoraConfig(task_type=TaskType.CAUSAL_LM,
365
+ inference_mode=False,
366
+ r=self.hparams.lora_r,
367
+ lora_alpha=self.hparams.lora_alpha,
368
+ lora_dropout=self.hparams.lora_dropout,
369
+ target_modules=['k_proj', 'v_proj', 'q_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'],
370
+ num_moe=self.hparams.num_moe,
371
+ gating=self.hparams.gating)
372
+ self.peft_config = peft_config
373
+ self.llama_model = get_peft_model(self.llama_model, peft_config)
374
+
375
+ """for name, param in self.llama_model.named_parameters():
376
+ if "gating" not in name:
377
+ param.requires_grad = False"""
378
+ self.llama_model.print_trainable_parameters()
379
+ else:
380
+ raise NotImplementedError()
381
+
382
+ print('Loading LLAMA Done')
383
+
384
+ def load_projector(self):
385
+ name = self.hparams.model_name
386
+ camel_name = ''.join([i.capitalize() for i in name.split('_')])
387
+ try:
388
+ Model = getattr(importlib.import_module(
389
+ '.'+name, package=__package__), camel_name)
390
+ except:
391
+ raise ValueError(
392
+ f'Invalid Module File Name or Invalid Class Name {name}.{camel_name}!')
393
+ self.projector = self.instancialize(Model, rec_size=self.hparams.rec_size, llm_size=self.llama_model.config.hidden_size)
394
+
395
+ def instancialize(self, Model, **other_args):
396
+ class_args = inspect.getfullargspec(Model.__init__).args[1:]
397
+ inkeys = self.hparams.keys()
398
+ args1 = {}
399
+ for arg in class_args:
400
+ if arg in inkeys:
401
+ args1[arg] = getattr(self.hparams, arg)
402
+ args1.update(other_args)
403
+ # args1: the arguments of Model.__init__ that are also present in hparams
404
+ return Model(**args1)
405
+
406
+ def load_rec_model(self, rec_model_path):
407
+ print('Loading Rec Model')
408
+ self.rec_model = torch.load(rec_model_path, map_location="cpu")
409
+ self.rec_model.eval()
410
+ for name, param in self.rec_model.named_parameters():
411
+ param.requires_grad = False
412
+ print('Loading Rec Model Done')
413
+
414
+ def encode_items(self, seq):
415
+ if self.hparams.rec_embed=="SASRec":
416
+ item_rec_embs=self.rec_model.cacu_x(seq)
417
+ elif self.hparams.rec_embed in ['Caser','GRU']:
418
+ item_rec_embs=self.rec_model.item_embeddings(seq)
419
+ item_txt_embs=self.projector(item_rec_embs)
420
+ return item_txt_embs
421
+
422
+ def encode_users(self, seq, len_seq):
423
+ if self.hparams.rec_embed=="SASRec":
424
+ user_rec_embs=self.rec_model.cacul_h(seq, len_seq)
425
+ elif self.hparams.rec_embed in ['Caser','GRU']:
426
+ user_rec_embs=self.rec_model.item_embeddings(seq)
427
+
428
+ user_txt_embs=self.projector(user_rec_embs)
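+ # note: the raw rec-space user embedding is returned below; the projected user_txt_embs above is left unused here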
429
+ return user_rec_embs
430
+
431
+ def embed_tokens(self, token_ids):
432
+ embeds = self.llama_model.base_model.embed_tokens(token_ids)
433
+ return embeds
434
+
435
+ # batch -> embeds
436
+ def wrap_emb(self, batch):
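+ # builds prompt embeddings from the tokenized batch, swaps each placeholder position for the matching projected item embedding, and also returns the user (sequence) embedding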
437
+ input_embeds = self.llama_model.get_input_embeddings()(batch["tokens"].input_ids)
438
+
439
+
440
+
441
+ his_token_id=self.llama_tokenizer("[HistoryEmb]", return_tensors="pt",add_special_tokens=False).input_ids.item()
442
+ cans_token_id=self.llama_tokenizer("[CansEmb]", return_tensors="pt",add_special_tokens=False).input_ids.item()
443
+ item_token_id=self.llama_tokenizer("[ItemEmb]", return_tensors="pt",add_special_tokens=False).input_ids.item()
444
+
445
+
446
+ his_item_embeds = self.encode_items(batch["seq"])
447
+ cans_item_embeds = self.encode_items(batch["cans"])
448
+ item_embeds=self.encode_items(batch["item_id"])
449
+
450
+
451
+ user_embeds=self.encode_users(batch["seq"], batch["len_seq"])
452
+
453
+ for i in range(len(batch["len_seq"])):
454
+ if (batch["tokens"].input_ids[i]==his_token_id).nonzero().shape[0]>0:
455
+ idx_tensor=(batch["tokens"].input_ids[i]==his_token_id).nonzero().view(-1)
456
+ for idx, item_emb in zip(idx_tensor,his_item_embeds[i,:batch["len_seq"][i].item()]):
457
+ input_embeds[i,idx]=item_emb
458
+ if (batch["tokens"].input_ids[i]==cans_token_id).nonzero().shape[0]>0:
459
+ idx_tensor=(batch["tokens"].input_ids[i]==cans_token_id).nonzero().view(-1)
460
+ for idx, item_emb in zip(idx_tensor,cans_item_embeds[i,:batch["len_cans"][i].item()]):
461
+ input_embeds[i,idx]=item_emb
462
+ if (batch["tokens"].input_ids[i]==item_token_id).nonzero().shape[0]>0:
463
+ idx=(batch["tokens"].input_ids[i]==item_token_id).nonzero().item()
464
+ input_embeds[i,idx]=item_embeds[i]
465
+
466
+ return input_embeds, user_embeds
467
+
468
+ def calculate_hr1(self,eval_content):
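+ # a generation is counted as valid only if exactly one candidate name appears in it; HR@1 is then the fraction of valid generations that match the ground truth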
469
+ correct_num=0
470
+ valid_num=0
471
+ total_num=0
472
+ for i,generate in enumerate(eval_content["generate"]):
473
+ real=eval_content["real"][i]
474
+ cans=eval_content["cans"][i]
475
+ total_num+=1
476
+ generate=generate.strip().lower().strip()
477
+ real=real.strip().lower().strip()
478
+ cans=[item.strip().lower().strip() for item in cans]
479
+ gen_cans_list=[]
480
+ for cans_item in cans:
481
+ if cans_item in generate:
482
+ gen_cans_list.append(cans_item)
483
+ if len(gen_cans_list)==1:
484
+ valid_num+=1
485
+ if real == gen_cans_list[0]:
486
+ correct_num+=1
487
+ valid_ratio=valid_num/total_num
488
+ if valid_num>0:
489
+ hr1=correct_num/valid_num
490
+ else:
491
+ hr1=0
492
+ return valid_ratio,hr1
model/peft/__init__.py ADDED
@@ -0,0 +1,61 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all.
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ __version__ = "0.4.0.dev0"
21
+
22
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING, get_peft_config, get_peft_model
23
+ from .peft_model import (
24
+ PeftModel,
25
+ PeftModelForCausalLM,
26
+ PeftModelForSeq2SeqLM,
27
+ PeftModelForSequenceClassification,
28
+ PeftModelForTokenClassification,
29
+ PeftModelForQuestionAnswering,
30
+ )
31
+ from .tuners import (
32
+ AdaptionPromptConfig,
33
+ AdaptionPromptModel,
34
+ LoraConfig,
35
+ LoraModel,
36
+ AdaLoraConfig,
37
+ AdaLoraModel,
38
+ PrefixEncoder,
39
+ PrefixTuningConfig,
40
+ PromptEmbedding,
41
+ PromptEncoder,
42
+ PromptEncoderConfig,
43
+ PromptEncoderReparameterizationType,
44
+ PromptTuningConfig,
45
+ PromptTuningInit,
46
+ MoeLoraConfig,
47
+ MoeLoraModel,
48
+ )
49
+ from .utils import (
50
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
51
+ PeftConfig,
52
+ PeftType,
53
+ PromptLearningConfig,
54
+ TaskType,
55
+ bloom_model_postprocess_past_key_value,
56
+ get_peft_model_state_dict,
57
+ prepare_model_for_int8_training,
58
+ prepare_model_for_kbit_training,
59
+ set_peft_model_state_dict,
60
+ shift_tokens_right,
61
+ )
model/peft/import_utils.py ADDED
@@ -0,0 +1,28 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import importlib
16
+
17
+
18
+ def is_bnb_available():
19
+ return importlib.util.find_spec("bitsandbytes") is not None
20
+
21
+
22
+ def is_bnb_4bit_available():
23
+ if not is_bnb_available():
24
+ return False
25
+
26
+ import bitsandbytes as bnb
27
+
28
+ return hasattr(bnb.nn, "Linear4bit")
model/peft/mapping.py ADDED
@@ -0,0 +1,134 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from __future__ import annotations
17
+
18
+ from typing import TYPE_CHECKING, Any, Dict
19
+
20
+ from .peft_model import (
21
+ PeftModel,
22
+ PeftModelForCausalLM,
23
+ PeftModelForQuestionAnswering,
24
+ PeftModelForSeq2SeqLM,
25
+ PeftModelForSequenceClassification,
26
+ PeftModelForTokenClassification,
27
+ )
28
+ from .tuners import (
29
+ AdaLoraConfig,
30
+ AdaptionPromptConfig,
31
+ LoraConfig,
32
+ PrefixTuningConfig,
33
+ PromptEncoderConfig,
34
+ PromptTuningConfig,
35
+ MoeLoraConfig,
36
+ )
37
+ from .utils import PromptLearningConfig
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from transformers import PreTrainedModel
42
+
43
+ from .utils.config import PeftConfig
44
+
45
+
46
+ MODEL_TYPE_TO_PEFT_MODEL_MAPPING = {
47
+ "SEQ_CLS": PeftModelForSequenceClassification,
48
+ "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM,
49
+ "CAUSAL_LM": PeftModelForCausalLM,
50
+ "TOKEN_CLS": PeftModelForTokenClassification,
51
+ "QUESTION_ANS": PeftModelForQuestionAnswering,
52
+ }
53
+
54
+ PEFT_TYPE_TO_CONFIG_MAPPING = {
55
+ "ADAPTION_PROMPT": AdaptionPromptConfig,
56
+ "PROMPT_TUNING": PromptTuningConfig,
57
+ "PREFIX_TUNING": PrefixTuningConfig,
58
+ "P_TUNING": PromptEncoderConfig,
59
+ "LORA": LoraConfig,
60
+ "ADALORA": AdaLoraConfig,
61
+ "MOELORA": MoeLoraConfig,
62
+ }
63
+
64
+
65
+ def get_peft_config(config_dict: Dict[str, Any]):
66
+ """
67
+ Returns a Peft config object from a dictionary.
68
+
69
+ Args:
70
+ config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters.
71
+ """
72
+
73
+ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict)
74
+
75
+
76
+ def _prepare_prompt_learning_config(peft_config: PeftConfig, model_config: Dict[str, Any]):
77
+ if peft_config.num_layers is None:
78
+ if "num_hidden_layers" in model_config:
79
+ num_layers = model_config["num_hidden_layers"]
80
+ elif "num_layers" in model_config:
81
+ num_layers = model_config["num_layers"]
82
+ elif "n_layer" in model_config:
83
+ num_layers = model_config["n_layer"]
84
+ else:
85
+ raise ValueError("Please specify `num_layers` in `peft_config`")
86
+ peft_config.num_layers = num_layers
87
+
88
+ if peft_config.token_dim is None:
89
+ if "hidden_size" in model_config:
90
+ token_dim = model_config["hidden_size"]
91
+ elif "n_embd" in model_config:
92
+ token_dim = model_config["n_embd"]
93
+ elif "d_model" in model_config:
94
+ token_dim = model_config["d_model"]
95
+ else:
96
+ raise ValueError("Please specify `token_dim` in `peft_config`")
97
+ peft_config.token_dim = token_dim
98
+
99
+ if peft_config.num_attention_heads is None:
100
+ if "num_attention_heads" in model_config:
101
+ num_attention_heads = model_config["num_attention_heads"]
102
+ elif "n_head" in model_config:
103
+ num_attention_heads = model_config["n_head"]
104
+ elif "num_heads" in model_config:
105
+ num_attention_heads = model_config["num_heads"]
106
+ elif "encoder_attention_heads" in model_config:
107
+ num_attention_heads = model_config["encoder_attention_heads"]
108
+ else:
109
+ raise ValueError("Please specify `num_attention_heads` in `peft_config`")
110
+ peft_config.num_attention_heads = num_attention_heads
111
+
112
+ if getattr(peft_config, "encoder_hidden_size", None) is None:
113
+ setattr(peft_config, "encoder_hidden_size", peft_config.token_dim)
114
+
115
+ return peft_config
116
+
117
+
118
+ def get_peft_model(model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> PeftModel:
119
+ """
120
+ Returns a Peft model object from a model and a config.
121
+
122
+ Args:
123
+ model ([`transformers.PreTrainedModel`]): Model to be wrapped.
124
+ peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
125
+ """
126
+ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config
127
+ peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
128
+ if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance(
129
+ peft_config, PromptLearningConfig
130
+ ):
131
+ return PeftModel(model, peft_config, adapter_name=adapter_name)
132
+ if isinstance(peft_config, PromptLearningConfig):
133
+ peft_config = _prepare_prompt_learning_config(peft_config, model_config)
134
+ return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
model/peft/peft_model.py ADDED
@@ -0,0 +1,1619 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from __future__ import annotations
17
+
18
+ import inspect
19
+ import os
20
+ import warnings
21
+ from contextlib import contextmanager
22
+ from copy import deepcopy
23
+ from typing import Any, Dict, Optional, Union
24
+
25
+ import torch
26
+ from accelerate import dispatch_model, infer_auto_device_map
27
+ from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
28
+ from accelerate.utils import get_balanced_memory
29
+ from huggingface_hub import hf_hub_download
30
+ from huggingface_hub.utils import EntryNotFoundError
31
+ from safetensors.torch import load_file as safe_load_file
32
+ from safetensors.torch import save_file as safe_save_file
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from transformers import PreTrainedModel
35
+ from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
36
+ from transformers.utils import PushToHubMixin
37
+
38
+ from . import __version__
39
+ from .tuners import (
40
+ AdaLoraModel,
41
+ AdaptionPromptModel,
42
+ LoraModel,
43
+ PrefixEncoder,
44
+ PromptEmbedding,
45
+ PromptEncoder,
46
+ MoeLoraModel,
47
+ )
48
+ from .utils import (
49
+ SAFETENSORS_WEIGHTS_NAME,
50
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
51
+ WEIGHTS_NAME,
52
+ PeftConfig,
53
+ PeftType,
54
+ PromptLearningConfig,
55
+ TaskType,
56
+ _set_adapter,
57
+ _set_trainable,
58
+ add_library_to_model_card,
59
+ get_peft_model_state_dict,
60
+ hub_file_exists,
61
+ set_peft_model_state_dict,
62
+ shift_tokens_right,
63
+ )
64
+
65
+
66
+ PEFT_TYPE_TO_MODEL_MAPPING = {
67
+ PeftType.LORA: LoraModel,
68
+ PeftType.PROMPT_TUNING: PromptEmbedding,
69
+ PeftType.P_TUNING: PromptEncoder,
70
+ PeftType.PREFIX_TUNING: PrefixEncoder,
71
+ PeftType.ADALORA: AdaLoraModel,
72
+ PeftType.ADAPTION_PROMPT: AdaptionPromptModel,
73
+ PeftType.MOELORA: MoeLoraModel,
74
+ }
75
+
76
+
77
+ class PeftModel(PushToHubMixin, torch.nn.Module):
78
+ """
79
+ Base model encompassing various Peft methods.
80
+
81
+ Args:
82
+ model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.
83
+ peft_config ([`PeftConfig`]): The configuration of the Peft model.
84
+
85
+
86
+ **Attributes**:
87
+ - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft.
88
+ - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.
89
+ - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when
90
+ saving the model.
91
+ - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if
92
+ using [`PromptLearningConfig`].
93
+ - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if
94
+ using [`PromptLearningConfig`].
95
+ - **transformer_backbone_name** (`str`) -- The name of the transformer
96
+ backbone in the base model if using [`PromptLearningConfig`].
97
+ - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone
98
+ in the base model if using [`PromptLearningConfig`].
99
+ """
100
+
101
+ def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default"):
102
+ super().__init__()
103
+ self.base_model = model
104
+ self.config = self.base_model.config
105
+ self.modules_to_save = None
106
+ self.peft_config = {}
107
+ self.active_adapter = adapter_name
108
+ self.peft_type = peft_config.peft_type
109
+ self.base_model_torch_dtype = getattr(model, "dtype", None)
110
+ if not isinstance(peft_config, PromptLearningConfig):
111
+ self.peft_config[adapter_name] = peft_config
112
+ self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type](
113
+ self.base_model, self.peft_config, adapter_name
114
+ )
115
+ self.set_additional_trainable_modules(peft_config, adapter_name)
116
+ else:
117
+ self.add_adapter(adapter_name, peft_config)
118
+
119
+ if getattr(model, "is_gradient_checkpointing", True):
120
+ model = self._prepare_model_for_gradient_checkpointing(model)
121
+
122
+ def save_pretrained(self, save_directory: str, safe_serialization: bool = False, **kwargs: Any):
123
+ r"""
124
+ This function saves the adapter model and the adapter configuration files to a directory, so that it can be
125
+ reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`]
126
+ method.
127
+
128
+ Args:
129
+ save_directory (`str`):
130
+ Directory where the adapter model and configuration files will be saved (will be created if it does not
131
+ exist).
132
+ kwargs (additional keyword arguments, *optional*):
133
+ Additional keyword arguments passed along to the `push_to_hub` method.
134
+ """
135
+ if os.path.isfile(save_directory):
136
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
137
+ os.makedirs(save_directory, exist_ok=True)
138
+ self.create_or_update_model_card(save_directory)
139
+
140
+ for adapter_name, peft_config in self.peft_config.items():
141
+ # save only the trainable weights
142
+ output_state_dict = get_peft_model_state_dict(
143
+ self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name
144
+ )
145
+ output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory
146
+ os.makedirs(output_dir, exist_ok=True)
147
+
148
+ if safe_serialization:
149
+ safe_save_file(
150
+ output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}
151
+ )
152
+ else:
153
+ torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
154
+
155
+ # save the config and change the inference mode to `True`
156
+ if peft_config.base_model_name_or_path is None:
157
+ peft_config.base_model_name_or_path = (
158
+ self.base_model.__dict__.get("name_or_path", None)
159
+ if isinstance(peft_config, PromptLearningConfig)
160
+ else self.base_model.model.__dict__.get("name_or_path", None)
161
+ )
162
+ inference_mode = peft_config.inference_mode
163
+ peft_config.inference_mode = True
164
+ peft_config.save_pretrained(output_dir)
165
+ peft_config.inference_mode = inference_mode
166
+
167
+ @classmethod
168
+ def from_pretrained(
169
+ cls,
170
+ model: PreTrainedModel,
171
+ model_id: Union[str, os.PathLike],
172
+ adapter_name: str = "default",
173
+ is_trainable: bool = False,
174
+ config: Optional[PeftConfig] = None,
175
+ **kwargs: Any,
176
+ ):
177
+ r"""
178
+ Instantiate a [`LoraModel`] from a pretrained Lora configuration and weights.
179
+
180
+ Args:
181
+ model ([`~transformers.PreTrainedModel`]):
182
+ The model to be adapted. The model should be initialized with the
183
+ [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library.
184
+ model_id (`str` or `os.PathLike`):
185
+ The name of the Lora configuration to use. Can be either:
186
+ - A string, the `model id` of a Lora configuration hosted inside a model repo on the Hugging Face
187
+ Hub.
188
+ - A path to a directory containing a Lora configuration file saved using the `save_pretrained`
189
+ method (`./my_lora_config_directory/`).
190
+ adapter_name (`str`, *optional*, defaults to `"default"`):
191
+ The name of the adapter to be loaded. This is useful for loading multiple adapters.
192
+ is_trainable (`bool`, *optional*, defaults to `False`):
193
+ Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and used for
194
+ inference
195
+ config ([`~peft.PeftConfig`], *optional*):
196
+ The configuration object to use instead of an automatically loaded configuration. This configuration
197
+ object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
198
+ loaded before calling `from_pretrained`.
199
+ kwargs: (`optional`):
200
+ Additional keyword arguments passed along to the specific Lora configuration class.
201
+ """
202
+ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
203
+
204
+ # load the config
205
+ if config is None:
206
+ config = PEFT_TYPE_TO_CONFIG_MAPPING[
207
+ PeftConfig._get_peft_type(
208
+ model_id,
209
+ subfolder=kwargs.get("subfolder", None),
210
+ revision=kwargs.get("revision", None),
211
+ cache_dir=kwargs.get("cache_dir", None),
212
+ )
213
+ ].from_pretrained(model_id, subfolder=kwargs.get("subfolder", None), **kwargs)
214
+ elif isinstance(config, PeftConfig):
215
+ config.inference_mode = not is_trainable
216
+ else:
217
+ raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
218
+
219
+ if (getattr(model, "hf_device_map", None) is not None) and len(
220
+ set(model.hf_device_map.values()).intersection({"cpu", "disk"})
221
+ ) > 0:
222
+ remove_hook_from_submodules(model)
223
+
224
+ if isinstance(config, PromptLearningConfig) and is_trainable:
225
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
226
+ else:
227
+ config.inference_mode = not is_trainable
228
+
229
+ if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():
230
+ model = cls(model, config, adapter_name)
231
+ else:
232
+ model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)
233
+ model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
234
+ return model
235
+
236
+ def _setup_prompt_encoder(self, adapter_name: str):
237
+ config = self.peft_config[adapter_name]
238
+ self.prompt_encoder = torch.nn.ModuleDict({})
239
+ self.prompt_tokens = {}
240
+ transformer_backbone = None
241
+ for name, module in self.base_model.named_children():
242
+ for param in module.parameters():
243
+ param.requires_grad = False
244
+ if isinstance(module, PreTrainedModel):
245
+ # Make sure to freeze Transformers model
246
+ if transformer_backbone is None:
247
+ transformer_backbone = module
248
+ self.transformer_backbone_name = name
249
+
250
+ if config.num_transformer_submodules is None:
251
+ config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
252
+
253
+ for named_param, value in list(transformer_backbone.named_parameters()):
254
+ if value.shape[0] == self.base_model.config.vocab_size:
255
+ self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", ""))
256
+ break
257
+
258
+ if config.peft_type == PeftType.PROMPT_TUNING:
259
+ prompt_encoder = PromptEmbedding(config, self.word_embeddings)
260
+ elif config.peft_type == PeftType.P_TUNING:
261
+ prompt_encoder = PromptEncoder(config)
262
+ elif config.peft_type == PeftType.PREFIX_TUNING:
263
+ prompt_encoder = PrefixEncoder(config)
264
+ else:
265
+ raise ValueError("Not supported")
266
+ self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))
267
+ self.prompt_tokens[adapter_name] = torch.arange(
268
+ config.num_virtual_tokens * config.num_transformer_submodules
269
+ ).long()
270
+
271
+ def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel):
272
+ r"""
273
+ Prepares the model for gradient checkpointing if necessary
274
+ """
275
+ if not (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)):
276
+ if hasattr(model, "enable_input_require_grads"):
277
+ model.enable_input_require_grads()
278
+ else:
279
+
280
+ def make_inputs_require_grad(module, input, output):
281
+ output.requires_grad_(True)
282
+
283
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
284
+ return model
285
+
286
+ def get_prompt_embedding_to_save(self, adapter_name: str):
287
+ """
288
+ Returns the prompt embedding to save when saving the model. Only applicable when `peft_config.peft_type !=
289
+ PeftType.LORA`.
290
+ """
291
+ prompt_encoder = self.prompt_encoder[adapter_name]
292
+ prompt_tokens = (
293
+ self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device)
294
+ )
295
+ if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:
296
+ prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]
297
+ prompt_embeddings = prompt_encoder(prompt_tokens)
298
+ return prompt_embeddings[0].detach().cpu()
299
+
300
+ def get_prompt(self, batch_size: int):
301
+ """
302
+ Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`.
303
+ """
304
+ peft_config = self.active_peft_config
305
+ prompt_encoder = self.prompt_encoder[self.active_adapter]
306
+ prompt_tokens = (
307
+ self.prompt_tokens[self.active_adapter]
308
+ .unsqueeze(0)
309
+ .expand(batch_size, -1)
310
+ .to(prompt_encoder.embedding.weight.device)
311
+ )
312
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
313
+ prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]
314
+ if peft_config.inference_mode:
315
+ past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
316
+ else:
317
+ past_key_values = prompt_encoder(prompt_tokens)
318
+ if self.base_model_torch_dtype is not None:
319
+ past_key_values = past_key_values.to(self.base_model_torch_dtype)
320
+ past_key_values = past_key_values.view(
321
+ batch_size,
322
+ peft_config.num_virtual_tokens,
323
+ peft_config.num_layers * 2,
324
+ peft_config.num_attention_heads,
325
+ peft_config.token_dim // peft_config.num_attention_heads,
326
+ )
327
+ if peft_config.num_transformer_submodules == 2:
328
+ past_key_values = torch.cat([past_key_values, past_key_values], dim=2)
329
+ past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
330
+ peft_config.num_transformer_submodules * 2
331
+ )
332
+ if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:
333
+ post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
334
+ past_key_values = post_process_fn(past_key_values)
335
+ return past_key_values
336
+ else:
337
+ if peft_config.inference_mode:
338
+ prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
339
+ else:
340
+ prompts = prompt_encoder(prompt_tokens)
341
+ return prompts
342
+
343
+ def print_trainable_parameters(self):
344
+ """
345
+ Prints the number of trainable parameters in the model.
346
+ """
347
+ trainable_params = 0
348
+ all_param = 0
349
+ for _, param in self.named_parameters():
350
+ num_params = param.numel()
351
+ # if using DS Zero 3 and the weights are initialized empty
352
+ if num_params == 0 and hasattr(param, "ds_numel"):
353
+ num_params = param.ds_numel
354
+
355
+ all_param += num_params
356
+ if param.requires_grad:
357
+ trainable_params += num_params
358
+ print(
359
+ f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}"
360
+ )
361
+
362
+ def __getattr__(self, name: str):
363
+ """Forward missing attributes to the wrapped module."""
364
+ try:
365
+ return super().__getattr__(name) # defer to nn.Module's logic
366
+ except AttributeError:
367
+ return getattr(self.base_model, name)
368
+
369
+ def forward(self, *args: Any, **kwargs: Any):
370
+ """
371
+ Forward pass of the model.
372
+ """
373
+ return self.get_base_model()(*args, **kwargs)
374
+
375
+ @contextmanager
376
+ def disable_adapter(self):
377
+ """
378
+ Disables the adapter module.
379
+ """
380
+ try:
381
+ if isinstance(self.peft_config[self.active_adapter], PromptLearningConfig):
382
+ old_forward = self.forward
383
+ self.forward = self.base_model.forward
384
+ else:
385
+ self.base_model.disable_adapter_layers()
386
+ yield
387
+ finally:
388
+ if isinstance(self.peft_config[self.active_adapter], PromptLearningConfig):
389
+ self.forward = old_forward
390
+ else:
391
+ self.base_model.enable_adapter_layers()
392
+
393
+ def get_base_model(self):
394
+ """
395
+ Returns the base model.
396
+ """
397
+ return self.base_model if isinstance(self.active_peft_config, PromptLearningConfig) else self.base_model.model
398
+
399
+ def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
400
+ if peft_config.peft_type != self.peft_type:
401
+ raise ValueError(
402
+ f"Cannot combine adapters with different peft types. "
403
+ f"Found {self.peft_type} and {peft_config.peft_type}."
404
+ )
405
+ self.peft_config[adapter_name] = peft_config
406
+ if isinstance(peft_config, PromptLearningConfig):
407
+ self._setup_prompt_encoder(adapter_name)
408
+ else:
409
+ self.base_model.add_adapter(adapter_name, peft_config)
410
+
411
+ self.set_additional_trainable_modules(peft_config, adapter_name)
412
+
413
+ def set_additional_trainable_modules(self, peft_config, adapter_name):
414
+ if getattr(peft_config, "modules_to_save", None) is not None:
415
+ if self.modules_to_save is None:
416
+ self.modules_to_save = set(peft_config.modules_to_save)
417
+ else:
418
+ self.modules_to_save.update(peft_config.modules_to_save)
419
+ _set_trainable(self, adapter_name)
420
+
421
+ @classmethod
422
+ def _split_kwargs(cls, kwargs: Dict[str, Any]):
423
+ hf_hub_download_kwargs = {}
424
+ other_kwargs = {}
425
+
426
+ for key, value in kwargs.items():
427
+ if key in inspect.signature(hf_hub_download).parameters:
428
+ hf_hub_download_kwargs[key] = value
429
+ else:
430
+ other_kwargs[key] = value
431
+
432
+ return hf_hub_download_kwargs, other_kwargs
433
+
434
+ def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any):
435
+ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
436
+
437
+ hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs)
438
+
439
+ if adapter_name not in self.peft_config:
440
+ # load the config
441
+ peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[
442
+ PeftConfig._get_peft_type(
443
+ model_id,
444
+ subfolder=kwargs.get("subfolder", None),
445
+ revision=kwargs.get("revision", None),
446
+ cache_dir=kwargs.get("cache_dir", None),
447
+ )
448
+ ].from_pretrained(
449
+ model_id,
450
+ subfolder=kwargs.get("subfolder", None),
451
+ revision=kwargs.get("revision", None),
452
+ cache_dir=kwargs.get("cache_dir", None),
453
+ )
454
+ if isinstance(peft_config, PromptLearningConfig) and is_trainable:
455
+ raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
456
+ else:
457
+ peft_config.inference_mode = not is_trainable
458
+ self.add_adapter(adapter_name, peft_config)
459
+
460
+ # load weights if any
461
+ path = os.path.join(model_id, kwargs["subfolder"]) if kwargs.get("subfolder", None) is not None else model_id
462
+
463
+ if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)):
464
+ filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME)
465
+ use_safetensors = True
466
+ elif os.path.exists(os.path.join(path, WEIGHTS_NAME)):
467
+ filename = os.path.join(path, WEIGHTS_NAME)
468
+ use_safetensors = False
469
+ else:
470
+ has_remote_safetensors_file = hub_file_exists(
471
+ model_id, SAFETENSORS_WEIGHTS_NAME, revision=kwargs.get("revision", None)
472
+ )
473
+ use_safetensors = has_remote_safetensors_file
474
+
475
+ if has_remote_safetensors_file:
476
+ # Priority 1: load safetensors weights
477
+ filename = hf_hub_download(
478
+ model_id,
479
+ SAFETENSORS_WEIGHTS_NAME,
480
+ subfolder=kwargs.get("subfolder", None),
481
+ **hf_hub_download_kwargs,
482
+ )
483
+ else:
484
+ try:
485
+ filename = hf_hub_download(
486
+ model_id, WEIGHTS_NAME, subfolder=kwargs.get("subfolder", None), **hf_hub_download_kwargs
487
+ )
488
+ except EntryNotFoundError:
489
+ raise ValueError(
490
+ f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. "
491
+ f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}."
492
+ )
493
+
494
+ if use_safetensors:
495
+ adapters_weights = safe_load_file(filename, device="cuda" if torch.cuda.is_available() else "cpu")
496
+ else:
497
+ adapters_weights = torch.load(
498
+ filename, map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu")
499
+ )
500
+
501
+ # load the weights into the model
502
+ load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)
503
+ if (
504
+ (getattr(self, "hf_device_map", None) is not None)
505
+ and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
506
+ and len(self.peft_config) == 1
507
+ ):
508
+ device_map = kwargs.get("device_map", "auto")
509
+ max_memory = kwargs.get("max_memory", None)
510
+ offload_dir = kwargs.get("offload_folder", None)
511
+ offload_index = kwargs.get("offload_index", None)
512
+
513
+ dispatch_model_kwargs = {}
514
+ # Safety checker for previous `accelerate` versions
515
+ # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
516
+ if "offload_index" in inspect.signature(dispatch_model).parameters:
517
+ dispatch_model_kwargs["offload_index"] = offload_index
518
+
519
+ no_split_module_classes = self._no_split_modules
520
+
521
+ if device_map != "sequential":
522
+ max_memory = get_balanced_memory(
523
+ self,
524
+ max_memory=max_memory,
525
+ no_split_module_classes=no_split_module_classes,
526
+ low_zero=(device_map == "balanced_low_0"),
527
+ )
528
+ if isinstance(device_map, str):
529
+ device_map = infer_auto_device_map(
530
+ self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
531
+ )
532
+ dispatch_model(
533
+ self,
534
+ device_map=device_map,
535
+ offload_dir=offload_dir,
536
+ **dispatch_model_kwargs,
537
+ )
538
+ hook = AlignDevicesHook(io_same_device=True)
539
+ if isinstance(self.peft_config[adapter_name], PromptLearningConfig):
540
+ remove_hook_from_submodules(self.prompt_encoder)
541
+ add_hook_to_module(self.get_base_model(), hook)
542
+
543
+ # Set model in evaluation mode to deactivate Dropout modules by default
544
+ if not is_trainable:
545
+ self.eval()
546
+ return load_result
547
+
548
+ def set_adapter(self, adapter_name: str):
549
+ """
550
+ Sets the active adapter.
551
+ """
552
+ if adapter_name not in self.peft_config:
553
+ raise ValueError(f"Adapter {adapter_name} not found.")
554
+ self.active_adapter = adapter_name
555
+ if not isinstance(self.peft_config[adapter_name], PromptLearningConfig):
556
+ self.base_model.set_adapter(adapter_name)
557
+ _set_adapter(self, adapter_name)
558
+
559
+ @property
560
+ def active_peft_config(self):
561
+ return self.peft_config[self.active_adapter]
562
+
563
+ def create_or_update_model_card(self, output_dir: str):
564
+ """
565
+ Updates or creates the model card to include information about PEFT:
566
+ 1. Adds `peft` library tag
567
+ 2. Adds peft version
568
+ 3. Adds quantization information if it was used
569
+ """
570
+ # Adds `peft` library tag
571
+ add_library_to_model_card(output_dir)
572
+
573
+ with open(os.path.join(output_dir, "README.md"), "r") as f:
574
+ lines = f.readlines()
575
+
576
+ quantization_config = None
577
+ if hasattr(self.config, "quantization_config"):
578
+ quantization_config = self.config.quantization_config.to_dict()
579
+ training_config_text = ""
580
+ # Adds quantization information if it was used
581
+ if quantization_config is not None:
582
+ training_config_text += "\nThe following `bitsandbytes` quantization config was used during training:\n"
583
+ training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()])
584
+ training_config_text += "\n"
585
+
586
+ training_procedure_heading = "## Training procedure\n"
587
+ if training_procedure_heading in lines:
588
+ lines.insert(lines.index(training_procedure_heading) + 2, training_config_text)
589
+ else:
590
+ lines.append(f"{training_procedure_heading}\n{training_config_text}")
591
+
592
+ # Adds peft version
593
+ framework_block_heading = "### Framework versions\n"
594
+ if framework_block_heading in lines:
595
+ lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}\n")
596
+ else:
597
+ lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}\n")
598
+
599
+ # write the lines back to README.md
600
+ with open(os.path.join(output_dir, "README.md"), "w") as f:
601
+ f.writelines(lines)
602
+
603
+
604
+ class PeftModelForSequenceClassification(PeftModel):
605
+ """
606
+ Peft model for sequence classification tasks.
607
+
608
+ Args:
609
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
610
+ peft_config ([`PeftConfig`]): Peft config.
611
+
612
+ **Attributes**:
613
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
614
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
615
+
616
+ Example:
617
+
618
+ ```py
619
+ >>> from transformers import AutoModelForSequenceClassification
620
+ >>> from peft import PeftModelForSequenceClassification, get_peft_config
621
+
622
+ >>> config = {
623
+ ... "peft_type": "PREFIX_TUNING",
624
+ ... "task_type": "SEQ_CLS",
625
+ ... "inference_mode": False,
626
+ ... "num_virtual_tokens": 20,
627
+ ... "token_dim": 768,
628
+ ... "num_transformer_submodules": 1,
629
+ ... "num_attention_heads": 12,
630
+ ... "num_layers": 12,
631
+ ... "encoder_hidden_size": 768,
632
+ ... "prefix_projection": False,
633
+ ... "postprocess_past_key_value_function": None,
634
+ ... }
635
+
636
+ >>> peft_config = get_peft_config(config)
637
+ >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
638
+ >>> peft_model = PeftModelForSequenceClassification(model, peft_config)
639
+ >>> peft_model.print_trainable_parameters()
640
+ trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
641
+ ```
642
+ """
643
+
644
+ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"):
645
+ super().__init__(model, peft_config, adapter_name)
646
+ if self.modules_to_save is None:
647
+ self.modules_to_save = {"classifier", "score"}
648
+ else:
649
+ self.modules_to_save.update({"classifier", "score"})
650
+
651
+ for name, _ in self.base_model.named_children():
652
+ if any(module_name in name for module_name in self.modules_to_save):
653
+ self.cls_layer_name = name
654
+ break
655
+
656
+ # to make sure classifier layer is trainable
657
+ _set_trainable(self, adapter_name)
658
+
659
+ def forward(
660
+ self,
661
+ input_ids=None,
662
+ attention_mask=None,
663
+ inputs_embeds=None,
664
+ labels=None,
665
+ output_attentions=None,
666
+ output_hidden_states=None,
667
+ return_dict=None,
668
+ **kwargs,
669
+ ):
670
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
671
+ peft_config = self.active_peft_config
672
+ if not isinstance(peft_config, PromptLearningConfig):
673
+ return self.base_model(
674
+ input_ids=input_ids,
675
+ attention_mask=attention_mask,
676
+ inputs_embeds=inputs_embeds,
677
+ labels=labels,
678
+ output_attentions=output_attentions,
679
+ output_hidden_states=output_hidden_states,
680
+ return_dict=return_dict,
681
+ **kwargs,
682
+ )
683
+
684
+ batch_size = input_ids.shape[0]
685
+ if attention_mask is not None:
686
+ # concat prompt attention mask
687
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
688
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
689
+ if kwargs.get("position_ids", None) is not None:
690
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
691
+ kwargs["position_ids"] = None
692
+ kwargs.update(
693
+ {
694
+ "attention_mask": attention_mask,
695
+ "labels": labels,
696
+ "output_attentions": output_attentions,
697
+ "output_hidden_states": output_hidden_states,
698
+ "return_dict": return_dict,
699
+ }
700
+ )
701
+
702
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
703
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
704
+ else:
705
+ if kwargs.get("token_type_ids", None) is not None:
706
+ kwargs["token_type_ids"] = torch.cat(
707
+ (
708
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
709
+ kwargs["token_type_ids"],
710
+ ),
711
+ dim=1,
712
+ ).long()
713
+ if inputs_embeds is None:
714
+ inputs_embeds = self.word_embeddings(input_ids)
715
+ prompts = self.get_prompt(batch_size=batch_size)
716
+ prompts = prompts.to(inputs_embeds.dtype)
717
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
718
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
719
+
720
+ def _prefix_tuning_forward(
721
+ self,
722
+ input_ids=None,
723
+ attention_mask=None,
724
+ inputs_embeds=None,
725
+ labels=None,
726
+ output_attentions=None,
727
+ output_hidden_states=None,
728
+ return_dict=None,
729
+ **kwargs,
730
+ ):
731
+ batch_size = input_ids.shape[0]
732
+ past_key_values = self.get_prompt(batch_size)
733
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
734
+ kwargs.update(
735
+ {
736
+ "input_ids": input_ids,
737
+ "attention_mask": attention_mask,
738
+ "inputs_embeds": inputs_embeds,
739
+ "output_attentions": output_attentions,
740
+ "output_hidden_states": output_hidden_states,
741
+ "return_dict": return_dict,
742
+ "past_key_values": past_key_values,
743
+ }
744
+ )
745
+ if "past_key_values" in fwd_params:
746
+ return self.base_model(labels=labels, **kwargs)
747
+ else:
748
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
749
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
750
+ if "past_key_values" not in fwd_params:
751
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
752
+ outputs = transformer_backbone_name(**kwargs)
753
+ pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]
754
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
755
+ pooled_output = self.base_model.dropout(pooled_output)
756
+ logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)
757
+
758
+ loss = None
759
+ if labels is not None:
760
+ if self.config.problem_type is None:
761
+ if self.base_model.num_labels == 1:
762
+ self.config.problem_type = "regression"
763
+ elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
764
+ self.config.problem_type = "single_label_classification"
765
+ else:
766
+ self.config.problem_type = "multi_label_classification"
767
+
768
+ if self.config.problem_type == "regression":
769
+ loss_fct = MSELoss()
770
+ if self.base_model.num_labels == 1:
771
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
772
+ else:
773
+ loss = loss_fct(logits, labels)
774
+ elif self.config.problem_type == "single_label_classification":
775
+ loss_fct = CrossEntropyLoss()
776
+ loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))
777
+ elif self.config.problem_type == "multi_label_classification":
778
+ loss_fct = BCEWithLogitsLoss()
779
+ loss = loss_fct(logits, labels)
780
+ if not return_dict:
781
+ output = (logits,) + outputs[2:]
782
+ return ((loss,) + output) if loss is not None else output
783
+
784
+ return SequenceClassifierOutput(
785
+ loss=loss,
786
+ logits=logits,
787
+ hidden_states=outputs.hidden_states,
788
+ attentions=outputs.attentions,
789
+ )
790
+
791
+
792
+ class PeftModelForCausalLM(PeftModel):
793
+ """
794
+ Peft model for causal language modeling.
795
+
796
+ Args:
797
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
798
+ peft_config ([`PeftConfig`]): Peft config.
799
+
800
+
801
+ Example:
802
+
803
+ ```py
804
+ >>> from transformers import AutoModelForCausalLM
805
+ >>> from peft import PeftModelForCausalLM, get_peft_config
806
+
807
+ >>> config = {
808
+ ... "peft_type": "PREFIX_TUNING",
809
+ ... "task_type": "CAUSAL_LM",
810
+ ... "inference_mode": False,
811
+ ... "num_virtual_tokens": 20,
812
+ ... "token_dim": 1280,
813
+ ... "num_transformer_submodules": 1,
814
+ ... "num_attention_heads": 20,
815
+ ... "num_layers": 36,
816
+ ... "encoder_hidden_size": 1280,
817
+ ... "prefix_projection": False,
818
+ ... "postprocess_past_key_value_function": None,
819
+ ... }
820
+
821
+ >>> peft_config = get_peft_config(config)
822
+ >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large")
823
+ >>> peft_model = PeftModelForCausalLM(model, peft_config)
824
+ >>> peft_model.print_trainable_parameters()
825
+ trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544
826
+ ```
827
+ """
828
+
829
+ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"):
830
+ super().__init__(model, peft_config, adapter_name)
831
+ # back up the original prepare_inputs_for_generation so it can be restored after generate()
832
+ self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
833
+ self.base_model._validate_model_kwargs = self.base_model_validate_model_kwargs
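+ # install the permissive kwarg validation defined below so generate() does not reject custom inputs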
834
+
835
+ def forward(
836
+ self,
837
+ input_ids=None,
838
+ attention_mask=None,
839
+ inputs_embeds=None,
840
+ labels=None,
841
+ output_attentions=None,
842
+ output_hidden_states=None,
843
+ return_dict=None,
844
+ **kwargs,
845
+ ):
846
+ peft_config = self.active_peft_config
847
+
848
+ # added for debugging
849
+ # print("kwargs = ", kwargs)
850
+
851
+ if not isinstance(peft_config, PromptLearningConfig):
852
+ if self.base_model.config.model_type == "mpt":
853
+ if inputs_embeds is not None:
854
+ raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds")
855
+ return self.base_model(
856
+ input_ids=input_ids,
857
+ attention_mask=attention_mask,
858
+ labels=labels,
859
+ output_attentions=output_attentions,
860
+ output_hidden_states=output_hidden_states,
861
+ return_dict=return_dict,
862
+ **kwargs,
863
+ )
864
+
865
+ return self.base_model(
866
+ input_ids=input_ids,
867
+ attention_mask=attention_mask,
868
+ inputs_embeds=inputs_embeds,
869
+ labels=labels,
870
+ output_attentions=output_attentions,
871
+ output_hidden_states=output_hidden_states,
872
+ return_dict=return_dict,
873
+ **kwargs,
874
+ )
875
+
876
+ batch_size = input_ids.shape[0]
877
+ if attention_mask is not None:
878
+ # concat prompt attention mask
879
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
880
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
881
+
882
+ if kwargs.get("position_ids", None) is not None:
883
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
884
+ kwargs["position_ids"] = None
885
+ if kwargs.get("token_type_ids", None) is not None:
886
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
887
+ kwargs["token_type_ids"] = None
888
+ kwargs.update(
889
+ {
890
+ "attention_mask": attention_mask,
891
+ "labels": labels,
892
+ "output_attentions": output_attentions,
893
+ "output_hidden_states": output_hidden_states,
894
+ "return_dict": return_dict,
895
+ }
896
+ )
897
+
898
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
899
+ past_key_values = self.get_prompt(batch_size)
900
+ return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)
901
+ else:
902
+ if inputs_embeds is None:
903
+ inputs_embeds = self.word_embeddings(input_ids)
904
+ # concat prompt labels
905
+ if labels is not None:
906
+ prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
907
+ kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
908
+ prompts = self.get_prompt(batch_size=batch_size)
909
+ prompts = prompts.to(inputs_embeds.dtype)
910
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
911
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
912
+
913
+ def generate(self, **kwargs):
914
+ self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
915
+ if hasattr(self.base_model, "model"):
916
+ self.base_model.model.generation_config = self.generation_config
917
+ else:
918
+ self.base_model.generation_config = self.generation_config
919
+ try:
920
+ # MoeLoRAModel.generate
921
+ outputs = self.base_model.generate(**kwargs)
922
+ except:
923
+ # An exception was raised: restore the original hook, then re-raise
924
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
925
+ raise
926
+ else:
927
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
928
+ return outputs
929
+
930
+ def prepare_inputs_for_generation(self, *args, **kwargs):
931
+ peft_config = self.active_peft_config
932
+ model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
933
+ if isinstance(peft_config, PromptLearningConfig):
934
+ if model_kwargs.get("attention_mask", None) is not None:
935
+ prefix_attention_mask = torch.ones(
936
+ model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens
937
+ ).to(model_kwargs["input_ids"].device)
938
+ model_kwargs["attention_mask"] = torch.cat(
939
+ (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1
940
+ )
941
+
942
+ if model_kwargs.get("position_ids", None) is not None:
943
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
944
+ model_kwargs["position_ids"] = None
945
+
946
+ if kwargs.get("token_type_ids", None) is not None:
947
+ warnings.warn(
948
+ "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
949
+ )
950
+ kwargs["token_type_ids"] = None
951
+
952
+ if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
953
+ past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0])
954
+ model_kwargs["past_key_values"] = past_key_values
955
+ else:
956
+ if model_kwargs["past_key_values"] is None:
957
+ inputs_embeds = self.word_embeddings(model_kwargs["input_ids"])
958
+ prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0])
959
+ prompts = prompts.to(inputs_embeds.dtype)
960
+ model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1)
961
+ model_kwargs["input_ids"] = None
962
+ # !!!
963
+ model_kwargs["user_embeds"] = None
964
+ return model_kwargs
965
+
966
+ # !!!
967
+ def base_model_validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
968
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
969
+ pass
970
+ # If a `Cache` instance is passed, checks whether the model is compatible with it
971
+ if isinstance(model_kwargs.get("past_key_values", None), Cache) and not self._supports_cache_class:
972
+ raise ValueError(
973
+ f"{self.__class__.__name__} does not support an instance of `Cache` as `past_key_values`. Please "
974
+ "check the model documentation for supported cache formats."
975
+ )
976
+
977
+ # Excludes arguments that are handled before calling any model function
978
+ if self.config.is_encoder_decoder:
979
+ for key in ["decoder_input_ids"]:
980
+ model_kwargs.pop(key, None)
981
+
982
+ unused_model_args = []
983
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
984
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
985
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
986
+ if "kwargs" in model_args or "model_kwargs" in model_args:
987
+ model_args |= set(inspect.signature(self.forward).parameters)
988
+
989
+ # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`
990
+ if self.config.is_encoder_decoder:
991
+ base_model = getattr(self, self.base_model_prefix, None)
992
+
993
+ # allow encoder kwargs
994
+ encoder = getattr(self, "encoder", None)
995
+ # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.
996
+ # Also, it has `base_model_prefix = "encoder_decoder"` but there is no `self.encoder_decoder`
997
+ # TODO: A better way to handle this.
998
+ if encoder is None and base_model is not None:
999
+ encoder = getattr(base_model, "encoder", None)
1000
+
1001
+ if encoder is not None:
1002
+ encoder_model_args = set(inspect.signature(encoder.forward).parameters)
1003
+ model_args |= encoder_model_args
1004
+
1005
+ # allow decoder kwargs
1006
+ decoder = getattr(self, "decoder", None)
1007
+ if decoder is None and base_model is not None:
1008
+ decoder = getattr(base_model, "decoder", None)
1009
+
1010
+ if decoder is not None:
1011
+ decoder_model_args = set(inspect.signature(decoder.forward).parameters)
1012
+ model_args |= {f"decoder_{x}" for x in decoder_model_args}
1013
+
1014
+ # allow assistant_encoder_outputs to be passed if we're doing assisted generating
1015
+ if "assistant_encoder_outputs" in model_kwargs:
1016
+ model_args |= {"assistant_encoder_outputs"}
1017
+
1018
+ for key, value in model_kwargs.items():
1019
+ if value is not None and key not in model_args:
1020
+ unused_model_args.append(key)
1021
+
1022
+ if unused_model_args:
1023
+ raise ValueError(
1024
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
1025
+ " generate arguments will also show up in this list)"
1026
+ )
1027
+
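The prompt-learning branch of `PeftModelForCausalLM.forward` above pads every batch with `num_virtual_tokens` extra positions: ones are prepended to the attention mask, `-100` to the labels so the loss skips the prompt positions, and the learned prompt vectors are concatenated in front of the token embeddings. A minimal standalone sketch of that padding; the sizes and the random `prompts` tensor standing in for `get_prompt()` are illustrative only.

```python
import torch

batch_size, seq_len, hidden, num_virtual_tokens = 2, 5, 8, 4

attention_mask = torch.ones(batch_size, seq_len)
labels = torch.randint(0, 100, (batch_size, seq_len))
inputs_embeds = torch.randn(batch_size, seq_len, hidden)
prompts = torch.randn(batch_size, num_virtual_tokens, hidden)  # stand-in for self.get_prompt()

# Virtual tokens are always attended to.
prefix_attention_mask = torch.ones(batch_size, num_virtual_tokens)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)

# -100 makes the loss ignore the virtual-token positions.
prefix_labels = torch.full((batch_size, num_virtual_tokens), -100)
labels = torch.cat((prefix_labels, labels), dim=1)

# Learned prompt embeddings are prepended to the token embeddings.
inputs_embeds = torch.cat((prompts.to(inputs_embeds.dtype), inputs_embeds), dim=1)

print(attention_mask.shape, labels.shape, inputs_embeds.shape)
# torch.Size([2, 9]) torch.Size([2, 9]) torch.Size([2, 9, 8])
```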
1028
+ class PeftModelForSeq2SeqLM(PeftModel):
1029
+ """
1030
+ Peft model for sequence-to-sequence language modeling.
1031
+
1032
+ Args:
1033
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1034
+ peft_config ([`PeftConfig`]): Peft config.
1035
+
1036
+
1037
+ Example:
1038
+
1039
+ ```py
1040
+ >>> from transformers import AutoModelForSeq2SeqLM
1041
+ >>> from peft import PeftModelForSeq2SeqLM, get_peft_config
1042
+
1043
+ >>> config = {
1044
+ ... "peft_type": "LORA",
1045
+ ... "task_type": "SEQ_2_SEQ_LM",
1046
+ ... "inference_mode": False,
1047
+ ... "r": 8,
1048
+ ... "target_modules": ["q", "v"],
1049
+ ... "lora_alpha": 32,
1050
+ ... "lora_dropout": 0.1,
1051
+ ... "fan_in_fan_out": False,
1052
+ ... "enable_lora": None,
1053
+ ... "bias": "none",
1054
+ ... }
1055
+
1056
+ >>> peft_config = get_peft_config(config)
1057
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
1058
+ >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)
1059
+ >>> peft_model.print_trainable_parameters()
1060
+ trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566
1061
+ ```
1062
+ """
1063
+
1064
+ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"):
1065
+ super().__init__(model, peft_config, adapter_name)
1066
+ self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
1067
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation = (
1068
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation
1069
+ )
1070
+
1071
+ def forward(
1072
+ self,
1073
+ input_ids=None,
1074
+ attention_mask=None,
1075
+ inputs_embeds=None,
1076
+ decoder_input_ids=None,
1077
+ decoder_attention_mask=None,
1078
+ decoder_inputs_embeds=None,
1079
+ labels=None,
1080
+ output_attentions=None,
1081
+ output_hidden_states=None,
1082
+ return_dict=None,
1083
+ **kwargs,
1084
+ ):
1085
+ peft_config = self.active_peft_config
1086
+ if not isinstance(peft_config, PromptLearningConfig):
1087
+ return self.base_model(
1088
+ input_ids=input_ids,
1089
+ attention_mask=attention_mask,
1090
+ inputs_embeds=inputs_embeds,
1091
+ decoder_input_ids=decoder_input_ids,
1092
+ decoder_attention_mask=decoder_attention_mask,
1093
+ decoder_inputs_embeds=decoder_inputs_embeds,
1094
+ labels=labels,
1095
+ output_attentions=output_attentions,
1096
+ output_hidden_states=output_hidden_states,
1097
+ return_dict=return_dict,
1098
+ **kwargs,
1099
+ )
1100
+
1101
+ batch_size = input_ids.shape[0]
1102
+ if decoder_attention_mask is not None:
1103
+ # concat prompt attention mask
1104
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1105
+ decoder_attention_mask.device
1106
+ )
1107
+ decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)
1108
+
1109
+ if kwargs.get("position_ids", None) is not None:
1110
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1111
+ kwargs["position_ids"] = None
1112
+ if kwargs.get("token_type_ids", None) is not None:
1113
+ warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
1114
+ kwargs["token_type_ids"] = None
1115
+ kwargs.update(
1116
+ {
1117
+ "attention_mask": attention_mask,
1118
+ "decoder_attention_mask": decoder_attention_mask,
1119
+ "labels": labels,
1120
+ "output_attentions": output_attentions,
1121
+ "output_hidden_states": output_hidden_states,
1122
+ "return_dict": return_dict,
1123
+ }
1124
+ )
1125
+
1126
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1127
+ past_key_values = self.get_prompt(batch_size)
1128
+ return self.base_model(
1129
+ input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs
1130
+ )
1131
+ elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
1132
+ if inputs_embeds is None:
1133
+ inputs_embeds = self.word_embeddings(input_ids)
1134
+
1135
+ if attention_mask is not None:
1136
+ # concat prompt attention mask
1137
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1138
+ attention_mask.device
1139
+ )
1140
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1141
+
1142
+ prompts = self.get_prompt(batch_size=batch_size)
1143
+ prompts = prompts.to(inputs_embeds.dtype)
1144
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1145
+
1146
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1147
+ else:
1148
+ if inputs_embeds is None:
1149
+ inputs_embeds = self.word_embeddings(input_ids)
1150
+ if decoder_inputs_embeds is None and decoder_input_ids is None:
1151
+ decoder_input_ids = shift_tokens_right(
1152
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1153
+ )
1154
+ decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)
1155
+
1156
+ if attention_mask is not None:
1157
+ # concat prompt attention mask
1158
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1159
+ attention_mask.device
1160
+ )
1161
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1162
+ # concat prompt labels
1163
+ if labels is not None:
1164
+ if peft_config.num_transformer_submodules == 1:
1165
+ kwargs["labels"] = labels
1166
+ elif peft_config.num_transformer_submodules == 2:
1167
+ prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
1168
+ kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
1169
+ prompts = self.get_prompt(batch_size=batch_size)
1170
+ prompts = prompts.to(inputs_embeds.dtype)
1171
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1172
+ if peft_config.num_transformer_submodules == 1:
1173
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1174
+ elif peft_config.num_transformer_submodules == 2:
1175
+ decoder_inputs_embeds = torch.cat(
1176
+ (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1
1177
+ )
1178
+ return self.base_model(
1179
+ inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs
1180
+ )
1181
+
1182
+ def generate(self, **kwargs):
1183
+ peft_config = self.active_peft_config
1184
+ self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
1185
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1186
+ self._prepare_encoder_decoder_kwargs_for_generation
1187
+ )
1188
+ try:
1189
+ if not isinstance(peft_config, PromptLearningConfig):
1190
+ outputs = self.base_model.generate(**kwargs)
1191
+ else:
1192
+ if "input_ids" not in kwargs:
1193
+ raise ValueError("input_ids must be provided for Peft model generation")
1194
+ if kwargs.get("position_ids", None) is not None:
1195
+ warnings.warn(
1196
+ "Position ids are not supported for parameter efficient tuning. Ignoring position ids."
1197
+ )
1198
+ kwargs["position_ids"] = None
1199
+ if kwargs.get("token_type_ids", None) is not None:
1200
+ warnings.warn(
1201
+ "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
1202
+ )
1203
+ kwargs["token_type_ids"] = None
1204
+
1205
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1206
+ outputs = self.base_model.generate(**kwargs)
1207
+ elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
1208
+ kwargs = deepcopy(kwargs)
1209
+
1210
+ if "encoder_outputs" in kwargs:
1211
+ del kwargs["encoder_outputs"]
1212
+ warnings.warn(
1213
+ "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it."
1214
+ )
1215
+
1216
+ input_ids = kwargs.pop("input_ids")
1217
+ inputs_embeds = self.word_embeddings(input_ids)
1218
+ batch_size = inputs_embeds.shape[0]
1219
+ prompts = self.get_prompt(batch_size=batch_size)
1220
+ prompts = prompts.to(inputs_embeds.dtype)
1221
+
1222
+ inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
1223
+ kwargs["inputs_embeds"] = inputs_embeds
1224
+
1225
+ if "attention_mask" in kwargs:
1226
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
1227
+ kwargs["attention_mask"].device
1228
+ )
1229
+ kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1)
1230
+
1231
+ return self.base_model.generate(**kwargs)
1232
+ else:
1233
+ raise NotImplementedError
1234
+ except:
1235
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1236
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1237
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation
1238
+ )
1239
+ raise
1240
+ else:
1241
+ self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
1242
+ self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
1243
+ self.base_model_prepare_encoder_decoder_kwargs_for_generation
1244
+ )
1245
+ return outputs
1246
+
1247
+ def prepare_inputs_for_generation(self, *args, **kwargs):
1248
+ peft_config = self.active_peft_config
1249
+ model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
1250
+ if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
1251
+ batch_size = model_kwargs["decoder_input_ids"].shape[0]
1252
+ past_key_values = self.get_prompt(batch_size)
1253
+ model_kwargs["past_key_values"] = past_key_values
1254
+
1255
+ return model_kwargs
1256
+
1257
+
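For the sequence-to-sequence model above, `get_prompt` returns one block of virtual tokens per transformer submodule; with `num_transformer_submodules == 2` the first `num_virtual_tokens` vectors are prepended to the encoder embeddings and the remainder to the decoder embeddings. A rough shape-only sketch of that split, with made-up sizes.

```python
import torch

batch, num_virtual_tokens, hidden = 2, 4, 8
num_transformer_submodules = 2  # encoder + decoder

# Stand-in for self.get_prompt(batch_size=batch)
prompts = torch.randn(batch, num_virtual_tokens * num_transformer_submodules, hidden)

encoder_embeds = torch.randn(batch, 6, hidden)   # word embeddings of the input ids
decoder_embeds = torch.randn(batch, 3, hidden)   # word embeddings of the decoder input ids

encoder_embeds = torch.cat((prompts[:, :num_virtual_tokens], encoder_embeds), dim=1)
decoder_embeds = torch.cat((prompts[:, num_virtual_tokens:], decoder_embeds), dim=1)

print(encoder_embeds.shape, decoder_embeds.shape)
# torch.Size([2, 10, 8]) torch.Size([2, 7, 8])
```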
1258
+ class PeftModelForTokenClassification(PeftModel):
1259
+ """
1260
+ Peft model for token classification tasks.
1261
+
1262
+ Args:
1263
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1264
+ peft_config ([`PeftConfig`]): Peft config.
1265
+
1266
+ **Attributes**:
1267
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1268
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
1269
+
1270
+ Example:
1271
+
1272
+ ```py
1273
+ >>> from transformers import AutoModelForSequenceClassification
1274
+ >>> from peft import PeftModelForTokenClassification, get_peft_config
1275
+
1276
+ >>> config = {
1277
+ ... "peft_type": "PREFIX_TUNING",
1278
+ ... "task_type": "TOKEN_CLS",
1279
+ ... "inference_mode": False,
1280
+ ... "num_virtual_tokens": 20,
1281
+ ... "token_dim": 768,
1282
+ ... "num_transformer_submodules": 1,
1283
+ ... "num_attention_heads": 12,
1284
+ ... "num_layers": 12,
1285
+ ... "encoder_hidden_size": 768,
1286
+ ... "prefix_projection": False,
1287
+ ... "postprocess_past_key_value_function": None,
1288
+ ... }
1289
+
1290
+ >>> peft_config = get_peft_config(config)
1291
+ >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased")
1292
+ >>> peft_model = PeftModelForTokenClassification(model, peft_config)
1293
+ >>> peft_model.print_trainable_parameters()
1294
+ trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
1295
+ ```
1296
+ """
1297
+
1298
+ def __init__(self, model, peft_config: PeftConfig = None, adapter_name="default"):
1299
+ super().__init__(model, peft_config, adapter_name)
1300
+ if self.modules_to_save is None:
1301
+ self.modules_to_save = {"classifier", "score"}
1302
+ else:
1303
+ self.modules_to_save.update({"classifier", "score"})
1304
+
1305
+ for name, _ in self.base_model.named_children():
1306
+ if any(module_name in name for module_name in self.modules_to_save):
1307
+ self.cls_layer_name = name
1308
+ break
1309
+
1310
+ # to make sure classifier layer is trainable
1311
+ _set_trainable(self, adapter_name)
1312
+
1313
+ def forward(
1314
+ self,
1315
+ input_ids=None,
1316
+ attention_mask=None,
1317
+ inputs_embeds=None,
1318
+ labels=None,
1319
+ output_attentions=None,
1320
+ output_hidden_states=None,
1321
+ return_dict=None,
1322
+ **kwargs,
1323
+ ):
1324
+ peft_config = self.active_peft_config
1325
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1326
+
1327
+ if not isinstance(peft_config, PromptLearningConfig):
1328
+ return self.base_model(
1329
+ input_ids=input_ids,
1330
+ attention_mask=attention_mask,
1331
+ inputs_embeds=inputs_embeds,
1332
+ labels=labels,
1333
+ output_attentions=output_attentions,
1334
+ output_hidden_states=output_hidden_states,
1335
+ return_dict=return_dict,
1336
+ **kwargs,
1337
+ )
1338
+
1339
+ batch_size = input_ids.shape[0]
1340
+ if attention_mask is not None:
1341
+ # concat prompt attention mask
1342
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1343
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1344
+ if kwargs.get("position_ids", None) is not None:
1345
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1346
+ kwargs["position_ids"] = None
1347
+ kwargs.update(
1348
+ {
1349
+ "attention_mask": attention_mask,
1350
+ "labels": labels,
1351
+ "output_attentions": output_attentions,
1352
+ "output_hidden_states": output_hidden_states,
1353
+ "return_dict": return_dict,
1354
+ }
1355
+ )
1356
+
1357
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1358
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
1359
+ else:
1360
+ if kwargs.get("token_type_ids", None) is not None:
1361
+ kwargs["token_type_ids"] = torch.cat(
1362
+ (
1363
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
1364
+ kwargs["token_type_ids"],
1365
+ ),
1366
+ dim=1,
1367
+ ).long()
1368
+ if inputs_embeds is None:
1369
+ inputs_embeds = self.word_embeddings(input_ids)
1370
+ prompts = self.get_prompt(batch_size=batch_size)
1371
+ prompts = prompts.to(inputs_embeds.dtype)
1372
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1373
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1374
+
1375
+ def _prefix_tuning_forward(
1376
+ self,
1377
+ input_ids=None,
1378
+ attention_mask=None,
1379
+ inputs_embeds=None,
1380
+ labels=None,
1381
+ output_attentions=None,
1382
+ output_hidden_states=None,
1383
+ return_dict=None,
1384
+ **kwargs,
1385
+ ):
1386
+ batch_size = input_ids.shape[0]
1387
+ past_key_values = self.get_prompt(batch_size)
1388
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
1389
+ kwargs.update(
1390
+ {
1391
+ "input_ids": input_ids,
1392
+ "attention_mask": attention_mask,
1393
+ "inputs_embeds": inputs_embeds,
1394
+ "output_attentions": output_attentions,
1395
+ "output_hidden_states": output_hidden_states,
1396
+ "return_dict": return_dict,
1397
+ "past_key_values": past_key_values,
1398
+ }
1399
+ )
1400
+ if "past_key_values" in fwd_params:
1401
+ return self.base_model(labels=labels, **kwargs)
1402
+ else:
1403
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1404
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1405
+ if "past_key_values" not in fwd_params:
1406
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1407
+ outputs = transformer_backbone_name(**kwargs)
1408
+ sequence_output = outputs[0]
1409
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1410
+ sequence_output = self.base_model.dropout(sequence_output)
1411
+ logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
1412
+
1413
+ loss = None
1414
+ if labels is not None:
1415
+ loss_fct = CrossEntropyLoss()
1416
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1417
+
1418
+ if not return_dict:
1419
+ output = (logits,) + outputs[2:]
1420
+ return ((loss,) + output) if loss is not None else output
1421
+
1422
+ return TokenClassifierOutput(
1423
+ loss=loss,
1424
+ logits=logits,
1425
+ hidden_states=outputs.hidden_states,
1426
+ attentions=outputs.attentions,
1427
+ )
1428
+
1429
+
1430
+ class PeftModelForQuestionAnswering(PeftModel):
1431
+ """
1432
+ Peft model for extractive question answering.
1433
+
1434
+ Args:
1435
+ model ([`~transformers.PreTrainedModel`]): Base transformer model.
1436
+ peft_config ([`PeftConfig`]): Peft config.
1437
+
1438
+ **Attributes**:
1439
+ - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
1440
+ - **cls_layer_name** (`str`) -- The name of the classification layer.
1441
+
1442
+ Example:
1443
+
1444
+ ```py
1445
+ >>> from transformers import AutoModelForQuestionAnswering
1446
+ >>> from peft import PeftModelForQuestionAnswering, get_peft_config
1447
+
1448
+ >>> config = {
1449
+ ... "peft_type": "LORA",
1450
+ ... "task_type": "QUESTION_ANS",
1451
+ ... "inference_mode": False,
1452
+ ... "r": 16,
1453
+ ... "target_modules": ["query", "value"],
1454
+ ... "lora_alpha": 32,
1455
+ ... "lora_dropout": 0.05,
1456
+ ... "fan_in_fan_out": False,
1457
+ ... "bias": "none",
1458
+ ... }
1459
+
1460
+ >>> peft_config = get_peft_config(config)
1461
+ >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased")
1462
+ >>> peft_model = PeftModelForQuestionAnswering(model, peft_config)
1463
+ >>> peft_model.print_trainable_parameters()
1464
+ trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013
1465
+ ```
1466
+ """
1467
+
1468
+ def __init__(self, model, peft_config: PeftConfig = None, adapter_name="default"):
1469
+ super().__init__(model, peft_config, adapter_name)
1470
+ if self.modules_to_save is None:
1471
+ self.modules_to_save = {"qa_outputs"}
1472
+ else:
1473
+ self.modules_to_save.update({"qa_outputs"})
1474
+
1475
+ for name, _ in self.base_model.named_children():
1476
+ if any(module_name in name for module_name in self.modules_to_save):
1477
+ self.cls_layer_name = name
1478
+ break
1479
+
1480
+ # to make sure classifier layer is trainable
1481
+ _set_trainable(self, adapter_name)
1482
+
1483
+ def forward(
1484
+ self,
1485
+ input_ids=None,
1486
+ attention_mask=None,
1487
+ token_type_ids=None,
1488
+ position_ids=None,
1489
+ inputs_embeds=None,
1490
+ start_positions=None,
1491
+ end_positions=None,
1492
+ output_attentions=None,
1493
+ output_hidden_states=None,
1494
+ return_dict=None,
1495
+ **kwargs,
1496
+ ):
1497
+ peft_config = self.active_peft_config
1498
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1499
+
1500
+ if not isinstance(peft_config, PromptLearningConfig):
1501
+ return self.base_model(
1502
+ input_ids=input_ids,
1503
+ attention_mask=attention_mask,
1504
+ inputs_embeds=inputs_embeds,
1505
+ start_positions=start_positions,
1506
+ end_positions=end_positions,
1507
+ output_attentions=output_attentions,
1508
+ output_hidden_states=output_hidden_states,
1509
+ return_dict=return_dict,
1510
+ **kwargs,
1511
+ )
1512
+
1513
+ batch_size = input_ids.shape[0]
1514
+ if attention_mask is not None:
1515
+ # concat prompt attention mask
1516
+ prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
1517
+ attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
1518
+ if kwargs.get("position_ids", None) is not None:
1519
+ warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
1520
+ kwargs["position_ids"] = None
1521
+ kwargs.update(
1522
+ {
1523
+ "attention_mask": attention_mask,
1524
+ "start_positions": start_positions,
1525
+ "end_positions": end_positions,
1526
+ "output_attentions": output_attentions,
1527
+ "output_hidden_states": output_hidden_states,
1528
+ "return_dict": return_dict,
1529
+ }
1530
+ )
1531
+
1532
+ if peft_config.peft_type == PeftType.PREFIX_TUNING:
1533
+ return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
1534
+ else:
1535
+ if kwargs.get("token_type_ids", None) is not None:
1536
+ kwargs["token_type_ids"] = torch.cat(
1537
+ (
1538
+ torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
1539
+ kwargs["token_type_ids"],
1540
+ ),
1541
+ dim=1,
1542
+ ).long()
1543
+ if inputs_embeds is None:
1544
+ inputs_embeds = self.word_embeddings(input_ids)
1545
+ prompts = self.get_prompt(batch_size=batch_size)
1546
+ prompts = prompts.to(inputs_embeds.dtype)
1547
+ inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
1548
+ return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
1549
+
1550
+ def _prefix_tuning_forward(
1551
+ self,
1552
+ input_ids=None,
1553
+ attention_mask=None,
1554
+ inputs_embeds=None,
1555
+ start_positions=None,
1556
+ end_positions=None,
1557
+ output_attentions=None,
1558
+ output_hidden_states=None,
1559
+ return_dict=None,
1560
+ **kwargs,
1561
+ ):
1562
+ batch_size = input_ids.shape[0]
1563
+ past_key_values = self.get_prompt(batch_size)
1564
+ fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
1565
+ kwargs.update(
1566
+ {
1567
+ "input_ids": input_ids,
1568
+ "attention_mask": attention_mask,
1569
+ "inputs_embeds": inputs_embeds,
1570
+ "output_attentions": output_attentions,
1571
+ "output_hidden_states": output_hidden_states,
1572
+ "return_dict": return_dict,
1573
+ "past_key_values": past_key_values,
1574
+ }
1575
+ )
1576
+ if "past_key_values" in fwd_params:
1577
+ return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs)
1578
+ else:
1579
+ transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
1580
+ fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
1581
+ if "past_key_values" not in fwd_params:
1582
+ raise ValueError("Model does not support past key values which are required for prefix tuning.")
1583
+ outputs = transformer_backbone_name(**kwargs)
1584
+ sequence_output = outputs[0]
1585
+ if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
1586
+ sequence_output = self.base_model.dropout(sequence_output)
1587
+ logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
1588
+ start_logits, end_logits = logits.split(1, dim=-1)
1589
+ start_logits = start_logits.squeeze(-1).contiguous()
1590
+ end_logits = end_logits.squeeze(-1).contiguous()
1591
+
1592
+ total_loss = None
1593
+ if start_positions is not None and end_positions is not None:
1594
+ # If we are on multi-GPU, the positions may carry an extra dimension; squeeze it away
1595
+ if len(start_positions.size()) > 1:
1596
+ start_positions = start_positions.squeeze(-1)
1597
+ if len(end_positions.size()) > 1:
1598
+ end_positions = end_positions.squeeze(-1)
1599
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1600
+ ignored_index = start_logits.size(1)
1601
+ start_positions = start_positions.clamp(0, ignored_index)
1602
+ end_positions = end_positions.clamp(0, ignored_index)
1603
+
1604
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1605
+ start_loss = loss_fct(start_logits, start_positions)
1606
+ end_loss = loss_fct(end_logits, end_positions)
1607
+ total_loss = (start_loss + end_loss) / 2
1608
+
1609
+ if not return_dict:
1610
+ output = (start_logits, end_logits) + outputs[2:]
1611
+ return ((total_loss,) + output) if total_loss is not None else output
1612
+
1613
+ return QuestionAnsweringModelOutput(
1614
+ loss=total_loss,
1615
+ start_logits=start_logits,
1616
+ end_logits=end_logits,
1617
+ hidden_states=outputs.hidden_states,
1618
+ attentions=outputs.attentions,
1619
+ )
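`PeftModelForQuestionAnswering._prefix_tuning_forward` above ends with the standard extractive-QA objective: start and end positions are clamped to the sequence length, out-of-range targets are dropped via `ignore_index`, and the loss is the mean of the start and end cross-entropies. A compact reproduction of just that loss with random logits; all shapes and positions are illustrative.

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len = 2, 10
start_logits = torch.randn(batch, seq_len)
end_logits = torch.randn(batch, seq_len)
start_positions = torch.tensor([3, 12])   # 12 is deliberately out of range
end_positions = torch.tensor([5, 15])

ignored_index = start_logits.size(1)                      # anything clamped here is ignored
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
print(total_loss)
```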
model/peft/tuners/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module while preserving other warnings, so don't check this module at all.
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
21
+ from .lora import LoraConfig, LoraModel
22
+ from .adalora import AdaLoraConfig, AdaLoraModel
23
+ from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType
24
+ from .prefix_tuning import PrefixEncoder, PrefixTuningConfig
25
+ from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit
26
+ from .moelora import MoeLoraConfig, MoeLoraModel
27
+ from .gating import GATING_TO_MODEL_MAPPING
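This `__init__.py` is the import surface for both the stock tuners and the fork-specific MoE-LoRA pieces (`MoeLoraConfig`, `MoeLoraModel`, `GATING_TO_MODEL_MAPPING`). The snippet below only exercises those imports; it assumes the repository root is on `sys.path` and that `GATING_TO_MODEL_MAPPING` is a name-to-class dict like the other PEFT mappings (the constructors themselves live in `moelora.py` and `gating.py`, outside this excerpt).

```python
# Illustrative import check only.
from model.peft.tuners import (
    AdaLoraConfig,
    LoraConfig,
    MoeLoraConfig,
    PrefixTuningConfig,
    PromptTuningConfig,
    GATING_TO_MODEL_MAPPING,
)

print(sorted(GATING_TO_MODEL_MAPPING))  # names of the gating networks available to the MoE-LoRA router
```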
model/peft/tuners/adalora.py ADDED
@@ -0,0 +1,751 @@
1
+ import re
2
+ import warnings
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from transformers.pytorch_utils import Conv1D
10
+
11
+ from ..import_utils import is_bnb_4bit_available, is_bnb_available
12
+ from ..utils import (
13
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
14
+ PeftType,
15
+ _freeze_adapter,
16
+ _get_submodules,
17
+ transpose,
18
+ )
19
+ from .lora import (
20
+ LoraConfig,
21
+ LoraLayer,
22
+ LoraModel,
23
+ mark_only_lora_as_trainable,
24
+ )
25
+
26
+
27
+ if is_bnb_available():
28
+ import bitsandbytes as bnb
29
+
30
+
31
+ @dataclass
32
+ class AdaLoraConfig(LoraConfig):
33
+ """
34
+ This is the configuration class to store the configuration of a [`~peft.AdaLora`].
35
+
36
+ Args:
37
+ target_r (`int`): The target average rank of incremental matrix.
38
+ init_r (`int`): The initial rank for each incremental matrix.
39
+ tinit (`int`): The steps of initial fine-tuning warmup.
40
+ tfinal (`int`): The step of final fine-tuning.
41
+ deltaT (`int`): The time interval between two budget allocations.
42
+ beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
43
+ beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.
44
+ orth_reg_weight (`float`): The coefficient of orthogonal regularization.
45
+ total_step (`int`): The total training steps that should be specified before training.
46
+ rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
47
+ """
48
+
49
+ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
50
+ init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."})
51
+ tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
52
+ tfinal: int = field(default=0, metadata={"help": "The steps of final fine-tuning."})
53
+ deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
54
+ beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
55
+ beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
56
+ orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
57
+ total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
58
+ rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
59
+
60
+ def __post_init__(self):
61
+ self.peft_type = PeftType.ADALORA
62
+
63
+
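`AdaLoraConfig` adds only the rank-scheduling knobs on top of the fields inherited from `LoraConfig`. A minimal construction showing the fields declared above; the concrete numbers are illustrative and are not the repository's training settings.

```python
from model.peft.tuners import AdaLoraConfig

config = AdaLoraConfig(
    init_r=12,           # starting rank of every incremental matrix
    target_r=8,          # average rank the allocator should end up with
    tinit=200,           # steps of initial warmup before any pruning
    tfinal=500,          # steps of final fine-tuning with the budget frozen
    deltaT=10,           # budget is re-allocated every deltaT steps
    beta1=0.85,          # EMA factor for sensitivity smoothing
    beta2=0.85,          # EMA factor for uncertainty quantification
    total_step=3000,     # must be set before training starts
    target_modules=["q_proj", "v_proj"],  # inherited LoraConfig field
)
print(config.peft_type)  # PeftType.ADALORA, set in __post_init__
```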
64
+ class AdaLoraModel(LoraModel):
65
+ """
66
+ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
67
+ https://openreview.net/pdf?id=lq62uWRJjiY
68
+
69
+ Args:
70
+ model ([`transformers.PreTrainedModel`]): The model to be adapted.
71
+ config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
72
+
73
+ Returns:
74
+ `torch.nn.Module`: The AdaLora model.
75
+
76
+ Example::
77
+
78
+ >>> from transformers import AutoModelForSeq2SeqLM
+ >>> from peft import AdaLoraModel, AdaLoraConfig
79
+ >>> config = AdaLoraConfig(
80
+ peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
81
+ lora_dropout=0.01,
82
+ )
83
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+ >>> model = AdaLoraModel(model, config, "default")
84
+
85
+ **Attributes**:
86
+ - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
87
+ - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
88
+ """
89
+
90
+ def __init__(self, model, config, adapter_name):
91
+ nn.Module.__init__(self)
92
+ self.model = model
93
+ self.peft_config = config
94
+ self.add_adapter(adapter_name, self.peft_config[adapter_name])
95
+
96
+ def add_adapter(self, adapter_name, config=None):
97
+ if config is not None:
98
+ model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config
99
+ config = self._prepare_adalora_config(config, model_config)
100
+ self.peft_config[adapter_name] = config
101
+ self._find_and_replace(adapter_name)
102
+ if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none":
103
+ raise ValueError(
104
+ "AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters."
105
+ )
106
+ traininable_mode_counter = 0
107
+ for config in self.peft_config.values():
108
+ if not config.inference_mode:
109
+ traininable_mode_counter += 1
110
+
111
+ if traininable_mode_counter > 1:
112
+ raise ValueError(
113
+ "AdaLoraModel supports only 1 trainable adapter. "
114
+ "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
115
+ )
116
+
117
+ mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
118
+ if self.peft_config[adapter_name].inference_mode:
119
+ _freeze_adapter(self.model, adapter_name)
120
+ else:
121
+ self.trainable_adapter_name = adapter_name
122
+ self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
123
+
124
+ def _find_and_replace(self, adapter_name):
125
+ lora_config = self.peft_config[adapter_name]
126
+ loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
127
+ loaded_in_4bit = getattr(self.model, "is_loaded_in_4bit", False)
128
+
129
+ if (loaded_in_8bit or loaded_in_4bit) and not is_bnb_available():
130
+ raise ImportError(
131
+ "To use Lora with 8-bit quantization, please install the `bitsandbytes` package. "
132
+ "You can install it with `pip install bitsandbytes`."
133
+ )
134
+ is_target_modules_in_base_model = False
135
+ kwargs = {
136
+ "r": lora_config.init_r,
137
+ "lora_alpha": lora_config.lora_alpha,
138
+ "lora_dropout": lora_config.lora_dropout,
139
+ "fan_in_fan_out": lora_config.fan_in_fan_out,
140
+ "init_lora_weights": lora_config.init_lora_weights,
141
+ }
142
+ key_list = [key for key, _ in self.model.named_modules()]
143
+ for key in key_list:
144
+ if isinstance(lora_config.target_modules, str):
145
+ target_module_found = re.fullmatch(lora_config.target_modules, key)
146
+ else:
147
+ target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)
148
+ if target_module_found:
149
+ if not is_target_modules_in_base_model:
150
+ is_target_modules_in_base_model = True
151
+ parent, target, target_name = _get_submodules(self.model, key)
152
+ bias = target.bias is not None
153
+ if isinstance(target, LoraLayer):
154
+ target.update_layer(
155
+ adapter_name,
156
+ lora_config.init_r,
157
+ lora_config.lora_alpha,
158
+ lora_config.lora_dropout,
159
+ lora_config.init_lora_weights,
160
+ )
161
+ else:
162
+ if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):
163
+ kwargs.update(
164
+ {
165
+ "has_fp16_weights": target.state.has_fp16_weights,
166
+ "memory_efficient_backward": target.state.memory_efficient_backward,
167
+ "threshold": target.state.threshold,
168
+ "index": target.index,
169
+ }
170
+ )
171
+ new_module = SVDLinear8bitLt(
172
+ adapter_name, target.in_features, target.out_features, bias=bias, **kwargs
173
+ )
174
+ elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target, bnb.nn.Linear4bit):
175
+ fourbit_kwargs = kwargs.copy()
176
+ fourbit_kwargs.update(
177
+ {
178
+ "compute_dtype": target.compute_dtype,
179
+ "compress_statistics": target.weight.compress_statistics,
180
+ "quant_type": target.weight.quant_type,
181
+ }
182
+ )
183
+ new_module = SVDLinear4bit(
184
+ adapter_name, target.in_features, target.out_features, bias=bias, **fourbit_kwargs
185
+ )
186
+ else:
187
+ if isinstance(target, torch.nn.Linear):
188
+ in_features, out_features = target.in_features, target.out_features
189
+ if kwargs["fan_in_fan_out"]:
190
+ warnings.warn(
191
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
192
+ "Setting fan_in_fan_out to False."
193
+ )
194
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
195
+ elif isinstance(target, Conv1D):
196
+ in_features, out_features = (
197
+ target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape
198
+ )
199
+ if not kwargs["fan_in_fan_out"]:
200
+ warnings.warn(
201
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
202
+ "Setting fan_in_fan_out to True."
203
+ )
204
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
205
+ else:
206
+ raise ValueError(
207
+ f"Target module {target} is not supported. "
208
+ f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
209
+ )
210
+ new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)
211
+
212
+ self._replace_module(parent, target_name, new_module, target)
213
+ if not is_target_modules_in_base_model:
214
+ raise ValueError(
215
+ f"Target modules {lora_config.target_modules} not found in the base model. "
216
+ f"Please check the target modules and try again."
217
+ )
218
+
219
+ def __getattr__(self, name: str):
220
+ """Forward missing attributes to the wrapped module."""
221
+ try:
222
+ return super().__getattr__(name) # defer to nn.Module's logic
223
+ except AttributeError:
224
+ return getattr(self.model, name)
225
+
226
+ def forward(self, *args, **kwargs):
227
+ outputs = self.model.forward(*args, **kwargs)
228
+
229
+ # Calculate the orthogonal regularization
230
+ orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
231
+ assert orth_reg_weight > 0
232
+
233
+ if hasattr(outputs, "loss"):
234
+ regu_loss = 0
235
+ num_param = 0
236
+ for n, p in self.model.named_parameters():
237
+ if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
238
+ para_cov = p @ p.T if "lora_A" in n else p.T @ p
239
+ I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
240
+ I.requires_grad = False
241
+ num_param += 1
242
+ regu_loss += torch.norm(para_cov - I, p="fro")
243
+ if num_param > 0:
244
+ regu_loss = regu_loss / num_param
245
+ else:
246
+ regu_loss = 0
247
+ outputs.loss += orth_reg_weight * regu_loss
248
+ return outputs
249
+
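The `forward` above adds AdaLoRA's orthogonal regularizer to the task loss: for every trainable `lora_A` it penalizes `||A A^T - I||_F`, for every `lora_B` it penalizes `||B^T B - I||_F`, averages over the matrices and scales by `orth_reg_weight`. The same computation for a single random A/B pair, with illustrative sizes.

```python
import torch

r, in_features, out_features = 4, 16, 16
lora_A = torch.randn(r, in_features)    # right singular vectors, shape (r, in)
lora_B = torch.randn(out_features, r)   # left singular vectors, shape (out, r)
orth_reg_weight = 0.5

regu_loss, num_param = 0.0, 0
for name, p in [("lora_A", lora_A), ("lora_B", lora_B)]:
    para_cov = p @ p.T if name == "lora_A" else p.T @ p   # always an (r, r) Gram matrix
    I = torch.eye(*para_cov.size())
    regu_loss = regu_loss + torch.norm(para_cov - I, p="fro")
    num_param += 1

penalty = orth_reg_weight * regu_loss / num_param
print(penalty)  # added on top of outputs.loss in AdaLoraModel.forward
```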
250
+ def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
251
+ lora_config = self.peft_config[adapter_name]
252
+ for name, rank_idx in rank_pattern.items():
253
+ if isinstance(rank_idx, list):
254
+ rank = sum(rank_idx)
255
+ elif isinstance(rank_idx, torch.Tensor):
256
+ rank_idx = rank_idx.view(-1)
257
+ rank = rank_idx.sum().item()
258
+ else:
259
+ raise ValueError("Unexpected type of rank_idx")
260
+ key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
261
+ _, target, _ = _get_submodules(self.model, key)
262
+ lora_E_weights = target.lora_E[adapter_name][rank_idx]
263
+ lora_A_weights = target.lora_A[adapter_name][rank_idx]
264
+ lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
265
+ ranknum = target.ranknum[adapter_name]
266
+ target.update_layer(
267
+ adapter_name,
268
+ rank,
269
+ lora_config.lora_alpha,
270
+ lora_config.lora_dropout,
271
+ lora_config.init_lora_weights,
272
+ )
273
+ with torch.no_grad():
274
+ if rank > 0:
275
+ target.lora_E[adapter_name].copy_(lora_E_weights)
276
+ target.lora_A[adapter_name].copy_(lora_A_weights)
277
+ target.lora_B[adapter_name].copy_(lora_B_weights)
278
+ # The scaling is exactly as the previous
279
+ target.ranknum[adapter_name].copy_(ranknum)
280
+
281
+ def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
282
+ for name, rank_idx in rank_pattern.items():
283
+ rank = sum(rank_idx)
284
+ prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
285
+ for layer in ["lora_E", "lora_A", "lora_B"]:
286
+ key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
287
+ if layer != "lora_B":
288
+ state_dict[key] = (
289
+ state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
290
+ )
291
+ else:
292
+ state_dict[key] = (
293
+ state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
294
+ )
295
+ return state_dict
296
+
297
+ def update_and_allocate(self, global_step):
298
+ lora_config = self.peft_config[self.trainable_adapter_name]
299
+ # Update the importance score and allocate the budget
300
+ if global_step < lora_config.total_step - lora_config.tfinal:
301
+ _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
302
+ if rank_pattern:
303
+ lora_config.rank_pattern = rank_pattern
304
+ # Finalize the budget allocation
305
+ elif global_step == lora_config.total_step - lora_config.tfinal:
306
+ _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
307
+ # for some reason, this freezes the trainable parameters and nothing gets updated
308
+ # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
309
+ lora_config.rank_pattern = rank_pattern
310
+ self.rankallocator.reset_ipt()
311
+ # Currently using inefficient way to mask the unimportant weights using the rank pattern
312
+ # due to problem mentioned above
313
+ elif global_step > lora_config.total_step - lora_config.tfinal:
314
+ self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
315
+ # Pass the function and do forward propagation
316
+ else:
317
+ return None
318
+
319
+ @staticmethod
320
+ def _prepare_adalora_config(peft_config, model_config):
321
+ if peft_config.target_modules is None:
322
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:
323
+ raise ValueError("Please specify `target_modules` in `peft_config`")
324
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[
325
+ model_config["model_type"]
326
+ ]
327
+ return peft_config
328
+
329
+
330
+ class AdaLoraLayer(LoraLayer):
331
+ def __init__(
332
+ self,
333
+ in_features: int,
334
+ out_features: int,
335
+ ):
336
+ super().__init__(in_features, out_features)
337
+ self.lora_E = nn.ParameterDict({})
338
+ self.lora_A = nn.ParameterDict({})
339
+ self.lora_B = nn.ParameterDict({})
340
+ self.ranknum = nn.ParameterDict({})
341
+
342
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
343
+ self.r[adapter_name] = r
344
+ self.lora_alpha[adapter_name] = lora_alpha
345
+ if lora_dropout > 0.0:
346
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
347
+ else:
348
+
349
+ def lora_dropout_layer(x):
350
+ return x
351
+
352
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
353
+ # Actual trainable parameters
354
+ # Right singular vectors
355
+ self.lora_A.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, self.in_features))}))
356
+ # Singular values
357
+ self.lora_E.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, 1))}))
358
+ # Left singular vectors
359
+ self.lora_B.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(self.out_features, r))}))
360
+ # The current rank
361
+ self.ranknum.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(1), requires_grad=False)}))
362
+ self.ranknum[adapter_name].data.fill_(float(r))
363
+ self.ranknum[adapter_name].requires_grad = False
364
+ self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r)
365
+ if init_lora_weights:
366
+ self.reset_lora_parameters(adapter_name)
367
+ self.to(self.weight.device)
368
+
369
+ def reset_lora_parameters(self, adapter_name):
370
+ if adapter_name in self.lora_A.keys():
371
+ nn.init.zeros_(self.lora_E[adapter_name])
372
+ nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
373
+ nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
374
+
375
+
376
+ class SVDLinear(nn.Linear, AdaLoraLayer):
377
+ # SVD-based adaptation by a dense layer
378
+ def __init__(
379
+ self,
380
+ adapter_name: str,
381
+ in_features: int,
382
+ out_features: int,
383
+ r: int = 0,
384
+ lora_alpha: int = 1,
385
+ lora_dropout: float = 0.0,
386
+ fan_in_fan_out: bool = False,
387
+ **kwargs,
388
+ ):
389
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
390
+ nn.Linear.__init__(self, in_features, out_features, **kwargs)
391
+ AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
392
+ # Freezing the pre-trained weight matrix
393
+ self.weight.requires_grad = False
394
+
395
+ self.fan_in_fan_out = fan_in_fan_out
396
+ if fan_in_fan_out:
397
+ self.weight.data = self.weight.data.T
398
+
399
+ nn.Linear.reset_parameters(self)
400
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
401
+ self.active_adapter = adapter_name
402
+
403
+ def merge(self):
404
+ if self.active_adapter not in self.lora_A.keys():
405
+ return
406
+ if self.merged:
407
+ warnings.warn("Already merged. Nothing to do.")
408
+ return
409
+ if self.r[self.active_adapter] > 0:
410
+ self.weight.data += (
411
+ transpose(
412
+ self.lora_B[self.active_adapter]
413
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]),
414
+ self.fan_in_fan_out,
415
+ )
416
+ * self.scaling[self.active_adapter]
417
+ / (self.ranknum[self.active_adapter] + 1e-5)
418
+ )
419
+ self.merged = True
420
+
421
+ def unmerge(self):
422
+ if self.active_adapter not in self.lora_A.keys():
423
+ return
424
+ if not self.merged:
425
+ warnings.warn("Already unmerged. Nothing to do.")
426
+ return
427
+ if self.r[self.active_adapter] > 0:
428
+ self.weight.data -= (
429
+ transpose(
430
+ self.lora_B[self.active_adapter]
431
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter])
432
+ )
433
+ * self.scaling[self.active_adapter]
434
+ / (self.ranknum[self.active_adapter] + 1e-5)
435
+ )
436
+ self.merged = False
437
+
438
+ def forward(self, x: torch.Tensor):
439
+ if self.active_adapter not in self.lora_A.keys():
440
+ return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
441
+ if self.disable_adapters:
442
+ if self.r[self.active_adapter] > 0 and self.merged:
443
+ self.unmerge()
444
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
445
+ elif self.r[self.active_adapter] > 0 and not self.merged:
446
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
447
+ result += (
448
+ (
449
+ self.lora_dropout[self.active_adapter](x)
450
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
451
+ @ self.lora_B[self.active_adapter].T
452
+ )
453
+ * self.scaling[self.active_adapter]
454
+ / (self.ranknum[self.active_adapter] + 1e-5)
455
+ )
456
+ else:
457
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
458
+ return result
459
+
460
+
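`SVDLinear.forward` above adds a low-rank SVD-style term to the frozen linear output, `x @ (A * E)^T @ B^T`, scaled by `scaling / (ranknum + 1e-5)`, and `merge()` folds the equivalent update `B (A * E)` directly into the weight. A shape-checked sketch of that equivalence; every dimension and value here is illustrative.

```python
import torch

batch, in_features, out_features, r = 2, 16, 32, 4
scaling, ranknum = 32.0, float(r)   # scaling corresponds to lora_alpha

x = torch.randn(batch, in_features)
weight = torch.randn(out_features, in_features)   # frozen base weight
lora_A = torch.randn(r, in_features)              # right singular vectors
lora_E = torch.randn(r, 1)                        # one singular value per rank
lora_B = torch.randn(out_features, r)             # left singular vectors

base = x @ weight.T
delta = (x @ (lora_A * lora_E).T @ lora_B.T) * scaling / (ranknum + 1e-5)
result = base + delta

# merge() applies the same update directly to the weight matrix.
merged_weight = weight + (lora_B @ (lora_A * lora_E)) * scaling / (ranknum + 1e-5)
assert torch.allclose(result, x @ merged_weight.T, rtol=1e-3, atol=1e-4)
```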
461
+ if is_bnb_available():
462
+
463
+ class SVDLinear8bitLt(bnb.nn.Linear8bitLt, AdaLoraLayer):
464
+ # Low-rank matrix for SVD-based adaptation
465
+ def __init__(
466
+ self,
467
+ adapter_name,
468
+ in_features,
469
+ out_features,
470
+ r: int = 0,
471
+ lora_alpha: int = 1,
472
+ lora_dropout: float = 0.0,
473
+ **kwargs,
474
+ ):
475
+ bnb.nn.Linear8bitLt.__init__(
476
+ self,
477
+ in_features,
478
+ out_features,
479
+ bias=kwargs.get("bias", True),
480
+ has_fp16_weights=kwargs.get("has_fp16_weights", True),
481
+ memory_efficient_backward=kwargs.get("memory_efficient_backward", False),
482
+ threshold=kwargs.get("threshold", 0.0),
483
+ index=kwargs.get("index", None),
484
+ )
485
+ AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
486
+ # Freezing the pre-trained weight matrix
487
+ self.weight.requires_grad = False
488
+
489
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
490
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
491
+ self.active_adapter = adapter_name
492
+
493
+ def forward(self, x: torch.Tensor):
494
+ result = super().forward(x)
495
+
496
+ if self.disable_adapters or self.active_adapter not in self.lora_A.keys():
497
+ return result
498
+ elif self.r[self.active_adapter] > 0:
499
+ if not torch.is_autocast_enabled():
500
+ expected_dtype = result.dtype
501
+
502
+ if x.dtype != torch.float32:
503
+ x = x.float()
504
+ output = (
505
+ (
506
+ self.lora_dropout[self.active_adapter](x)
507
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
508
+ @ self.lora_B[self.active_adapter].T
509
+ ).to(expected_dtype)
510
+ * self.scaling[self.active_adapter]
511
+ / (self.ranknum[self.active_adapter] + 1e-5)
512
+ )
513
+ else:
514
+ output = (
515
+ (
516
+ self.lora_dropout[self.active_adapter](x)
517
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
518
+ @ self.lora_B[self.active_adapter].T
519
+ )
520
+ * self.scaling[self.active_adapter]
521
+ / (self.ranknum[self.active_adapter] + 1e-5)
522
+ )
523
+ result = result + output
524
+ return result
525
+
526
+
527
+ if is_bnb_4bit_available():
528
+
529
+ class SVDLinear4bit(bnb.nn.Linear4bit, AdaLoraLayer):
530
+ # Low-rank matrix for SVD-based adaptation
531
+ def __init__(
532
+ self,
533
+ adapter_name,
534
+ in_features,
535
+ out_features,
536
+ r: int = 0,
537
+ lora_alpha: int = 1,
538
+ lora_dropout: float = 0.0,
539
+ **kwargs,
540
+ ):
541
+ bnb.nn.Linear4bit.__init__(
542
+ self,
543
+ in_features,
544
+ out_features,
545
+ bias=kwargs.get("bias", True),
546
+ compute_dtype=kwargs.get("compute_dtype", torch.float32),
547
+ compress_statistics=kwargs.get("compress_statistics", True),
548
+ quant_type=kwargs.get("quant_type", "nf4"),
549
+ )
550
+ AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
551
+ # Freezing the pre-trained weight matrix
552
+ self.weight.requires_grad = False
553
+
554
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
555
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
556
+ self.active_adapter = adapter_name
557
+
558
+ def forward(self, x: torch.Tensor):
559
+ result = super().forward(x)
560
+
561
+ if self.disable_adapters or self.active_adapter not in self.lora_A.keys():
562
+ return result
563
+ elif self.r[self.active_adapter] > 0:
564
+ if not torch.is_autocast_enabled():
565
+ expected_dtype = result.dtype
566
+
567
+ if x.dtype != torch.float32:
568
+ x = x.float()
569
+ output = (
570
+ (
571
+ self.lora_dropout[self.active_adapter](x)
572
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
573
+ @ self.lora_B[self.active_adapter].T
574
+ ).to(expected_dtype)
575
+ * self.scaling[self.active_adapter]
576
+ / (self.ranknum[self.active_adapter] + 1e-5)
577
+ )
578
+ else:
579
+ output = (
580
+ (
581
+ self.lora_dropout[self.active_adapter](x)
582
+ @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
583
+ @ self.lora_B[self.active_adapter].T
584
+ )
585
+ * self.scaling[self.active_adapter]
586
+ / (self.ranknum[self.active_adapter] + 1e-5)
587
+ )
588
+ result = result + output
589
+ return result
590
+
591
+
592
+ class RankAllocator(object):
593
+ """
594
+ The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY
595
+
596
+ Args:
597
+ config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
598
+ model: the model that we apply AdaLoRA to.
599
+
600
+ """
601
+
602
+ def __init__(self, model, peft_config, adapter_name):
603
+ self.peft_config = peft_config
604
+ self.adapter_name = adapter_name
605
+ self.beta1 = peft_config.beta1
606
+ self.beta2 = peft_config.beta2
607
+ assert self.beta1 > 0 and self.beta1 < 1
608
+ assert self.beta2 > 0 and self.beta2 < 1
609
+
610
+ self.reset_ipt()
611
+ self._set_budget_scheduler(model)
612
+
613
+ def set_total_step(self, total_step):
614
+ self.peft_config.total_step = total_step
615
+
616
+ def reset_ipt(self):
617
+ self.ipt = {}
618
+ self.exp_avg_ipt = {}
619
+ self.exp_avg_unc = {}
620
+
621
+ def _set_budget_scheduler(self, model):
622
+ self.init_bgt = 0
623
+ self.name_set = set()
624
+ for n, p in model.named_parameters():
625
+ if f"lora_A.{self.adapter_name}" in n:
626
+ self.init_bgt += p.size(0)
627
+ self.name_set.add(n.replace("lora_A", "%s"))
628
+ self.name_set = sorted(self.name_set)
629
+ # The total final rank budget
630
+ self.target_bgt = self.peft_config.target_r * len(self.name_set)
631
+
632
+ def budget_schedule(self, step: int):
633
+ tinit = self.peft_config.tinit
634
+ tfinal = self.peft_config.tfinal
635
+ total_step = self.peft_config.total_step
636
+ # Initial warmup
637
+ if step <= tinit:
638
+ budget = self.init_bgt
639
+ mask_ind = False
640
+ # Final fine-tuning
641
+ elif step > total_step - tfinal:
642
+ budget = self.target_bgt
643
+ mask_ind = True
644
+ else:
645
+ # Budget decreasing with a cubic scheduler
646
+ mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
647
+ budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt)
648
+ mask_ind = True if step % self.peft_config.deltaT == 0 else False
649
+ return budget, mask_ind
650
+
651
+ def update_ipt(self, model):
652
+ # Update the sensitivity and uncertainty for every weight
653
+ for n, p in model.named_parameters():
654
+ if "lora_" in n and self.adapter_name in n:
655
+ if n not in self.ipt:
656
+ self.ipt[n] = torch.zeros_like(p)
657
+ self.exp_avg_ipt[n] = torch.zeros_like(p)
658
+ self.exp_avg_unc[n] = torch.zeros_like(p)
659
+ with torch.no_grad():
660
+ self.ipt[n] = (p * p.grad).abs().detach()
661
+ # Sensitivity smoothing
662
+ self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n]
663
+ # Uncertainty quantification
664
+ self.exp_avg_unc[n] = (
665
+ self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs()
666
+ )
667
+
668
+ def _element_score(self, n):
669
+ return self.exp_avg_ipt[n] * self.exp_avg_unc[n]
670
+
671
+ def _combine_ipt(self, ipt_E, ipt_AB):
672
+ ipt_AB = ipt_AB.sum(dim=1, keepdim=False)
673
+ sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1)
674
+ return sum_ipt
675
+
676
+ def mask_to_budget(self, model, budget):
677
+ value_ipt = {}
678
+ vector_ipt = {}
679
+ triplet_ipt = {}
680
+ # Get the importance score for A, E, B
681
+ for n, p in model.named_parameters():
682
+ if f"lora_A.{self.adapter_name}" in n:
683
+ entry_ipt = self._element_score(n)
684
+ comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True)
685
+ name_m = n.replace("lora_A", "%s")
686
+ if name_m not in vector_ipt:
687
+ vector_ipt[name_m] = [comb_ipt]
688
+ else:
689
+ vector_ipt[name_m].append(comb_ipt)
690
+ if f"lora_B.{self.adapter_name}" in n:
691
+ entry_ipt = self._element_score(n)
692
+ comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1)
693
+ name_m = n.replace("lora_B", "%s")
694
+ if name_m not in vector_ipt:
695
+ vector_ipt[name_m] = [comb_ipt]
696
+ else:
697
+ vector_ipt[name_m].append(comb_ipt)
698
+ if f"lora_E.{self.adapter_name}" in n:
699
+ entry_ipt = self._element_score(n)
700
+ name_m = n.replace("lora_E", "%s")
701
+ value_ipt[name_m] = entry_ipt
702
+
703
+ all_score = []
704
+ # Calculate the score for each triplet
705
+ for name_m in vector_ipt:
706
+ ipt_E = value_ipt[name_m]
707
+ ipt_AB = torch.cat(vector_ipt[name_m], dim=1)
708
+ sum_ipt = self._combine_ipt(ipt_E, ipt_AB)
709
+ name_E = name_m % "lora_E"
710
+ triplet_ipt[name_E] = sum_ipt.view(-1, 1)
711
+ all_score.append(sum_ipt.view(-1))
712
+
713
+ # Get the threshold by ranking ipt
714
+ mask_threshold = torch.kthvalue(
715
+ torch.cat(all_score),
716
+ k=self.init_bgt - budget,
717
+ )[0].item()
718
+
719
+ rank_pattern = {}
720
+ # Mask the unimportant triplets
721
+ with torch.no_grad():
722
+ for n, p in model.named_parameters():
723
+ if f"lora_E.{self.adapter_name}" in n:
724
+ p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0)
725
+ rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist()
726
+ return rank_pattern
727
+
728
+ def update_and_allocate(self, model, global_step, force_mask=False):
729
+ # # Update the importance score and allocate the budget
730
+ if global_step < self.peft_config.total_step - self.peft_config.tfinal:
731
+ self.update_ipt(model)
732
+ budget, mask_ind = self.budget_schedule(global_step)
733
+ # Allocate the budget according to importance scores
734
+ if mask_ind or force_mask:
735
+ rank_pattern = self.mask_to_budget(model, budget)
736
+ else:
737
+ rank_pattern = None
738
+ return budget, rank_pattern
739
+
740
+ def mask_using_rank_pattern(self, model, rank_pattern):
741
+ # Mask the unimportant triplets
742
+ is_adapter_name_truncated = False
743
+ if self.adapter_name not in next(iter(rank_pattern.keys())):
744
+ is_adapter_name_truncated = True
745
+
746
+ with torch.no_grad():
747
+ for n, p in model.named_parameters():
748
+ if f"lora_E.{self.adapter_name}" in n:
749
+ key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "")
750
+ mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device)
751
+ p.masked_fill_(~mask.bool(), 0.0)
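
The `budget_schedule` method above is the heart of AdaLoRA's rank allocation: the total rank budget is held at `init_bgt` during warmup, decays along a cubic curve, and is pinned to `target_bgt` for the final `tfinal` steps. A minimal standalone sketch of that schedule follows; the hyperparameter values are purely illustrative and are not the repository's defaults.

```python
# Standalone sketch of RankAllocator.budget_schedule (illustrative numbers only).
def cubic_budget(step, tinit=200, tfinal=500, total_step=3000,
                 init_bgt=1152, target_bgt=384):
    if step <= tinit:                      # warmup: keep the full initial budget
        return init_bgt
    if step > total_step - tfinal:         # final phase: hold the target budget
        return target_bgt
    mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
    return int((init_bgt - target_bgt) * mul_coeff ** 3 + target_bgt)

for s in (100, 1000, 2000, 2600):
    print(s, cubic_budget(s))  # budget decays monotonically from 1152 toward 384
```
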
model/peft/tuners/adaption_prompt.py ADDED
@@ -0,0 +1,368 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import math
17
+ from collections import namedtuple
18
+ from dataclasses import dataclass, field
19
+ from typing import Dict, List
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ import torch.nn.functional as F
24
+
25
+ from peft.utils.config import PeftConfig, PeftType
26
+ from peft.utils.other import _freeze_adapter, _get_submodules
27
+
28
+
29
+ def llama_rotate_half(x: torch.Tensor) -> torch.Tensor:
30
+ """
31
+ Rotate half the hidden dims of the input.
32
+
33
+ This function was duplicated verbatim from:
34
+ https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126
35
+
36
+ This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other
37
+ functions were also adapted from the transformers implementation but were modified.
38
+ """
39
+ x1 = x[..., : x.shape[-1] // 2]
40
+ x2 = x[..., x.shape[-1] // 2 :]
41
+ return torch.cat((-x2, x1), dim=-1)
42
+
43
+
44
+ def llama_apply_rotary_pos_emb(q, cos, sin, position_ids):
45
+ """
46
+ Apply rotary position embedding to query states in the Llama model.
47
+
48
+ This function was adapted from:
49
+ https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133
50
+
51
+ It was modified to remove unnecessary processing of key states.
52
+ """
53
+ gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
54
+ gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
55
+ cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
56
+ sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
57
+ q_embed = (q * cos) + (llama_rotate_half(q) * sin)
58
+ return q_embed
59
+
60
+
61
+ def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor:
62
+ """
63
+ Compute query states for Llama models specifically.
64
+
65
+ They need to be recomputed as the forward() method of the original LlamaModel in the transformers library does not
66
+ return them. See the related discussion in the PR: https://github.com/huggingface/peft/pull/268
67
+ """
68
+ hidden_states = kwargs.get("hidden_states")
69
+ position_ids = kwargs.get("position_ids")
70
+ past_key_value = kwargs.get("past_key_value")
71
+ bsz, q_len, _ = hidden_states.size()
72
+ query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2)
73
+ value_states = model.v_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2)
74
+
75
+ seq_len = q_len
76
+ if past_key_value is not None:
77
+ seq_len += past_key_value[0].shape[-2]
78
+ cos, sin = model.rotary_emb(value_states, seq_len=seq_len)
79
+
80
+ return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids)
81
+
82
+
83
+ # Contains the config that is specific to a transformers model type.
84
+ ModelTypeConfig = namedtuple(
85
+ "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
86
+ )
87
+ # Mapping of transformers model types to their specific configuration.
88
+ TRANSFORMERS_MODEL_CONFIG = {
89
+ "llama": ModelTypeConfig(
90
+ compute_query_states=llama_compute_query_states,
91
+ target_modules="self_attn",
92
+ k_proj_layer="k_proj",
93
+ v_proj_layer="v_proj",
94
+ o_proj_layer="o_proj",
95
+ ),
96
+ }
97
+
98
+
99
+ def is_adaption_prompt_trainable(params: str) -> bool:
100
+ """Return True if module is trainable under adaption prompt fine-tuning."""
101
+ return params.split(".")[-1].startswith("adaption_")
102
+
103
+
104
+ @dataclass
105
+ class AdaptionPromptConfig(PeftConfig):
106
+ """Stores the configuration of an [`AdaptionPromptModel`]."""
107
+
108
+ target_modules: str = field(
109
+ default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
110
+ )
111
+ adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
112
+ adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})
113
+
114
+ def __post_init__(self):
115
+ self.peft_type = PeftType.ADAPTION_PROMPT
116
+
117
+
118
+ def prepare_config(
119
+ peft_config: AdaptionPromptConfig,
120
+ model,
121
+ ) -> AdaptionPromptConfig:
122
+ """Prepare the config based on the llama model type."""
123
+ if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
124
+ raise ValueError(f"Unsupported model type for adaption prompt: '{model.config.model_type}'.")
125
+
126
+ model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]
127
+
128
+ if peft_config.target_modules is None:
129
+ peft_config.target_modules = model_config.target_modules
130
+
131
+ return peft_config
132
+
133
+
134
+ class AdaptionPromptModel(nn.Module):
135
+ """
136
+ Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.
137
+
138
+ The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert
139
+ trainable prompts with gates (for zero init).
140
+
141
+ Notes on the multi-adapter pattern:
142
+ - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter
143
+ name.
144
+ - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them
145
+ in the dictionary, and replace them with the modules of the new adapter.
146
+ - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the
147
+ dictionary.
148
+ - Disabling the adapter would also result in the modules being removed from the model.
149
+ """
150
+
151
+ def __init__(self, model, configs: Dict, adapter_name: str):
152
+ super().__init__()
153
+ self.model = model
154
+ # Store adapter configs by name.
155
+ self._configs: Dict[str, AdaptionPromptConfig] = {}
156
+ # Store lists of the parents of the affected attention modules by adapter name.
157
+ # We keep references to the parents so we can swap the adapters in-and-out of the model.
158
+ self._parents: Dict[str, List[nn.Module]] = {}
159
+ # Store lists of cached AdaptedAttention modules by name.
160
+ self._cached_adapters: Dict[str, List] = {}
161
+ # The name of the currently active adapter.
162
+ self._active_adapter = None
163
+ # Whether the adapter is enabled.
164
+ self._enabled = True
165
+ self.forward = self.model.forward
166
+ self.add_adapter(adapter_name, configs[adapter_name])
167
+ self._mark_only_adaption_prompts_as_trainable()
168
+
169
+ def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
170
+ """Add an adapter with the given name and config."""
171
+ config = prepare_config(config, self.model)
172
+ if adapter_name in self._configs:
173
+ raise ValueError(f"Adapter with name '{adapter_name}' already exists.")
174
+
175
+ parents = []
176
+ for name, _ in self.model.named_modules():
177
+ if name.endswith(config.target_modules):
178
+ par, _, _ = _get_submodules(self.model, name)
179
+ parents.append(par)
180
+ if len(parents) < config.adapter_layers:
181
+ raise ValueError(
182
+ f"Config specifies more adapter layers '{config.adapter_layers}'"
183
+ f" than the model has '{len(parents)}'."
184
+ )
185
+ # Note that if the target modules are not in Sequential, ModuleList, or
186
+ # some other PyTorch ordered container, the behavior is undefined as we
187
+ # assume here that the order of the modules is the same as the order of
188
+ # the transformer decoder layers.
189
+ parents = parents[-config.adapter_layers :]
190
+ self._parents[adapter_name] = parents
191
+
192
+ # It is only None during initialization.
193
+ # If it is disabled, we don't have to remove the modules.
194
+ if self._active_adapter is not None and self._enabled:
195
+ self._remove_adapted_attentions(self._active_adapter)
196
+ self._active_adapter = adapter_name
197
+ self._configs[adapter_name] = config
198
+ self._create_adapted_attentions(config, parents)
199
+ if not self._enabled:
200
+ self._remove_adapted_attentions(self._active_adapter)
201
+
202
+ if config.inference_mode:
203
+ _freeze_adapter(self.model, adapter_name)
204
+
205
+ def set_adapter(self, adapter_name: str) -> None:
206
+ """Set the model to use the adapter with the given name."""
207
+ if self._active_adapter == adapter_name:
208
+ return
209
+ if adapter_name not in self._configs:
210
+ raise ValueError(f"Adapter with name '{adapter_name}' does not exist.")
211
+
212
+ if self._enabled:
213
+ self._remove_adapted_attentions(self._active_adapter)
214
+ self._set_adapted_attentions(adapter_name)
215
+
216
+ self._active_adapter = adapter_name
217
+
218
+ def enable_adapter_layers(self):
219
+ """Enable adapter layers by swapping in cached AdaptedAttention modules."""
220
+ self._enabled = True
221
+ self._set_adapted_attentions(self._active_adapter)
222
+
223
+ def disable_adapter_layers(self):
224
+ """Disable adapter layers by swapping out AdaptedAttention modules."""
225
+ self._enabled = False
226
+ self._remove_adapted_attentions(self._active_adapter)
227
+
228
+ def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:
229
+ """Wrap LlamaAttention modules with newly created AdaptedAttention modules."""
230
+ for par in parents:
231
+ attn = AdaptedAttention(
232
+ model_type=self.model.config.model_type,
233
+ adapter_len=config.adapter_len,
234
+ model=getattr(par, config.target_modules),
235
+ )
236
+ setattr(par, config.target_modules, attn)
237
+
238
+ def _set_adapted_attentions(self, adapter_name: str) -> None:
239
+ """Replace LlamaAttention modules with cached AdaptedAttention modules."""
240
+ cached = self._cached_adapters[adapter_name]
241
+ del self._cached_adapters[adapter_name]
242
+ config = self._configs[adapter_name]
243
+ for i, par in enumerate(self._parents[adapter_name]):
244
+ setattr(par, config.target_modules, cached[i])
245
+
246
+ def _remove_adapted_attentions(self, adapter_name: str) -> None:
247
+ """Remove AdaptedAttention modules from the model and store them in the cache."""
248
+ config = self._configs[adapter_name]
249
+ adapted_attentions = []
250
+ for par in self._parents[adapter_name]:
251
+ attn = getattr(par, config.target_modules)
252
+ adapted_attentions.append(attn)
253
+ setattr(par, config.target_modules, attn.model)
254
+ self._cached_adapters[adapter_name] = adapted_attentions
255
+
256
+ def _mark_only_adaption_prompts_as_trainable(self) -> None:
257
+ """Freeze all parameters of the model except the adaption prompts."""
258
+ for n, p in self.model.named_parameters():
259
+ if not is_adaption_prompt_trainable(n):
260
+ p.requires_grad = False
261
+
262
+ def __getattr__(self, name: str):
263
+ """Forward missing attributes to the wrapped module."""
264
+ try:
265
+ return super().__getattr__(name) # defer to nn.Module's logic
266
+ except AttributeError:
267
+ # This is necessary as e.g. causal models have various methods that we
268
+ # don't want to re-implement here.
269
+ return getattr(self.model, name)
270
+
271
+
272
+ class AdaptedAttention(nn.Module):
273
+ """This module wraps a LLamaAttention module and injects adaption prompts."""
274
+
275
+ def __init__(self, model_type: str, adapter_len: int, model):
276
+ """
277
+ Initialize object.
278
+
279
+ Args:
280
+ model_type: The transformer model type. This is used to retrieve the right method to
281
+ compute query states.
282
+ adapter_len: The length of the adaption prompt to insert.
283
+ model: The original transformer attention module that is being wrapped.
284
+ """
285
+ assert not isinstance(model, AdaptedAttention)
286
+ super().__init__()
287
+ self.model_type = model_type
288
+ self.model = model
289
+ self.adapter_len = adapter_len
290
+ # Assume all parameters of the attention model we are wrapping are on the same device.
291
+ device = next(model.parameters()).device
292
+ # Don't think this was specified in the paper, but we follow the official repo which used an Embedding
293
+ # which initializes the tokens with standard normal values.
294
+ # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234
295
+ # (bsz, adapter_len, hidden_size)
296
+ target_dtype = (
297
+ model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32
298
+ )
299
+ self.adaption_prompt = nn.Parameter(
300
+ torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_()
301
+ )
302
+ # Initialize the gate to 0 as this is "zero-init".
303
+ self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype))
304
+
305
+ def forward(self, **kwargs):
306
+ """
307
+ Forward pass for the adapter which wraps the original LlamaAttention module.
308
+
309
+ "Official" paper implementation:
310
+ https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141
311
+
312
+ Args:
313
+ kwargs: See the original LlamaAttention module.
314
+ """
315
+ if kwargs.get("output_attention", False):
316
+ raise NotImplementedError("output_attention is not currently supported.")
317
+
318
+ output, _, past_key_value = self.model(**kwargs)
319
+ bsz = output.shape[0]
320
+ q_len = output.shape[1]
321
+ embed_dim = output.shape[2]
322
+ k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer
323
+ v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer
324
+ o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer
325
+
326
+ if k_proj_layer == v_proj_layer:
327
+ _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2)
328
+ else:
329
+ key = getattr(self.model, k_proj_layer)(self.adaption_prompt)
330
+ value = getattr(self.model, v_proj_layer)(self.adaption_prompt)
331
+ # (bsz, num_heads, adapter_len, head_dim)
332
+ adapter_k = (
333
+ key.view(1, self.adapter_len, self.model.num_heads, self.model.head_dim)
334
+ .repeat(bsz, 1, 1, 1)
335
+ .transpose(1, 2)
336
+ )
337
+ # (bsz, num_heads, adapter_len, head_dim)
338
+ adapter_v = (
339
+ value.view(1, self.adapter_len, self.model.num_heads, self.model.head_dim)
340
+ .repeat(bsz, 1, 1, 1)
341
+ .transpose(1, 2)
342
+ )
343
+
344
+ # Recompute query states.
345
+ compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states
346
+ # (bsz, num_heads, q_len, head_dim)
347
+ query_states = compute_query_states(model=self.model, **kwargs)
348
+
349
+ previous_dtype = query_states.dtype
350
+ # (bsz, num_heads, q_len, adapter_len)
351
+ scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt(
352
+ self.model.head_dim
353
+ )
354
+ # Upcast attention to fp32
355
+ # (bsz, num_heads, q_len, adapter_len)
356
+ scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype)
357
+ # (bsz, q_len, num_heads * head_dim)
358
+ adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)
359
+ # (bsz, q_len, hidden_size)
360
+ if o_proj_layer is not None:
361
+ adapter_output = getattr(self.model, o_proj_layer)(adapter_output)
362
+
363
+ # Add adaption prompt output to original output.
364
+ output = output + adapter_output
365
+
366
+ # Restore original dtype.
367
+ output = output.to(previous_dtype)
368
+ return output, None, past_key_value
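
As a reading aid, the adaption-prompt machinery above only activates once an `AdaptionPromptConfig` is attached to a Llama-style model. A minimal, illustrative configuration is sketched below; the field values and the `model.peft` import path are assumptions for the example, not settings taken from the training scripts.

```python
# Illustrative only: configure adaption prompts for the top decoder layers of a
# Llama-style model. The import path assumes the vendored package is `model.peft`.
from model.peft.tuners.adaption_prompt import AdaptionPromptConfig

config = AdaptionPromptConfig(
    adapter_len=10,     # learnable prompt tokens inserted per wrapped attention
    adapter_layers=30,  # how many of the top decoder layers receive AdaptedAttention
)
# prepare_config(config, model) later fills target_modules ("self_attn" for llama),
# and AdaptionPromptModel(model, {"default": config}, "default") performs the wrapping.
```
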
model/peft/tuners/debug_utils.py ADDED
@@ -0,0 +1,64 @@
1
+ import os
2
+ import torch
3
+ import matplotlib.pyplot as plt
4
+ import numpy as np
5
+ from typing import Optional
6
+
7
+ class DebugUtils:
8
+ def __init__(self, pic_dir: str):
9
+ self.pic_dir = pic_dir
10
+ if not os.path.exists(self.pic_dir):
11
+ os.makedirs(self.pic_dir)
12
+
13
+ def check_orthogonal(self, tensor: torch.Tensor) -> float:
14
+ if tensor.dim() > 2:
15
+ raise ValueError("Tensor dimension must be 1 or 2.")
16
+
17
+ if tensor.dim() == 1:
18
+ return 0.0 # 1D tensor is not applicable for orthogonality
19
+
20
+ if tensor.dim() == 2:
21
+ num_rows, num_dims = tensor.shape
22
+ dot_products = torch.mm(tensor, tensor.t())
23
+ norms_squared = torch.diag(dot_products)
24
+ sum_abs_dot_products = torch.sum(torch.abs(dot_products - torch.diag(norms_squared)))
25
+ max_sum = (num_rows * (num_rows - 1)) / 2
26
+ orthogonality_score = 1.0 - (sum_abs_dot_products / max_sum)
27
+ return orthogonality_score.item()
28
+
29
+ def plot_tensor(self, tensor: torch.Tensor, pic_name: str, x_axis: Optional[str], y_axis: Optional[str], plot_number: bool = False) -> None:
30
+ if tensor.dim() > 2:
31
+ raise ValueError("Tensor dimensions must be 2 or lower for plotting.")
32
+
33
+ fig, ax = plt.subplots()
34
+
35
+ if tensor.dim() == 1:
36
+ tensor_shape = tensor.shape[0]
37
+ ax.set_yticks([])
38
+ ax.set_xticks(np.arange(tensor_shape))
39
+ color_matrix = np.ones((1, tensor_shape, 3)) * [0.7, 0.7, 1.0]
40
+ ax.imshow(color_matrix, aspect='equal')
41
+ for i in range(1, tensor_shape):
42
+ ax.axvline(x=i - 0.5, color='black', linewidth=1)
43
+ if x_axis:
44
+ ax.set_xlabel(x_axis)
45
+ else:
46
+ heatmap = ax.imshow(tensor, cmap='YlOrRd', aspect='auto')
47
+
48
+ if plot_number:
49
+ for i in range(tensor.shape[0]):
50
+ for j in range(tensor.shape[1]):
51
+ ax.text(j, i, f'{tensor[i, j]:.3f}', ha='center', va='center', color='black')
52
+
53
+ if y_axis:
54
+ ax.set_ylabel(y_axis)
55
+ if x_axis:
56
+ ax.set_xlabel(x_axis)
57
+
58
+ if tensor.dim() == 2: plt.colorbar(heatmap, ax=ax, orientation='vertical')  # heatmap only exists in the 2-D branch
59
+
60
+ plt.savefig(os.path.join(self.pic_dir, pic_name))
61
+ plt.close()
62
+
63
+
64
+ debuge_utils = DebugUtils("./picture")
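
A quick, illustrative use of the helper above: plot a small gating-probability matrix and report how close its rows are to orthogonal. File names, tensor shapes, and the import path are arbitrary assumptions for the example.

```python
# Illustrative usage of DebugUtils (assumed import path for the vendored layout).
import torch
from model.peft.tuners.debug_utils import DebugUtils

dbg = DebugUtils("./picture")
probs = torch.softmax(torch.randn(4, 4), dim=-1)  # e.g. 4 tokens routed over 4 experts
dbg.plot_tensor(probs, "gating_probs.png", x_axis="expert", y_axis="token", plot_number=True)
print("orthogonality score:", dbg.check_orthogonal(probs))
```
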
model/peft/tuners/gating.py ADDED
@@ -0,0 +1,124 @@
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.functional as F
4
+
5
+ class Dense(nn.Module):
6
+ def __init__(self, dim: int, num_moe: int) -> None:
7
+ super().__init__()
8
+ self.dim = 64  # note: the input dim is hard-coded to 64 here, overriding the dim argument
9
+ self.num_moe = num_moe
10
+ self.linear_layer = nn.Linear(self.dim, num_moe, bias=False)
11
+ self.softmax = nn.Softmax(dim=-1)
12
+
13
+ def forward(self, x):
14
+ logits = self.linear_layer(x)
15
+ probs = self.softmax(logits)
16
+ return probs
17
+
18
+ class topK(nn.Module):
19
+ def __init__(self, dim: int, num_moe: int) -> None:
20
+ super().__init__()
21
+ self.dim = 64
22
+ self.num_moe = num_moe
23
+ self.linear_layer = nn.Linear(self.dim, num_moe, bias=False)
24
+ self.softmax = nn.Softmax(dim=-1)
25
+
26
+ def forward(self, x, topk=1):
27
+ logits = self.linear_layer(x)
28
+ probs = self.softmax(logits)
29
+ # Use topk to pick the k highest probabilities
30
+ topk_values, topk_indices = torch.topk(probs, k=topk, dim=-1)
31
+ # Create a tensor filled with -inf, with the same shape as probs
32
+ topk_probs = torch.full_like(probs, float('-inf'))
33
+ # Scatter the top-k probability values into it
34
+ topk_probs = topk_probs.scatter_(-1, topk_indices, topk_values)
35
+ # Apply softmax so the top-k values sum to 1
36
+ topk_probs = self.softmax(topk_probs)
37
+ return topk_probs
38
+
39
+ class MLP(nn.Module):
40
+ def __init__(self, dim: int, num_moe: int, hidden_dim: int = 128) -> None:
41
+ super().__init__()
42
+ self.dim = 64
43
+ self.num_moe = num_moe
44
+ # Multi-layer perceptron structure
45
+ self.linear_layer1 = nn.Linear(self.dim, hidden_dim)
46
+ self.activation = nn.GELU()  # GELU activation
47
+ self.linear_layer2 = nn.Linear(hidden_dim, self.num_moe)
48
+ self.softmax = nn.Softmax(dim=-1)
49
+
50
+ def forward(self, x):
51
+ x = self.linear_layer1(x)
52
+ x = self.activation(x)
53
+ logits = self.linear_layer2(x)
54
+ probs = self.softmax(logits)
55
+ return probs
56
+
57
+
58
+ class Noise(nn.Module):
59
+ def __init__(self, dim: int, num_moe: int, noise_std: float = 0.1) -> None:
60
+ super().__init__()
61
+ self.dim = 64
62
+ self.num_moe = num_moe
63
+ self.noise_std = noise_std
64
+ self.linear_layer = nn.Linear(self.dim, num_moe, bias=False)
65
+ self.softmax = nn.Softmax(dim=-1)
66
+
67
+ def forward(self, x):
68
+ logits = self.linear_layer(x)
69
+
70
+ # Add noise
71
+ noise = torch.randn_like(logits) * self.noise_std
72
+ logits = logits + noise
73
+
74
+ probs = self.softmax(logits)
75
+ return probs
76
+
77
+ class MLP_noise(nn.Module):
78
+ def __init__(self, dim: int, num_moe: int, hidden_dim: int = 128, noise_std: float = 0.1) -> None:
79
+ super().__init__()
80
+ self.dim = 64
81
+ self.num_moe = num_moe
82
+ self.noise_std = noise_std
83
+ self.linear1 = nn.Linear(self.dim, hidden_dim, bias=False)
84
+ self.relu = nn.ReLU()
85
+ self.linear2 = nn.Linear(hidden_dim, num_moe, bias=False)
86
+ self.softmax = nn.Softmax(dim=-1)
87
+
88
+ def forward(self, x):
89
+ hidden = self.linear1(x)
90
+ hidden = self.relu(hidden)
91
+ logits = self.linear2(hidden)
92
+
93
+ # 添加噪声
94
+ noise = torch.randn_like(logits) * self.noise_std
95
+ logits = logits + noise
96
+
97
+ probs = self.softmax(logits)
98
+ return probs
99
+
100
+
101
+ class Drop(nn.Module):
102
+ def __init__(self, dim: int, num_moe: int, dropout_rate: float = 0.1) -> None:
103
+ super().__init__()
104
+ self.dim = 64
105
+ self.num_moe = num_moe
106
+ self.linear_layer = nn.Linear(self.dim, num_moe, bias=False)
107
+ self.dropout = nn.Dropout(dropout_rate)
108
+ self.softmax = nn.Softmax(dim=-1)
109
+
110
+ def forward(self, x):
111
+ logits = self.linear_layer(x)
112
+ # 添加Dropout
113
+ logits = self.dropout(logits)
114
+ probs = self.softmax(logits)
115
+ return probs
116
+
117
+ GATING_TO_MODEL_MAPPING = {
118
+ "Dense": Dense,
119
+ "topK": topK,
120
+ "MLP": MLP,
121
+ "Drop": Drop,
122
+ "MLP_noise": MLP_noise,
123
+ "Noise": Noise,
124
+ }
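
All of these routers share one interface: they map an instance embedding to a normalized weight vector over the LoRA experts, and every class hard-codes the input dimension to 64 regardless of the `dim` argument. A minimal routing example under that assumption (the import path and expert count are illustrative):

```python
# Illustrative only: route a batch of 64-dim instance embeddings over 3 experts
# with the Dense gate; swap the key for "topK", "MLP", "Noise", etc.
import torch
from model.peft.tuners.gating import GATING_TO_MODEL_MAPPING

gate = GATING_TO_MODEL_MAPPING["Dense"](dim=64, num_moe=3)
x = torch.randn(8, 64)      # 8 instance embeddings, 64 dims each
weights = gate(x)           # shape (8, 3); rows sum to 1 via softmax
print(weights.sum(dim=-1))  # ~1.0 for every row
```
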
model/peft/tuners/lora.py ADDED
@@ -0,0 +1,1033 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import math
16
+ import re
17
+ import warnings
18
+ from dataclasses import asdict, dataclass, field
19
+ from enum import Enum
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn as nn
24
+ import torch.nn.functional as F
25
+ from transformers.pytorch_utils import Conv1D
26
+
27
+ from ..import_utils import is_bnb_4bit_available, is_bnb_available
28
+ from ..utils import (
29
+ COMMON_LAYERS_PATTERN,
30
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
31
+ ModulesToSaveWrapper,
32
+ PeftConfig,
33
+ PeftType,
34
+ _freeze_adapter,
35
+ _get_submodules,
36
+ transpose,
37
+ )
38
+
39
+
40
+ if is_bnb_available():
41
+ import bitsandbytes as bnb
42
+
43
+
44
+ @dataclass
45
+ class LoraConfig(PeftConfig):
46
+ """
47
+ This is the configuration class to store the configuration of a [`LoraModel`].
48
+
49
+ Args:
50
+ r (`int`): Lora attention dimension.
51
+ target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.
52
+ lora_alpha (`int`): The alpha parameter for Lora scaling.
53
+ lora_dropout (`float`): The dropout probability for Lora layers.
54
+ fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).
55
+ For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
56
+ bias (`str`): Bias type for Lora. Can be 'none', 'all' or 'lora_only'
57
+ modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable
58
+ and saved in the final checkpoint.
59
+ layers_to_transform (`Union[List[int],int]`):
60
+ The layer indexes to transform, if this argument is specified, it will apply the LoRA transformations on
61
+ the layer indexes that are specified in this list. If a single integer is passed, it will apply the LoRA
62
+ transformations on the layer at this index.
63
+ layers_pattern (`str`):
64
+ The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer
65
+ pattern is not in the common layers pattern.
66
+ """
67
+
68
+ r: int = field(default=8, metadata={"help": "Lora attention dimension"})
69
+ target_modules: Optional[Union[List[str], str]] = field(
70
+ default=None,
71
+ metadata={
72
+ "help": "List of module names or regex expression of the module names to replace with Lora."
73
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
74
+ },
75
+ )
76
+ lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"})
77
+ lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"})
78
+ fan_in_fan_out: bool = field(
79
+ default=False,
80
+ metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
81
+ )
82
+ bias: str = field(default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"})
83
+ modules_to_save: Optional[List[str]] = field(
84
+ default=None,
85
+ metadata={
86
+ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. "
87
+ "For example, in Sequence Classification or Token Classification tasks, "
88
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
89
+ },
90
+ )
91
+ init_lora_weights: bool = field(
92
+ default=True,
93
+ metadata={"help": "Whether to initialize the weights of the Lora layers."},
94
+ )
95
+ layers_to_transform: Optional[Union[List, int]] = field(
96
+ default=None,
97
+ metadata={
98
+ "help": "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
99
+ },
100
+ )
101
+ layers_pattern: Optional[str] = field(
102
+ default=None,
103
+ metadata={
104
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
105
+ },
106
+ )
107
+
108
+ def __post_init__(self):
109
+ self.peft_type = PeftType.LORA
110
+
111
+
112
+ class LoraModel(torch.nn.Module):
113
+ """
114
+ Creates Low Rank Adapter (Lora) model from a pretrained transformers model.
115
+
116
+ Args:
117
+ model ([`~transformers.PreTrainedModel`]): The model to be adapted.
118
+ config ([`LoraConfig`]): The configuration of the Lora model.
119
+
120
+ Returns:
121
+ `torch.nn.Module`: The Lora model.
122
+
123
+ Example:
124
+
125
+ ```py
126
+ >>> from transformers import AutoModelForSeq2SeqLM
127
+ >>> from peft import LoraModel, LoraConfig
128
+
129
+ >>> config = LoraConfig(
130
+ ... peft_type="LORA",
131
+ ... task_type="SEQ_2_SEQ_LM",
132
+ ... r=8,
133
+ ... lora_alpha=32,
134
+ ... target_modules=["q", "v"],
135
+ ... lora_dropout=0.01,
136
+ ... )
137
+
138
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
139
+ >>> lora_model = LoraModel(config, model)
140
+ ```
141
+
142
+ ```py
143
+ >>> import transformers
144
+ >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_int8_training
145
+
146
+ >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"]
147
+ >>> config = LoraConfig(
148
+ ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM"
149
+ ... )
150
+
151
+ >>> model = transformers.GPTJForCausalLM.from_pretrained(
152
+ ... "kakaobrain/kogpt",
153
+ ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
154
+ ... pad_token_id=tokenizer.eos_token_id,
155
+ ... use_cache=False,
156
+ ... device_map={"": rank},
157
+ ... torch_dtype=torch.float16,
158
+ ... load_in_8bit=True,
159
+ ... )
160
+ >>> model = prepare_model_for_int8_training(model)
161
+ >>> lora_model = get_peft_model(model, config)
162
+ ```
163
+
164
+ **Attributes**:
165
+ - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
166
+ - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.
167
+ """
168
+
169
+ def __init__(self, model, config, adapter_name):
170
+ super().__init__()
171
+ self.model = model
172
+ self.forward = self.model.forward
173
+ self.peft_config = config
174
+ self.add_adapter(adapter_name, self.peft_config[adapter_name])
175
+
176
+ def add_adapter(self, adapter_name, config=None):
177
+ if config is not None:
178
+ model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config
179
+ config = self._prepare_lora_config(config, model_config)
180
+ self.peft_config[adapter_name] = config
181
+ self._find_and_replace(adapter_name)
182
+ if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none":
183
+ raise ValueError(
184
+ "LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters."
185
+ )
186
+ mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
187
+ if self.peft_config[adapter_name].inference_mode:
188
+ _freeze_adapter(self.model, adapter_name)
189
+
190
+ def _check_quantization_dependency(self):
191
+ loaded_in_4bit = getattr(self.model, "is_loaded_in_4bit", False)
192
+ loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
193
+ if (loaded_in_4bit or loaded_in_8bit) and not is_bnb_available():
194
+ raise ImportError(
195
+ "To use Lora with 8-bit or 4-bit quantization, please install the `bitsandbytes` package. "
196
+ "You can install it with `pip install bitsandbytes`."
197
+ )
198
+
199
+ def _check_target_module_exists(self, lora_config, key):
200
+ if isinstance(lora_config.target_modules, str):
201
+ target_module_found = re.fullmatch(lora_config.target_modules, key)
202
+ else:
203
+ target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)
204
+ is_using_layer_indexes = getattr(lora_config, "layers_to_transform", None) is not None
205
+ layer_indexing_pattern = getattr(lora_config, "layers_pattern", None)
206
+
207
+ if is_using_layer_indexes and target_module_found:
208
+ layers_pattern = COMMON_LAYERS_PATTERN if layer_indexing_pattern is None else layer_indexing_pattern
209
+ layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern
210
+
211
+ for pattern in layers_pattern:
212
+ layer_index = re.match(rf".*.{pattern}\.(\d+)\.*", key)  # raw f-string avoids invalid-escape warnings
213
+ if layer_index is not None:
214
+ layer_index = int(layer_index.group(1))
215
+ if isinstance(lora_config.layers_to_transform, int):
216
+ target_module_found = layer_index == lora_config.layers_to_transform
217
+ else:
218
+ target_module_found = layer_index in lora_config.layers_to_transform
219
+
220
+ break
221
+ else:
222
+ target_module_found = False
223
+ return target_module_found
224
+
225
+ def _create_new_module(self, lora_config, adapter_name, target):
226
+ bias = hasattr(target, "bias") and target.bias is not None
227
+ kwargs = {
228
+ "r": lora_config.r,
229
+ "lora_alpha": lora_config.lora_alpha,
230
+ "lora_dropout": lora_config.lora_dropout,
231
+ "fan_in_fan_out": lora_config.fan_in_fan_out,
232
+ "init_lora_weights": lora_config.init_lora_weights,
233
+ }
234
+ loaded_in_4bit = getattr(self.model, "is_loaded_in_4bit", False)
235
+ loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
236
+
237
+ if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):
238
+ eightbit_kwargs = kwargs.copy()
239
+ eightbit_kwargs.update(
240
+ {
241
+ "has_fp16_weights": target.state.has_fp16_weights,
242
+ "memory_efficient_backward": target.state.memory_efficient_backward,
243
+ "threshold": target.state.threshold,
244
+ "index": target.index,
245
+ }
246
+ )
247
+ new_module = Linear8bitLt(
248
+ adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs
249
+ )
250
+ elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target, bnb.nn.Linear4bit):
251
+ fourbit_kwargs = kwargs.copy()
252
+ fourbit_kwargs.update(
253
+ {
254
+ "compute_dtype": target.compute_dtype,
255
+ "compress_statistics": target.weight.compress_statistics,
256
+ "quant_type": target.weight.quant_type,
257
+ }
258
+ )
259
+ new_module = Linear4bit(adapter_name, target.in_features, target.out_features, bias=bias, **fourbit_kwargs)
260
+ elif isinstance(target, torch.nn.Embedding):
261
+ embedding_kwargs = kwargs.copy()
262
+ embedding_kwargs.pop("fan_in_fan_out", None)
263
+ in_features, out_features = target.num_embeddings, target.embedding_dim
264
+ new_module = Embedding(adapter_name, in_features, out_features, **embedding_kwargs)
265
+ elif isinstance(target, torch.nn.Conv2d):
266
+ out_channels, in_channels = target.weight.size()[:2]
267
+ kernel_size = target.weight.size()[2:]
268
+ stride = target.stride
269
+ padding = target.padding
270
+ new_module = Conv2d(adapter_name, in_channels, out_channels, kernel_size, stride, padding, **kwargs)
271
+ else:
272
+ if isinstance(target, torch.nn.Linear):
273
+ in_features, out_features = target.in_features, target.out_features
274
+ if kwargs["fan_in_fan_out"]:
275
+ warnings.warn(
276
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
277
+ "Setting fan_in_fan_out to False."
278
+ )
279
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
280
+ elif isinstance(target, Conv1D):
281
+ in_features, out_features = (
282
+ target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape
283
+ )
284
+ if not kwargs["fan_in_fan_out"]:
285
+ warnings.warn(
286
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
287
+ "Setting fan_in_fan_out to True."
288
+ )
289
+ kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
290
+ else:
291
+ raise ValueError(
292
+ f"Target module {target} is not supported. "
293
+ f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
294
+ )
295
+ new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs)
296
+
297
+ return new_module
298
+
299
+ def _find_and_replace(self, adapter_name):
300
+ lora_config = self.peft_config[adapter_name]
301
+ self._check_quantization_dependency()
302
+ is_target_modules_in_base_model = False
303
+ key_list = [key for key, _ in self.model.named_modules()]
304
+
305
+ for key in key_list:
306
+ if not self._check_target_module_exists(lora_config, key):
307
+ continue
308
+
309
+ is_target_modules_in_base_model = True
310
+ parent, target, target_name = _get_submodules(self.model, key)
311
+
312
+ if isinstance(target, LoraLayer) and isinstance(target, torch.nn.Conv2d):
313
+ target.update_layer_conv2d(
314
+ adapter_name,
315
+ lora_config.r,
316
+ lora_config.lora_alpha,
317
+ lora_config.lora_dropout,
318
+ lora_config.init_lora_weights,
319
+ )
320
+ elif isinstance(target, LoraLayer):
321
+ target.update_layer(
322
+ adapter_name,
323
+ lora_config.r,
324
+ lora_config.lora_alpha,
325
+ lora_config.lora_dropout,
326
+ lora_config.init_lora_weights,
327
+ )
328
+ else:
329
+ new_module = self._create_new_module(lora_config, adapter_name, target)
330
+ self._replace_module(parent, target_name, new_module, target)
331
+
332
+ if not is_target_modules_in_base_model:
333
+ raise ValueError(
334
+ f"Target modules {lora_config.target_modules} not found in the base model. "
335
+ f"Please check the target modules and try again."
336
+ )
337
+
338
+ def _replace_module(self, parent_module, child_name, new_module, old_module):
339
+ setattr(parent_module, child_name, new_module)
340
+ new_module.weight = old_module.weight
341
+ if hasattr(old_module, "bias"):
342
+ if old_module.bias is not None:
343
+ new_module.bias = old_module.bias
344
+
345
+ if getattr(old_module, "state", None) is not None:
346
+ new_module.state = old_module.state
347
+ new_module.to(old_module.weight.device)
348
+
349
+ # dispatch to correct device
350
+ for name, module in new_module.named_modules():
351
+ if "lora_" in name:
352
+ module.to(old_module.weight.device)
353
+ if "ranknum" in name:
354
+ module.to(old_module.weight.device)
355
+
356
+ def __getattr__(self, name: str):
357
+ """Forward missing attributes to the wrapped module."""
358
+ try:
359
+ return super().__getattr__(name) # defer to nn.Module's logic
360
+ except AttributeError:
361
+ return getattr(self.model, name)
362
+
363
+ def get_peft_config_as_dict(self, inference: bool = False):
364
+ config_dict = {}
365
+ for key, value in self.peft_config.items():
366
+ config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
367
+ if inference:
368
+ config["inference_mode"] = True
369
+ config_dict[key] = config
370
+ return config
371
+
372
+ def _set_adapter_layers(self, enabled=True):
373
+ for module in self.model.modules():
374
+ if isinstance(module, LoraLayer):
375
+ module.disable_adapters = False if enabled else True
376
+
377
+ def enable_adapter_layers(self):
378
+ self._set_adapter_layers(enabled=True)
379
+
380
+ def disable_adapter_layers(self):
381
+ self._set_adapter_layers(enabled=False)
382
+
383
+ def set_adapter(self, adapter_name):
384
+ for module in self.model.modules():
385
+ if isinstance(module, LoraLayer):
386
+ if module.merged:
387
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
388
+ module.unmerge()
389
+ module.active_adapter = adapter_name
390
+
391
+ def merge_adapter(self):
392
+ for module in self.model.modules():
393
+ if isinstance(module, LoraLayer):
394
+ module.merge()
395
+
396
+ def unmerge_adapter(self):
397
+ for module in self.model.modules():
398
+ if isinstance(module, LoraLayer):
399
+ module.unmerge()
400
+
401
+ @staticmethod
402
+ def _prepare_lora_config(peft_config, model_config):
403
+ if peft_config.target_modules is None:
404
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
405
+ raise ValueError("Please specify `target_modules` in `peft_config`")
406
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
407
+ return peft_config
408
+
409
+ def merge_and_unload(self):
410
+ r"""
411
+ This method merges the LoRa layers into the base model. This is needed if someone wants to use the base model
412
+ as a standalone model.
413
+ """
414
+ if getattr(self.config, "model_type", None) == "gpt2":
415
+ raise ValueError("GPT2 models are not supported for merging LORA layers")
416
+
417
+ if getattr(self.model, "is_loaded_in_8bit", False) or getattr(self.model, "is_loaded_in_4bit", False):
418
+ raise ValueError("Cannot merge LORA layers when the model is loaded in 8-bit mode")
419
+
420
+ key_list = [key for key, _ in self.model.named_modules() if "lora" not in key]
421
+ for key in key_list:
422
+ try:
423
+ parent, target, target_name = _get_submodules(self.model, key)
424
+ except AttributeError:
425
+ continue
426
+ if isinstance(target, LoraLayer):
427
+ if isinstance(target, nn.Embedding):
428
+ new_module = torch.nn.Embedding(target.in_features, target.out_features)
429
+ elif isinstance(target, nn.Conv2d):
430
+ new_module = torch.nn.Conv2d(
431
+ target.in_channels,
432
+ target.out_channels,
433
+ kernel_size=target.kernel_size,
434
+ stride=target.stride,
435
+ padding=target.padding,
436
+ dilation=target.dilation,
437
+ )
438
+ else:
439
+ bias = target.bias is not None
440
+ new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)
441
+ target.merge()
442
+ self._replace_module(parent, target_name, new_module, target)
443
+
444
+ # save any additional trainable modules part of `modules_to_save`
445
+ if isinstance(target, ModulesToSaveWrapper):
446
+ setattr(parent, target_name, target.modules_to_save[target.active_adapter])
447
+
448
+ return self.model
449
+
450
+ def add_weighted_adapter(self, adapters, weights, adapter_name):
451
+ if len({self.peft_config[adapter].r for adapter in adapters}) != 1:
452
+ raise ValueError("All adapters must have the same r value")
453
+ self.peft_config[adapter_name] = self.peft_config[adapters[0]]
454
+ self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r
455
+ self._find_and_replace(adapter_name)
456
+ mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
457
+ _freeze_adapter(self.model, adapter_name)
458
+ key_list = [key for key, _ in self.model.named_modules() if "lora" not in key]
459
+ for key in key_list:
460
+ _, target, _ = _get_submodules(self.model, key)
461
+ if isinstance(target, LoraLayer):
462
+ if adapter_name in target.lora_A:
463
+ target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0
464
+ target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0
465
+ for adapter, weight in zip(adapters, weights):
466
+ if adapter not in target.lora_A:
467
+ continue
468
+ target.lora_A[adapter_name].weight.data += (
469
+ target.lora_A[adapter].weight.data * weight * target.scaling[adapter]
470
+ )
471
+ target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight
472
+
473
+ elif adapter_name in target.lora_embedding_A:
474
+ target.lora_embedding_A[adapter_name].data = target.lora_embedding_A[adapter_name].data * 0.0
475
+ target.lora_embedding_B[adapter_name].data = target.lora_embedding_B[adapter_name].data * 0.0
476
+ for adapter, weight in zip(adapters, weights):
477
+ if adapter not in target.lora_embedding_A:
478
+ continue
479
+ target.lora_embedding_A[adapter_name].data += (
480
+ target.lora_embedding_A[adapter].data * weight * target.scaling[adapter]
481
+ )
482
+ target.lora_embedding_B[adapter_name].data += target.lora_embedding_B[adapter].data * weight
483
+
484
+
485
+ # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
486
+ # and modified to work with PyTorch FSDP
487
+
488
+
489
+ # ------------------------------------------------------------------------------------------
490
+ # Copyright (c) Microsoft Corporation. All rights reserved.
491
+ # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
492
+ # ------------------------------------------------------------------------------------------
493
+
494
+
495
+ # had to adapt it for `lora_only` to work
496
+ def mark_only_lora_as_trainable(model: nn.Module, bias: str = "none") -> None:
497
+ for n, p in model.named_parameters():
498
+ if "lora_" not in n:
499
+ p.requires_grad = False
500
+ if bias == "none":
501
+ return
502
+ elif bias == "all":
503
+ for n, p in model.named_parameters():
504
+ if "bias" in n:
505
+ p.requires_grad = True
506
+ elif bias == "lora_only":
507
+ for m in model.modules():
508
+ if isinstance(m, LoraLayer) and hasattr(m, "bias") and m.bias is not None:
509
+ m.bias.requires_grad = True
510
+ else:
511
+ raise NotImplementedError
512
+
513
+
514
+ class LoraLayer:
515
+ def __init__(self, in_features: int, out_features: int, **kwargs):
516
+ self.r = {}
517
+ self.lora_alpha = {}
518
+ self.scaling = {}
519
+ self.lora_dropout = nn.ModuleDict({})
520
+ self.lora_A = nn.ModuleDict({})
521
+ self.lora_B = nn.ModuleDict({})
522
+ # For Embedding layer
523
+ self.lora_embedding_A = nn.ParameterDict({})
524
+ self.lora_embedding_B = nn.ParameterDict({})
525
+ # Mark the weight as unmerged
526
+ self.merged = False
527
+ self.disable_adapters = False
528
+ self.in_features = in_features
529
+ self.out_features = out_features
530
+ self.kwargs = kwargs
531
+
532
+ def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
533
+ self.r[adapter_name] = r
534
+ self.lora_alpha[adapter_name] = lora_alpha
535
+ if lora_dropout > 0.0:
536
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
537
+ else:
538
+ lora_dropout_layer = nn.Identity()
539
+
540
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
541
+ # Actual trainable parameters
542
+ if r > 0:
543
+ self.lora_A.update(nn.ModuleDict({adapter_name: nn.Linear(self.in_features, r, bias=False)}))
544
+ self.lora_B.update(nn.ModuleDict({adapter_name: nn.Linear(r, self.out_features, bias=False)}))
545
+ self.scaling[adapter_name] = lora_alpha / r
546
+ if init_lora_weights:
547
+ self.reset_lora_parameters(adapter_name)
548
+ self.to(self.weight.device)
549
+
550
+ def update_layer_conv2d(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
551
+ self.r[adapter_name] = r
552
+ self.lora_alpha[adapter_name] = lora_alpha
553
+ if lora_dropout > 0.0:
554
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
555
+ else:
556
+ lora_dropout_layer = nn.Identity()
557
+
558
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
559
+ # Actual trainable parameters
560
+ if r > 0:
561
+ kernel_size = self.kwargs["kernel_size"]
562
+ stride = self.kwargs["stride"]
563
+ padding = self.kwargs["padding"]
564
+ self.lora_A.update(
565
+ nn.ModuleDict({adapter_name: nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False)})
566
+ )
567
+ self.lora_B.update(
568
+ nn.ModuleDict({adapter_name: nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False)})
569
+ )
570
+ self.scaling[adapter_name] = lora_alpha / r
571
+ if init_lora_weights:
572
+ self.reset_lora_parameters(adapter_name)
573
+ self.to(self.weight.device)
574
+
575
+ def update_layer_embedding(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
576
+ self.r[adapter_name] = r
577
+ self.lora_alpha[adapter_name] = lora_alpha
578
+ if lora_dropout > 0.0:
579
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
580
+ else:
581
+ lora_dropout_layer = nn.Identity()
582
+
583
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
584
+ # Actual trainable parameters
585
+ if r > 0:
586
+ self.lora_embedding_A.update(
587
+ nn.ParameterDict({adapter_name: nn.Parameter(self.weight.new_zeros((r, self.in_features)))})
588
+ )
589
+ self.lora_embedding_B.update(
590
+ nn.ParameterDict({adapter_name: nn.Parameter(self.weight.new_zeros((self.out_features, r)))})
591
+ )
592
+ self.scaling[adapter_name] = lora_alpha / r
593
+ if init_lora_weights:
594
+ self.reset_lora_parameters(adapter_name)
595
+ self.to(self.weight.device)
596
+
597
+ def reset_lora_parameters(self, adapter_name):
598
+ if adapter_name in self.lora_A.keys():
599
+ # initialize A the same way as the default for nn.Linear and B to zero
600
+ nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
601
+ nn.init.zeros_(self.lora_B[adapter_name].weight)
602
+ if adapter_name in self.lora_embedding_A.keys():
603
+ # initialize a the same way as the default for nn.linear and b to zero
604
+ nn.init.zeros_(self.lora_embedding_A[adapter_name])
605
+ nn.init.normal_(self.lora_embedding_B[adapter_name])
606
+
607
+
608
+ class Linear(nn.Linear, LoraLayer):
609
+ # Lora implemented in a dense layer
610
+ def __init__(
611
+ self,
612
+ adapter_name: str,
613
+ in_features: int,
614
+ out_features: int,
615
+ r: int = 0,
616
+ lora_alpha: int = 1,
617
+ lora_dropout: float = 0.0,
618
+ fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
619
+ **kwargs,
620
+ ):
621
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
622
+
623
+ nn.Linear.__init__(self, in_features, out_features, **kwargs)
624
+ LoraLayer.__init__(self, in_features=in_features, out_features=out_features)
625
+ # Freezing the pre-trained weight matrix
626
+ self.weight.requires_grad = False
627
+
628
+ self.fan_in_fan_out = fan_in_fan_out
629
+ if fan_in_fan_out:
630
+ self.weight.data = self.weight.data.T
631
+
632
+ nn.Linear.reset_parameters(self)
633
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
634
+ self.active_adapter = adapter_name
635
+
636
+ def merge(self):
637
+ if self.active_adapter not in self.lora_A.keys():
638
+ return
639
+ if self.merged:
640
+ warnings.warn("Already merged. Nothing to do.")
641
+ return
642
+ if self.r[self.active_adapter] > 0:
643
+ self.weight.data += (
644
+ transpose(
645
+ self.lora_B[self.active_adapter].weight @ self.lora_A[self.active_adapter].weight,
646
+ self.fan_in_fan_out,
647
+ )
648
+ * self.scaling[self.active_adapter]
649
+ )
650
+ self.merged = True
651
+
652
+ def unmerge(self):
653
+ if self.active_adapter not in self.lora_A.keys():
654
+ return
655
+ if not self.merged:
656
+ warnings.warn("Already unmerged. Nothing to do.")
657
+ return
658
+ if self.r[self.active_adapter] > 0:
659
+ self.weight.data -= (
660
+ transpose(
661
+ self.lora_B[self.active_adapter].weight @ self.lora_A[self.active_adapter].weight,
662
+ self.fan_in_fan_out,
663
+ )
664
+ * self.scaling[self.active_adapter]
665
+ )
666
+ self.merged = False
667
+
668
+ def forward(self, x: torch.Tensor):
669
+ previous_dtype = x.dtype
670
+ if self.active_adapter not in self.lora_A.keys():
671
+ return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
672
+ if self.disable_adapters:
673
+ if self.r[self.active_adapter] > 0 and self.merged:
674
+ self.unmerge()
675
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
676
+ elif self.r[self.active_adapter] > 0 and not self.merged:
677
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
678
+
679
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
680
+
681
+ result += (
682
+ self.lora_B[self.active_adapter](
683
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
684
+ )
685
+ * self.scaling[self.active_adapter]
686
+ )
687
+ else:
688
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
689
+
690
+ result = result.to(previous_dtype)
691
+
692
+ return result
693
+
694
+
695
+ class Embedding(nn.Embedding, LoraLayer):
696
+ # LoRA implemented in a Embedding layer
697
+ def __init__(
698
+ self,
699
+ adapter_name: str,
700
+ num_embeddings: int,
701
+ embedding_dim: int,
702
+ r: int = 0,
703
+ lora_alpha: int = 1,
704
+ lora_dropout: float = 0.0,
705
+ **kwargs,
706
+ ):
707
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
708
+
709
+ nn.Embedding.__init__(self, num_embeddings, embedding_dim, **kwargs)
710
+ LoraLayer.__init__(self, in_features=num_embeddings, out_features=embedding_dim)
711
+
712
+ self.weight.requires_grad = False
713
+
714
+ nn.Embedding.reset_parameters(self)
715
+ self.update_layer_embedding(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
716
+ self.active_adapter = adapter_name
717
+
718
+ def unmerge(self, mode: bool = True):
719
+ if not self.merged:
720
+ warnings.warn("Already unmerged. Nothing to do.")
721
+ return
722
+ if self.r[self.active_adapter] > 0:
723
+ self.weight.data -= (
724
+ transpose(
725
+ self.lora_embedding_B[self.active_adapter] @ self.lora_embedding_A[self.active_adapter], True
726
+ )
727
+ * self.scaling[self.active_adapter]
728
+ )
729
+ self.merged = False
730
+
731
+ def merge(self):
732
+ if self.merged:
733
+ warnings.warn("Already merged. Nothing to do.")
734
+ return
735
+ if self.r[self.active_adapter] > 0:
736
+ self.weight.data += (
737
+ transpose(
738
+ self.lora_embedding_B[self.active_adapter] @ self.lora_embedding_A[self.active_adapter], True
739
+ )
740
+ * self.scaling[self.active_adapter]
741
+ )
742
+ self.merged = True
743
+
744
+ def forward(self, x: torch.Tensor):
745
+ if self.disable_adapters:
746
+ if self.r[self.active_adapter] > 0 and self.merged:
747
+ self.weight.data -= (
748
+ transpose(
749
+ self.lora_embedding_B[self.active_adapter]
750
+ @ self.lora_embedding_A[self.active_adapter],
751
+ True,
752
+ )
753
+ * self.scaling[self.active_adapter]
754
+ )
755
+ self.merged = False
756
+ return nn.Embedding.forward(self, x)
757
+
758
+ elif self.r[self.active_adapter] > 0 and not self.merged:
759
+ result = nn.Embedding.forward(self, x)
760
+ if self.r[self.active_adapter] > 0:
761
+ after_A = F.embedding(
762
+ x,
763
+ self.lora_embedding_A[self.active_adapter].T,
764
+ self.padding_idx,
765
+ self.max_norm,
766
+ self.norm_type,
767
+ self.scale_grad_by_freq,
768
+ self.sparse,
769
+ )
770
+ result += (after_A @ self.lora_embedding_B[self.active_adapter].T) * self.scaling[self.active_adapter]
771
+ return result
772
+ else:
773
+ return nn.Embedding.forward(self, x)
774
+
775
+
776
+ class Conv2d(nn.Conv2d, LoraLayer):
777
+ # Lora implemented in a conv2d layer
778
+ def __init__(
779
+ self,
780
+ adapter_name: str,
781
+ in_channels: int,
782
+ out_channels: int,
783
+ kernel_size: Union[int, Tuple[int]],
784
+ stride: Union[int, Tuple[int]] = 1,
785
+ padding: Union[int, Tuple[int]] = 0,
786
+ r: int = 0,
787
+ lora_alpha: int = 1,
788
+ lora_dropout: float = 0.0,
789
+ **kwargs,
790
+ ):
791
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
792
+
793
+ nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding)
794
+ LoraLayer.__init__(
795
+ self,
796
+ in_features=in_channels,
797
+ out_features=out_channels,
798
+ kernel_size=kernel_size,
799
+ stride=stride,
800
+ padding=padding,
801
+ )
802
+ # Freezing the pre-trained weight matrix
803
+ self.weight.requires_grad = False
804
+
805
+ nn.Conv2d.reset_parameters(self)
806
+ self.update_layer_conv2d(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
807
+ self.active_adapter = adapter_name
808
+
809
+ def merge(self):
810
+ if self.active_adapter not in self.lora_A.keys():
811
+ return
812
+ if self.merged:
813
+ warnings.warn("Already merged. Nothing to do.")
814
+ return
815
+ if self.r[self.active_adapter] > 0:
816
+ # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117
817
+ if self.weight.size()[2:4] == (1, 1):
818
+ # conv2d 1x1
819
+ self.weight.data += (
820
+ self.lora_B[self.active_adapter].weight.squeeze(3).squeeze(2)
821
+ @ self.lora_A[self.active_adapter].weight.squeeze(3).squeeze(2)
822
+ ).unsqueeze(2).unsqueeze(3) * self.scaling[self.active_adapter]
823
+ else:
824
+ # conv2d 3x3
825
+ self.weight.data += (
826
+ F.conv2d(
827
+ self.lora_A[self.active_adapter].weight.permute(1, 0, 2, 3),
828
+ self.lora_B[self.active_adapter].weight,
829
+ ).permute(1, 0, 2, 3)
830
+ * self.scaling[self.active_adapter]
831
+ )
832
+ self.merged = True
833
+
834
+ def unmerge(self):
835
+ if self.active_adapter not in self.lora_A.keys():
836
+ return
837
+ if not self.merged:
838
+ warnings.warn("Already unmerged. Nothing to do.")
839
+ return
840
+ if self.r[self.active_adapter] > 0:
841
+ if self.weight.size()[2:4] == (1, 1):
842
+ # conv2d 1x1
843
+ self.weight.data -= (
844
+ self.lora_B[self.active_adapter].weight.squeeze(3).squeeze(2)
845
+ @ self.lora_A[self.active_adapter].weight.squeeze(3).squeeze(2)
846
+ ).unsqueeze(2).unsqueeze(3) * self.scaling[self.active_adapter]
847
+ else:
848
+ # conv2d 3x3
849
+ self.weight.data -= (
850
+ F.conv2d(
851
+ self.lora_A[self.active_adapter].weight.permute(1, 0, 2, 3),
852
+ self.lora_B[self.active_adapter].weight,
853
+ ).permute(1, 0, 2, 3)
854
+ * self.scaling[self.active_adapter]
855
+ )
856
+ self.merged = False
857
+
858
+ def forward(self, x: torch.Tensor):
859
+ previous_dtype = x.dtype
860
+
861
+ if self.active_adapter not in self.lora_A.keys():
862
+ return F.conv2d(
863
+ x,
864
+ self.weight,
865
+ bias=self.bias,
866
+ stride=self.stride,
867
+ padding=self.padding,
868
+ dilation=self.dilation,
869
+ groups=self.groups,
870
+ )
871
+ if self.disable_adapters:
872
+ if self.r[self.active_adapter] > 0 and self.merged:
873
+ self.unmerge()
874
+ result = F.conv2d(
875
+ x,
876
+ self.weight,
877
+ bias=self.bias,
878
+ stride=self.stride,
879
+ padding=self.padding,
880
+ dilation=self.dilation,
881
+ groups=self.groups,
882
+ )
883
+ elif self.r[self.active_adapter] > 0 and not self.merged:
884
+ result = F.conv2d(
885
+ x,
886
+ self.weight,
887
+ bias=self.bias,
888
+ stride=self.stride,
889
+ padding=self.padding,
890
+ dilation=self.dilation,
891
+ groups=self.groups,
892
+ )
893
+
894
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
895
+
896
+ result += (
897
+ self.lora_B[self.active_adapter](
898
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
899
+ )
900
+ * self.scaling[self.active_adapter]
901
+ )
902
+ else:
903
+ result = F.conv2d(
904
+ x,
905
+ self.weight,
906
+ bias=self.bias,
907
+ stride=self.stride,
908
+ padding=self.padding,
909
+ dilation=self.dilation,
910
+ groups=self.groups,
911
+ )
912
+
913
+ result = result.to(previous_dtype)
914
+
915
+ return result
916
+
917
+
918
+ if is_bnb_available():
919
+
920
+ class Linear8bitLt(bnb.nn.Linear8bitLt, LoraLayer):
921
+ # Lora implemented in a dense layer
922
+ def __init__(
923
+ self,
924
+ adapter_name,
925
+ in_features,
926
+ out_features,
927
+ r: int = 0,
928
+ lora_alpha: int = 1,
929
+ lora_dropout: float = 0.0,
930
+ **kwargs,
931
+ ):
932
+ bnb.nn.Linear8bitLt.__init__(
933
+ self,
934
+ in_features,
935
+ out_features,
936
+ bias=kwargs.get("bias", True),
937
+ has_fp16_weights=kwargs.get("has_fp16_weights", True),
938
+ memory_efficient_backward=kwargs.get("memory_efficient_backward", False),
939
+ threshold=kwargs.get("threshold", 0.0),
940
+ index=kwargs.get("index", None),
941
+ )
942
+ LoraLayer.__init__(self, in_features=in_features, out_features=out_features)
943
+
944
+ # Freezing the pre-trained weight matrix
945
+ self.weight.requires_grad = False
946
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
947
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
948
+ self.active_adapter = adapter_name
949
+
950
+ def forward(self, x: torch.Tensor):
951
+ result = super().forward(x)
952
+
953
+ if self.disable_adapters or self.active_adapter not in self.lora_A.keys():
954
+ return result
955
+ elif self.r[self.active_adapter] > 0:
956
+ if not torch.is_autocast_enabled():
957
+ expected_dtype = result.dtype
958
+
959
+ if x.dtype != torch.float32:
960
+ x = x.float()
961
+ output = (
962
+ self.lora_B[self.active_adapter](
963
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
964
+ ).to(expected_dtype)
965
+ * self.scaling[self.active_adapter]
966
+ )
967
+ else:
968
+ output = (
969
+ self.lora_B[self.active_adapter](
970
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
971
+ )
972
+ * self.scaling[self.active_adapter]
973
+ )
974
+ result += output
975
+ return result
976
+
977
+ if is_bnb_4bit_available():
978
+
979
+ class Linear4bit(bnb.nn.Linear4bit, LoraLayer):
980
+ # Lora implemented in a dense layer
981
+ def __init__(
982
+ self,
983
+ adapter_name,
984
+ in_features,
985
+ out_features,
986
+ r: int = 0,
987
+ lora_alpha: int = 1,
988
+ lora_dropout: float = 0.0,
989
+ **kwargs,
990
+ ):
991
+ bnb.nn.Linear4bit.__init__(
992
+ self,
993
+ in_features,
994
+ out_features,
995
+ bias=kwargs.get("bias", True),
996
+ compute_dtype=kwargs.get("compute_dtype", torch.float32),
997
+ compress_statistics=kwargs.get("compress_statistics", True),
998
+ quant_type=kwargs.get("quant_type", "nf4"),
999
+ )
1000
+ LoraLayer.__init__(self, in_features=in_features, out_features=out_features)
1001
+
1002
+ # Freezing the pre-trained weight matrix
1003
+ self.weight.requires_grad = False
1004
+
1005
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
1006
+ self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
1007
+ self.active_adapter = adapter_name
1008
+
1009
+ def forward(self, x: torch.Tensor):
1010
+ result = super().forward(x)
1011
+
1012
+ if self.disable_adapters or self.active_adapter not in self.lora_A.keys():
1013
+ return result
1014
+ elif self.r[self.active_adapter] > 0:
1015
+ result = result.clone()
1016
+ if not torch.is_autocast_enabled():
1017
+ expected_dtype = result.dtype
1018
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
1019
+ output = (
1020
+ self.lora_B[self.active_adapter](
1021
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
1022
+ ).to(expected_dtype)
1023
+ * self.scaling[self.active_adapter]
1024
+ )
1025
+ else:
1026
+ output = (
1027
+ self.lora_B[self.active_adapter](
1028
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
1029
+ )
1030
+ * self.scaling[self.active_adapter]
1031
+ )
1032
+ result += output
1033
+ return result
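
Note: `Linear.merge()` above folds the low-rank update into the frozen weight as `weight += (lora_alpha / r) * B @ A`, so the merged layer reproduces the unmerged two-path forward exactly. A minimal standalone sketch of that identity (sizes and values are illustrative assumptions, not taken from this repository):

```python
import torch

# Illustrative sizes only (assumptions, not from the repo).
in_features, out_features, r, lora_alpha = 16, 32, 4, 8
scaling = lora_alpha / r

W = torch.randn(out_features, in_features)   # frozen pre-trained weight
A = torch.randn(r, in_features)              # lora_A.weight
B = torch.randn(out_features, r)             # lora_B.weight (zero at init, non-zero after training)
x = torch.randn(3, in_features)

# Unmerged: frozen path plus scaled low-rank path, as in Linear.forward.
unmerged = x @ W.T + (x @ A.T) @ B.T * scaling
# Merged: what Linear.merge() writes into self.weight.data.
merged = x @ (W + (B @ A) * scaling).T

print(torch.allclose(unmerged, merged, atol=1e-5))  # True
```
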
model/peft/tuners/moelora.py ADDED
@@ -0,0 +1,1164 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import traceback
17
+ import pickle
18
+ import math
19
+ import re
20
+ import warnings
21
+ from dataclasses import asdict, dataclass, field
22
+ from enum import Enum
23
+ from typing import List, Optional, Tuple, Union
24
+ import itertools
25
+ import copy
26
+ import numpy as np
27
+
28
+ import torch
29
+ import torch.nn as nn
30
+ import torch.nn.functional as F
31
+ from transformers.pytorch_utils import Conv1D
32
+ from .gating import GATING_TO_MODEL_MAPPING
33
+
34
+ from ..import_utils import is_bnb_4bit_available, is_bnb_available
35
+ from ..utils import (
36
+ COMMON_LAYERS_PATTERN,
37
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
38
+ ModulesToSaveWrapper,
39
+ PeftConfig,
40
+ PeftType,
41
+ _freeze_adapter,
42
+ _get_submodules,
43
+ transpose,
44
+ )
45
+
46
+ if is_bnb_available():
47
+ import bitsandbytes as bnb
48
+
49
+
50
+ @dataclass
51
+ class MoeLoraConfig(PeftConfig):
52
+ """
53
+ This is the configuration class to store the configuration of a [`MoeLoraModel`].
54
+
55
+ Args:
56
+ r (`int`): Lora attention dimension.
57
+ target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.
58
+ lora_alpha (`int`): The alpha parameter for Lora scaling.
59
+ lora_dropout (`float`): The dropout probability for Lora layers.
60
+ fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).
61
+ For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:
62
+ bias (`str`): Bias type for Lora. Can be 'none', 'all' or 'lora_only'
63
+ modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable
64
+ and saved in the final checkpoint.
65
+ layers_to_transform (`Union[List[int],int]`):
66
+ The layer indexes to transform, if this argument is specified, it will apply the LoRA transformations on
67
+ the layer indexes that are specified in this list. If a single integer is passed, it will apply the LoRA
68
+ transformations on the layer at this index.
69
+ layers_pattern (`str`):
70
+ The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer
71
+ pattern is not in the common layers pattern.
72
+ """
73
+
74
+ r: int = field(default=8, metadata={"help": "Lora attention dimension"})
75
+ num_moe: int = field(default=8, metadata={"help": "Num experts of MoeLora"})
76
+ gating: str = field(default="Standard", metadata={"help": "Select the gating network for each MoeLora"})
77
+ target_modules: Optional[Union[List[str], str]] = field(
78
+ default=None,
79
+ metadata={
80
+ "help": "List of module names or regex expression of the module names to replace with Lora."
81
+ "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
82
+ },
83
+ )
84
+ lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"})
85
+ lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"})
86
+ fan_in_fan_out: bool = field(
87
+ default=False,
88
+ metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
89
+ )
90
+ bias: str = field(default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"})
91
+ modules_to_save: Optional[List[str]] = field(
92
+ default=None,
93
+ metadata={
94
+ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. "
95
+ "For example, in Sequence Classification or Token Classification tasks, "
96
+ "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
97
+ },
98
+ )
99
+ init_lora_weights: bool = field(
100
+ default=True,
101
+ metadata={"help": "Whether to initialize the weights of the Lora layers."},
102
+ )
103
+ layers_to_transform: Optional[Union[List, int]] = field(
104
+ default=None,
105
+ metadata={
106
+ "help": "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
107
+ },
108
+ )
109
+ layers_pattern: Optional[str] = field(
110
+ default=None,
111
+ metadata={
112
+ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
113
+ },
114
+ )
115
+
116
+ def __post_init__(self):
117
+ self.peft_type = PeftType.MOELORA
118
+
119
+
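
A hypothetical usage sketch for the configuration defined above (the import path and field values are assumptions for illustration, not taken from the repository's scripts). Note that `r` should be divisible by `num_moe`, since each expert receives a rank `r // num_moe` slice of `lora_A`/`lora_B` and the scaling is `lora_alpha / (r // num_moe)` (see `MoeLoraLayer.update_layer` below):

```python
# Hypothetical construction of MoeLoraConfig; the import path assumes this
# repository's layout and all values are illustrative.
from model.peft.tuners.moelora import MoeLoraConfig

config = MoeLoraConfig(
    r=16,                          # total LoRA rank, shared across experts
    num_moe=4,                     # number of experts
    gating="Standard",             # key into GATING_TO_MODEL_MAPPING
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)
print(config.peft_type)            # PeftType.MOELORA (set in __post_init__)
```
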
120
+ class MoeLoraModel(torch.nn.Module):
121
+ """
122
+ Creates Low Rank Adapter (Lora) model from a pretrained transformers model.
123
+
124
+ Args:
125
+ model ([`~transformers.PreTrainedModel`]): The model to be adapted.
126
+ config ([`LoraConfig`]): The configuration of the Lora model.
127
+
128
+ Returns:
129
+ `torch.nn.Module`: The Lora model.
130
+
131
+ Example:
132
+
133
+ ```py
134
+ >>> from transformers import AutoModelForSeq2SeqLM
135
+ >>> from moelora import MoeLoraModel, MoeLoraConfig
136
+
137
+ >>> config = LoraConfig(
138
+ ... peft_type="LORA",
139
+ ... task_type="SEQ_2_SEQ_LM",
140
+ ... r=8,
141
+ ... lora_alpha=32,
142
+ ... target_modules=["q", "v"],
143
+ ... lora_dropout=0.01,
144
+ ... )
145
+
146
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
147
+ >>> lora_model = MoeLoraModel(config, model)
148
+ ```
149
+
150
+ ```py
151
+ >>> import transformers
152
+ >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_int8_training
153
+
154
+ >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"]
155
+ >>> config = LoraConfig(
156
+ ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM"
157
+ ... )
158
+
159
+ >>> model = transformers.GPTJForCausalLM.from_pretrained(
160
+ ... "kakaobrain/kogpt",
161
+ ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
162
+ ... pad_token_id=tokenizer.eos_token_id,
163
+ ... use_cache=False,
164
+ ... device_map={"": rank},
165
+ ... torch_dtype=torch.float16,
166
+ ... load_in_8bit=True,
167
+ ... )
168
+ >>> model = prepare_model_for_int8_training(model)
169
+ >>> lora_model = get_peft_model(model, config)
170
+ ```
171
+
172
+ **Attributes**:
173
+ - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
174
+ - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.
175
+ """
176
+
177
+ def __init__(self, model, config, adapter_name):
178
+ super().__init__()
179
+ self.model = model
180
+
181
+ # self.forward = self.model.forward
182
+
183
+
184
+ self.self_model_generate = self.model.generate
185
+ self.model.generate = self.generate
186
+
187
+ """self.self_model_forward = self.model.forward
188
+ self.model.forward = self.forward"""
189
+
190
+ self.peft_config = config
191
+ self.global_user_embeds = []
192
+ self.gate_weights = []
193
+ self.add_adapter(adapter_name, self.peft_config[adapter_name])
194
+
195
+ def forward(self, **kwargs):
196
+ self.global_user_embeds.clear()
197
+ user_embeds = kwargs['user_embeds']
198
+ self.global_user_embeds.extend([user_embeds])
199
+
200
+ self.gate_weights.clear()
201
+ gate_weights = kwargs['gate_weights']
202
+ self.gate_weights.extend([gate_weights])
203
+
204
+ return self.model(**kwargs)
205
+ # return self.self_model_forward(**kwargs)
206
+
207
+ def generate(self, **kwargs):
208
+ self.global_user_embeds.clear()
209
+ user_embeds = kwargs['user_embeds']
210
+ self.global_user_embeds.extend([user_embeds])
211
+
212
+ self.gate_weights.clear()
213
+ gate_weights = kwargs['gate_weights']
214
+ # print("generate, gate_weights:", gate_weights)
215
+ self.gate_weights.extend([gate_weights])
216
+
217
+ return self.self_model_generate(**kwargs)
218
+
219
+ def add_adapter(self, adapter_name, config=None):
220
+ if config is not None:
221
+ model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config
222
+ config = self._prepare_moelora_config(config, model_config)
223
+ self.peft_config[adapter_name] = config
224
+ self._find_and_replace(adapter_name)
225
+ if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none":
226
+ raise ValueError(
227
+ "MoeLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters."
228
+ )
229
+ mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
230
+ if self.peft_config[adapter_name].inference_mode:
231
+ _freeze_adapter(self.model, adapter_name)
232
+
233
+ def _check_quantization_dependency(self):
234
+ loaded_in_4bit = getattr(self.model, "is_loaded_in_4bit", False)
235
+ loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
236
+ if (loaded_in_4bit or loaded_in_8bit) and not is_bnb_available():
237
+ raise ImportError(
238
+ "To use Lora with 8-bit or 4-bit quantization, please install the `bitsandbytes` package. "
239
+ "You can install it with `pip install bitsandbytes`."
240
+ )
241
+
242
+ def _check_target_module_exists(self, moelora_config, key):
243
+ if isinstance(moelora_config.target_modules, str):
244
+ target_module_found = re.fullmatch(moelora_config.target_modules, key)
245
+ else:
246
+ target_module_found = any(key.endswith(target_key) for target_key in moelora_config.target_modules)
247
+ is_using_layer_indexes = getattr(moelora_config, "layers_to_transform", None) is not None
248
+ layer_indexing_pattern = getattr(moelora_config, "layers_pattern", None)
249
+
250
+ if is_using_layer_indexes and target_module_found:
251
+ layers_pattern = COMMON_LAYERS_PATTERN if layer_indexing_pattern is None else layer_indexing_pattern
252
+ layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern
253
+
254
+ for pattern in layers_pattern:
255
+ layer_index = re.match(f".*.{pattern}\.(\d+)\.*", key)
256
+ if layer_index is not None:
257
+ layer_index = int(layer_index.group(1))
258
+ if isinstance(moelora_config.layers_to_transform, int):
259
+ target_module_found = layer_index == moelora_config.layers_to_transform
260
+ else:
261
+ target_module_found = layer_index in moelora_config.layers_to_transform
262
+
263
+ break
264
+ else:
265
+ target_module_found = False
266
+ return target_module_found
267
+
268
+ def _create_new_module(self, moelora_config, adapter_name, target, **kwargs):
269
+ bias = hasattr(target, "bias") and target.bias is not None
270
+ kwargs = {
271
+ "r": moelora_config.r,
272
+
273
+ "num_moe": moelora_config.num_moe,
274
+ "gating": moelora_config.gating,
275
+ "global_user_embeds": self.global_user_embeds,
276
+
277
+ "gate_weights": self.gate_weights,
278
+
279
+ "lora_alpha": moelora_config.lora_alpha,
280
+ "lora_dropout": moelora_config.lora_dropout,
281
+ "fan_in_fan_out": moelora_config.fan_in_fan_out,
282
+ "init_lora_weights": moelora_config.init_lora_weights,
283
+ }
284
+ loaded_in_4bit = getattr(self.model, "is_loaded_in_4bit", False)
285
+ loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
286
+
287
+ if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):
288
+ eightbit_kwargs = kwargs.copy()
289
+ eightbit_kwargs.update(
290
+ {
291
+ "has_fp16_weights": target.state.has_fp16_weights,
292
+ "memory_efficient_backward": target.state.memory_efficient_backward,
293
+ "threshold": target.state.threshold,
294
+ "index": target.index,
295
+ }
296
+ )
297
+ eightbit_kwargs.pop('fan_in_fan_out', None)
298
+ eightbit_kwargs.pop('init_lora_weights', None)
299
+ new_module = Linear8bitLt(
300
+ adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs
301
+ )
302
+ elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target, bnb.nn.Linear4bit):
303
+ fourbit_kwargs = kwargs.copy()
304
+ fourbit_kwargs.update(
305
+ {
306
+ "compute_dtype": target.compute_dtype,
307
+ "compress_statistics": target.weight.compress_statistics,
308
+ "quant_type": target.weight.quant_type,
309
+ }
310
+ )
311
+ new_module = Linear4bit(adapter_name, target.in_features, target.out_features, bias=bias, **fourbit_kwargs)
312
+ elif isinstance(target, torch.nn.Embedding):
313
+ embedding_kwargs = kwargs.copy()
314
+ embedding_kwargs.pop("fan_in_fan_out", None)
315
+ in_features, out_features = target.num_embeddings, target.embedding_dim
316
+ new_module = Embedding(adapter_name, in_features, out_features, **embedding_kwargs)
317
+ elif isinstance(target, torch.nn.Conv2d):
318
+ out_channels, in_channels = target.weight.size()[:2]
319
+ kernel_size = target.weight.size()[2:]
320
+ stride = target.stride
321
+ padding = target.padding
322
+ new_module = Conv2d(adapter_name, in_channels, out_channels, kernel_size, stride, padding, **kwargs)
323
+ else:
324
+ if isinstance(target, torch.nn.Linear):
325
+ in_features, out_features = target.in_features, target.out_features
326
+ if kwargs["fan_in_fan_out"]:
327
+ warnings.warn(
328
+ "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
329
+ "Setting fan_in_fan_out to False."
330
+ )
331
+ kwargs["fan_in_fan_out"] = moelora_config.fan_in_fan_out = False
332
+ elif isinstance(target, Conv1D):
333
+ in_features, out_features = (
334
+ target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape
335
+ )
336
+ if not kwargs["fan_in_fan_out"]:
337
+ warnings.warn(
338
+ "fan_in_fan_out is set to False but the target module is `Conv1D`. "
339
+ "Setting fan_in_fan_out to True."
340
+ )
341
+ kwargs["fan_in_fan_out"] = moelora_config.fan_in_fan_out = True
342
+ else:
343
+ raise ValueError(
344
+ f"Target module {target} is not supported. "
345
+ f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
346
+ )
347
+ new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs)
348
+
349
+ return new_module
350
+
351
+ def _find_and_replace(self, adapter_name):
352
+ moelora_config = self.peft_config[adapter_name]
353
+ self._check_quantization_dependency()
354
+ is_target_modules_in_base_model = False
355
+ key_list = [key for key, _ in self.model.named_modules()]
356
+
357
+ for key in key_list:
358
+ if not self._check_target_module_exists(moelora_config, key):
359
+ continue
360
+
361
+ is_target_modules_in_base_model = True
362
+ parent, target, target_name = _get_submodules(self.model, key)
363
+
364
+ if isinstance(target, MoeLoraLayer) and isinstance(target, torch.nn.Conv2d):
365
+ target.update_layer_conv2d(
366
+ adapter_name,
367
+ moelora_config.r,
368
+ moelora_config.lora_alpha,
369
+ moelora_config.lora_dropout,
370
+ moelora_config.init_lora_weights,
371
+ )
372
+ elif isinstance(target, MoeLoraLayer):
373
+ target.update_layer(
374
+ adapter_name,
375
+ moelora_config.r,
376
+ moelora_config.num_moe,
377
+ moelora_config.gating,
378
+ moelora_config.lora_alpha,
379
+ moelora_config.lora_dropout,
380
+ moelora_config.init_lora_weights,
381
+ )
382
+ else:
383
+ new_module = self._create_new_module(moelora_config, adapter_name, target)
384
+ self._replace_module(parent, target_name, new_module, target)
385
+
386
+ if not is_target_modules_in_base_model:
387
+ raise ValueError(
388
+ f"Target modules {moelora_config.target_modules} not found in the base model. "
389
+ f"Please check the target modules and try again."
390
+ )
391
+
392
+ def _replace_module(self, parent_module, child_name, new_module, old_module):
393
+ setattr(parent_module, child_name, new_module)
394
+ new_module.weight = old_module.weight
395
+ if hasattr(old_module, "bias"):
396
+ if old_module.bias is not None:
397
+ new_module.bias = old_module.bias
398
+
399
+ if getattr(old_module, "state", None) is not None:
400
+ new_module.state = old_module.state
401
+ new_module.to(old_module.weight.device)
402
+
403
+ # dispatch to correct device
404
+ for name, module in new_module.named_modules():
405
+ if "lora_" in name:
406
+ module.to(old_module.weight.device)
407
+
408
+ if "gating" in name:
409
+ module.to(old_module.weight.device)
410
+
411
+ if "ranknum" in name:
412
+ module.to(old_module.weight.device)
413
+
414
+ def __getattr__(self, name: str):
415
+ """Forward missing attributes to the wrapped module."""
416
+ try:
417
+ return super().__getattr__(name) # defer to nn.Module's logic
418
+ except AttributeError:
419
+ return getattr(self.model, name)
420
+
421
+ def get_peft_config_as_dict(self, inference: bool = False):
422
+ config_dict = {}
423
+ for key, value in self.peft_config.items():
424
+ config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
425
+ if inference:
426
+ config["inference_mode"] = True
427
+ config_dict[key] = config
428
+ return config_dict
429
+
430
+ def _set_adapter_layers(self, enabled=True):
431
+ for module in self.model.modules():
432
+ if isinstance(module, MoeLoraLayer):
433
+ module.disable_adapters = False if enabled else True
434
+
435
+ def enable_adapter_layers(self):
436
+ self._set_adapter_layers(enabled=True)
437
+
438
+ def disable_adapter_layers(self):
439
+ self._set_adapter_layers(enabled=False)
440
+
441
+ def set_adapter(self, adapter_name):
442
+ for module in self.model.modules():
443
+ if isinstance(module, MoeLoraLayer):
444
+ if module.merged:
445
+ warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
446
+ module.unmerge()
447
+ module.active_adapter = adapter_name
448
+
449
+ def merge_adapter(self):
450
+ for module in self.model.modules():
451
+ if isinstance(module, MoeLoraLayer):
452
+ module.merge()
453
+
454
+ def unmerge_adapter(self):
455
+ for module in self.model.modules():
456
+ if isinstance(module, MoeLoraLayer):
457
+ module.unmerge()
458
+
459
+ @staticmethod
460
+ def _prepare_moelora_config(peft_config, model_config):
461
+ if peft_config.target_modules is None:
462
+ if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
463
+ raise ValueError("Please specify `target_modules` in `peft_config`")
464
+ peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
465
+ return peft_config
466
+
467
+ def merge_and_unload(self):
468
+ r"""
469
+ This method merges the LoRA layers into the base model. This is needed if someone wants to use the base model
470
+ as a standalone model.
471
+ """
472
+ if getattr(self.config, "model_type", None) == "gpt2":
473
+ raise ValueError("GPT2 models are not supported for merging LORA layers")
474
+
475
+ if getattr(self.model, "is_loaded_in_8bit", False) or getattr(self.model, "is_loaded_in_4bit", False):
476
+ raise ValueError("Cannot merge LORA layers when the model is loaded in 8-bit mode")
477
+
478
+ key_list = [key for key, _ in self.model.named_modules() if "lora" not in key]
479
+ for key in key_list:
480
+ try:
481
+ parent, target, target_name = _get_submodules(self.model, key)
482
+ except AttributeError:
483
+ continue
484
+ if isinstance(target, MoeLoraLayer):
485
+ if isinstance(target, nn.Embedding):
486
+ new_module = torch.nn.Embedding(target.in_features, target.out_features)
487
+ elif isinstance(target, nn.Conv2d):
488
+ new_module = torch.nn.Conv2d(
489
+ target.in_channels,
490
+ target.out_channels,
491
+ kernel_size=target.kernel_size,
492
+ stride=target.stride,
493
+ padding=target.padding,
494
+ dilation=target.dilation,
495
+ )
496
+ else:
497
+ bias = target.bias is not None
498
+ new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)
499
+ target.merge()
500
+ self._replace_module(parent, target_name, new_module, target)
501
+
502
+ # save any additional trainable modules part of `modules_to_save`
503
+ if isinstance(target, ModulesToSaveWrapper):
504
+ setattr(parent, target_name, target.modules_to_save[target.active_adapter])
505
+
506
+ return self.model
507
+
508
+ def add_weighted_adapter(self, adapters, weights, adapter_name):
509
+ if len({self.peft_config[adapter].r for adapter in adapters}) != 1:
510
+ raise ValueError("All adapters must have the same r value")
511
+ self.peft_config[adapter_name] = self.peft_config[adapters[0]]
512
+ self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r
513
+ self._find_and_replace(adapter_name)
514
+ mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
515
+ _freeze_adapter(self.model, adapter_name)
516
+ key_list = [key for key, _ in self.model.named_modules() if "lora" not in key]
517
+ for key in key_list:
518
+ _, target, _ = _get_submodules(self.model, key)
519
+ if isinstance(target, MoeLoraLayer):
520
+ if adapter_name in target.lora_A:
521
+ target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0
522
+ target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0
523
+ for adapter, weight in zip(adapters, weights):
524
+ if adapter not in target.lora_A:
525
+ continue
526
+ target.lora_A[adapter_name].weight.data += (
527
+ target.lora_A[adapter].weight.data * weight * target.scaling[adapter]
528
+ )
529
+ target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight
530
+
531
+ elif adapter_name in target.lora_embedding_A:
532
+ target.lora_embedding_A[adapter_name].data = target.lora_embedding_A[adapter_name].data * 0.0
533
+ target.lora_embedding_B[adapter_name].data = target.lora_embedding_B[adapter_name].data * 0.0
534
+ for adapter, weight in zip(adapters, weights):
535
+ if adapter not in target.lora_embedding_A:
536
+ continue
537
+ target.lora_embedding_A[adapter_name].data += (
538
+ target.lora_embedding_A[adapter].data * weight * target.scaling[adapter]
539
+ )
540
+ target.lora_embedding_B[adapter_name].data += target.lora_embedding_B[adapter].data * weight
541
+
542
+
543
+ # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
544
+ # and modified to work with PyTorch FSDP
545
+
546
+
547
+ # ------------------------------------------------------------------------------------------
548
+ # Copyright (c) Microsoft Corporation. All rights reserved.
549
+ # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
550
+ # ------------------------------------------------------------------------------------------
551
+
552
+
553
+ # had to adapt it for `lora_only` to work
554
+ def mark_only_lora_as_trainable(model: nn.Module, bias: str = "none") -> None:
555
+ for n, p in model.named_parameters():
556
+ if "lora_" not in n and "gating" not in n:
557
+ p.requires_grad = False
558
+ if bias == "none":
559
+ return
560
+ elif bias == "all":
561
+ for n, p in model.named_parameters():
562
+ if "bias" in n:
563
+ p.requires_grad = True
564
+ elif bias == "lora_only":
565
+ for m in model.modules():
566
+ if isinstance(m, MoeLoraLayer) and hasattr(m, "bias") and m.bias is not None:
567
+ m.bias.requires_grad = True
568
+ else:
569
+ raise NotImplementedError
570
+
571
+
572
+ class MoeLoraLayer:
573
+ def __init__(self, in_features: int, out_features: int, **kwargs):
574
+ self.r = {}
575
+
576
+ self.num_moe = {}
577
+
578
+ self.lora_alpha = {}
579
+ self.scaling = {}
580
+ self.lora_dropout = nn.ModuleDict({})
581
+ self.lora_A = nn.ModuleDict({})
582
+ self.lora_B = nn.ModuleDict({})
583
+
584
+ self.gating = nn.ModuleDict({})
585
+
586
+ # For Embedding layer
587
+ self.lora_embedding_A = nn.ParameterDict({})
588
+ self.lora_embedding_B = nn.ParameterDict({})
589
+ # Mark the weight as unmerged
590
+ self.merged = False
591
+ self.disable_adapters = False
592
+ self.in_features = in_features
593
+ self.out_features = out_features
594
+ self.kwargs = kwargs
595
+
596
+ def update_layer(self, adapter_name, r, num_moe, gating, lora_alpha, lora_dropout,
597
+ init_lora_weights):
598
+ self.r[adapter_name] = r
599
+
600
+ self.num_moe[adapter_name] = num_moe
601
+
602
+ self.lora_alpha[adapter_name] = lora_alpha
603
+
604
+ self.gating.update(
605
+ nn.ModuleDict({adapter_name: GATING_TO_MODEL_MAPPING[gating](num_moe=num_moe, dim=self.in_features)}))
606
+ self.gating[adapter_name].to(self.weight.device)
607
+
608
+ if lora_dropout > 0.0:
609
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
610
+ else:
611
+ lora_dropout_layer = nn.Identity()
612
+
613
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
614
+ # Actual trainable parameters
615
+ if r > 0:
616
+ self.lora_A.update(nn.ModuleDict({adapter_name: nn.Linear(self.in_features, r, bias=False)})) # TODO
617
+ self.lora_B.update(nn.ModuleDict({adapter_name: nn.Linear(r, self.out_features, bias=False)}))
618
+ self.scaling[adapter_name] = lora_alpha / (r // num_moe)
619
+ if init_lora_weights:
620
+ self.reset_lora_parameters(adapter_name)
621
+ self.to(self.weight.device)
622
+
623
+ def update_layer_conv2d(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
624
+ self.r[adapter_name] = r
625
+ self.lora_alpha[adapter_name] = lora_alpha
626
+ if lora_dropout > 0.0:
627
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
628
+ else:
629
+ lora_dropout_layer = nn.Identity()
630
+
631
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
632
+ # Actual trainable parameters
633
+ if r > 0:
634
+ kernel_size = self.kwargs["kernel_size"]
635
+ stride = self.kwargs["stride"]
636
+ padding = self.kwargs["padding"]
637
+ self.lora_A.update(
638
+ nn.ModuleDict({adapter_name: nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False)})
639
+ )
640
+ self.lora_B.update(
641
+ nn.ModuleDict({adapter_name: nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False)})
642
+ )
643
+ self.scaling[adapter_name] = lora_alpha / r
644
+ if init_lora_weights:
645
+ self.reset_lora_parameters(adapter_name)
646
+ self.to(self.weight.device)
647
+
648
+ def update_layer_embedding(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):
649
+ self.r[adapter_name] = r
650
+ self.lora_alpha[adapter_name] = lora_alpha
651
+ if lora_dropout > 0.0:
652
+ lora_dropout_layer = nn.Dropout(p=lora_dropout)
653
+ else:
654
+ lora_dropout_layer = nn.Identity()
655
+
656
+ self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
657
+ # Actual trainable parameters
658
+ if r > 0:
659
+ self.lora_embedding_A.update(
660
+ nn.ParameterDict({adapter_name: nn.Parameter(self.weight.new_zeros((r, self.in_features)))})
661
+ )
662
+ self.lora_embedding_B.update(
663
+ nn.ParameterDict({adapter_name: nn.Parameter(self.weight.new_zeros((self.out_features, r)))})
664
+ )
665
+ self.scaling[adapter_name] = lora_alpha / r
666
+ if init_lora_weights:
667
+ self.reset_lora_parameters(adapter_name)
668
+ self.to(self.weight.device)
669
+
670
+ def reset_lora_parameters(self, adapter_name):
671
+ if adapter_name in self.lora_A.keys():
672
+ # initialize A the same way as the default for nn.Linear and B to zero
673
+ nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
674
+ nn.init.zeros_(self.lora_B[adapter_name].weight)
675
+ if adapter_name in self.lora_embedding_A.keys():
676
+ # initialize a the same way as the default for nn.linear and b to zero
677
+ nn.init.zeros_(self.lora_embedding_A[adapter_name])
678
+ nn.init.normal_(self.lora_embedding_B[adapter_name])
679
+
680
+
681
+ class Linear(nn.Linear, MoeLoraLayer):
682
+ # Lora implemented in a dense layer
683
+ def __init__(
684
+ self,
685
+ adapter_name: str,
686
+ in_features: int,
687
+ out_features: int,
688
+ r: int = 0,
689
+
690
+ # extra MoE-specific parameters
691
+ num_moe: int = 0,
692
+ gating: str = "",
693
+ global_user_embeds: List = [],
694
+ gate_weights: List = [],
695
+
696
+ lora_alpha: int = 1,
697
+ lora_dropout: float = 0.0,
698
+ fan_in_fan_out: bool = False,
699
+ # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
700
+ **kwargs,
701
+ ):
702
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
703
+
704
+ nn.Linear.__init__(self, in_features, out_features, **kwargs)
705
+ MoeLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
706
+ # Freezing the pre-trained weight matrix
707
+ self.weight.requires_grad = False
708
+ self.gating.requires_grad_(True)
709
+ self.fan_in_fan_out = fan_in_fan_out
710
+ if fan_in_fan_out:
711
+ self.weight.data = self.weight.data.T
712
+ nn.Linear.reset_parameters(self)
713
+ self.update_layer(adapter_name, r, num_moe, gating, lora_alpha, lora_dropout, init_lora_weights)
714
+ self.active_adapter = adapter_name
715
+ self.global_user_embeds = global_user_embeds
716
+ self.gate_weights = gate_weights
717
+
718
+ def merge(self):
719
+ if self.active_adapter not in self.lora_A.keys():
720
+ return
721
+ if self.merged:
722
+ warnings.warn("Already merged. Nothing to do.")
723
+ return
724
+ if self.r[self.active_adapter] > 0:
725
+ self.weight.data += (
726
+ transpose(
727
+ self.lora_B[self.active_adapter].weight @ self.lora_A[self.active_adapter].weight,
728
+ self.fan_in_fan_out,
729
+ )
730
+ * self.scaling[self.active_adapter]
731
+ )
732
+ self.merged = True
733
+
734
+ def unmerge(self):
735
+ if self.active_adapter not in self.lora_A.keys():
736
+ return
737
+ if not self.merged:
738
+ warnings.warn("Already unmerged. Nothing to do.")
739
+ return
740
+ if self.r[self.active_adapter] > 0:
741
+ self.weight.data -= (
742
+ transpose(
743
+ self.lora_B[self.active_adapter].weight @ self.lora_A[self.active_adapter].weight,
744
+ self.fan_in_fan_out,
745
+ )
746
+ * self.scaling[self.active_adapter]
747
+ )
748
+ self.merged = False
749
+
750
+ def calculate_B(self, A_out):
751
+ batch_size, seq_len, n, r = A_out.size()
752
+ weight = self.lora_B[self.active_adapter].weight.t().reshape(n, r, -1)
753
+ return torch.einsum('ijkl, klm->ijkm', A_out, weight)
754
+
755
+
756
+ def forward(self, x: torch.Tensor, **kwargs):
757
+ flag = False
758
+ # user_embeds = self.global_user_embeds[0]
759
+ gate_weights = self.gate_weights[0]
760
+ previous_dtype = x.dtype
761
+ batch_size, seq_len, _ = x.size()
762
+
763
+ try:
764
+ if self.active_adapter not in self.lora_A.keys():
765
+ return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
766
+ if self.disable_adapters:
767
+ if self.r[self.active_adapter] > 0 and self.merged:
768
+ self.unmerge()
769
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
770
+ elif self.r[self.active_adapter] > 0 and not self.merged:
771
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
772
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
773
+
774
+ A_out = self.lora_A[self.active_adapter](
775
+ self.lora_dropout[self.active_adapter](x)
776
+ ).reshape(batch_size, seq_len, self.num_moe[self.active_adapter], -1)
777
+ B_out = self.calculate_B(A_out)
778
+
779
+ # Gate = self.gating[self.active_adapter](user_embeds).unsqueeze(-1)
780
+ Gate = gate_weights.unsqueeze(-1)
781
+
782
+
783
+ result += ((B_out * Gate).sum(dim=-2) * self.scaling[self.active_adapter])
784
+
785
+
786
+ gate_weights_squeezed = Gate.squeeze(-1)
787
+ gate_weights_split = gate_weights_squeezed.split(1, dim=0)
788
+ gate_weight_vectors = [x.squeeze() for x in gate_weights_split]
789
+
790
+ else:
791
+ result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
792
+
793
+
794
+ result = result.to(previous_dtype)
795
+
796
+ return result
797
+ except Exception as e:
798
+ print(f"An error occurred: {e}")
799
+ return torch.zeros_like(x)  # return an all-zero tensor with the same shape as the input
800
+
801
+
802
+ class Embedding(nn.Embedding, MoeLoraLayer):
803
+ # LoRA implemented in a Embedding layer
804
+ def __init__(
805
+ self,
806
+ adapter_name: str,
807
+ num_embeddings: int,
808
+ embedding_dim: int,
809
+ r: int = 0,
810
+ lora_alpha: int = 1,
811
+ lora_dropout: float = 0.0,
812
+ **kwargs,
813
+ ):
814
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
815
+
816
+ nn.Embedding.__init__(self, num_embeddings, embedding_dim, **kwargs)
817
+ MoeLoraLayer.__init__(self, in_features=num_embeddings, out_features=embedding_dim)
818
+
819
+ self.weight.requires_grad = False
820
+
821
+ nn.Embedding.reset_parameters(self)
822
+ self.update_layer_embedding(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
823
+ self.active_adapter = adapter_name
824
+
825
+ def unmerge(self, mode: bool = True):
826
+ if not self.merged:
827
+ warnings.warn("Already unmerged. Nothing to do.")
828
+ return
829
+ if self.r[self.active_adapter] > 0:
830
+ self.weight.data -= (
831
+ transpose(
832
+ self.lora_embedding_B[self.active_adapter] @ self.lora_embedding_A[self.active_adapter], True
833
+ )
834
+ * self.scaling[self.active_adapter]
835
+ )
836
+ self.merged = False
837
+
838
+ def merge(self):
839
+ if self.merged:
840
+ warnings.warn("Already merged. Nothing to do.")
841
+ return
842
+ if self.r[self.active_adapter] > 0:
843
+ self.weight.data += (
844
+ transpose(
845
+ self.lora_embedding_B[self.active_adapter] @ self.lora_embedding_A[self.active_adapter], True
846
+ )
847
+ * self.scaling[self.active_adapter]
848
+ )
849
+ self.merged = True
850
+
851
+ def forward(self, x: torch.Tensor):
852
+ if self.disable_adapters:
853
+ if self.r[self.active_adapter] > 0 and self.merged:
854
+ self.weight.data -= (
855
+ transpose(
856
+ self.lora_embedding_B[self.active_adapter]
857
+ @ self.lora_embedding_A[self.active_adapter],
858
+ True,
859
+ )
860
+ * self.scaling[self.active_adapter]
861
+ )
862
+ self.merged = False
863
+ return nn.Embedding.forward(self, x)
864
+
865
+ elif self.r[self.active_adapter] > 0 and not self.merged:
866
+ result = nn.Embedding.forward(self, x)
867
+ if self.r[self.active_adapter] > 0:
868
+ after_A = F.embedding(
869
+ x,
870
+ self.lora_embedding_A[self.active_adapter].T,
871
+ self.padding_idx,
872
+ self.max_norm,
873
+ self.norm_type,
874
+ self.scale_grad_by_freq,
875
+ self.sparse,
876
+ )
877
+ result += (after_A @ self.lora_embedding_B[self.active_adapter].T) * self.scaling[self.active_adapter]
878
+ return result
879
+ else:
880
+ return nn.Embedding.forward(self, x)
881
+
882
+
883
+ class Conv2d(nn.Conv2d, MoeLoraLayer):
884
+ # Lora implemented in a conv2d layer
885
+ def __init__(
886
+ self,
887
+ adapter_name: str,
888
+ in_channels: int,
889
+ out_channels: int,
890
+ kernel_size: Union[int, Tuple[int]],
891
+ stride: Union[int, Tuple[int]] = 1,
892
+ padding: Union[int, Tuple[int]] = 0,
893
+ r: int = 0,
894
+ lora_alpha: int = 1,
895
+ lora_dropout: float = 0.0,
896
+ **kwargs,
897
+ ):
898
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
899
+
900
+ nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding)
901
+ MoeLoraLayer.__init__(
902
+ self,
903
+ in_features=in_channels,
904
+ out_features=out_channels,
905
+ kernel_size=kernel_size,
906
+ stride=stride,
907
+ padding=padding,
908
+ )
909
+ # Freezing the pre-trained weight matrix
910
+ self.weight.requires_grad = False
911
+
912
+ nn.Conv2d.reset_parameters(self)
913
+ self.update_layer_conv2d(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
914
+ self.active_adapter = adapter_name
915
+
916
+ def merge(self):
917
+ if self.active_adapter not in self.lora_A.keys():
918
+ return
919
+ if self.merged:
920
+ warnings.warn("Already merged. Nothing to do.")
921
+ return
922
+ if self.r[self.active_adapter] > 0:
923
+ # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117
924
+ if self.weight.size()[2:4] == (1, 1):
925
+ # conv2d 1x1
926
+ self.weight.data += (
927
+ self.lora_B[self.active_adapter].weight.squeeze(3).squeeze(2)
928
+ @ self.lora_A[self.active_adapter].weight.squeeze(3).squeeze(2)
929
+ ).unsqueeze(2).unsqueeze(3) * self.scaling[self.active_adapter]
930
+ else:
931
+ # conv2d 3x3
932
+ self.weight.data += (
933
+ F.conv2d(
934
+ self.lora_A[self.active_adapter].weight.permute(1, 0, 2, 3),
935
+ self.lora_B[self.active_adapter].weight,
936
+ ).permute(1, 0, 2, 3)
937
+ * self.scaling[self.active_adapter]
938
+ )
939
+ self.merged = True
940
+
941
+ def unmerge(self):
942
+ if self.active_adapter not in self.lora_A.keys():
943
+ return
944
+ if not self.merged:
945
+ warnings.warn("Already unmerged. Nothing to do.")
946
+ return
947
+ if self.r[self.active_adapter] > 0:
948
+ if self.weight.size()[2:4] == (1, 1):
949
+ # conv2d 1x1
950
+ self.weight.data -= (
951
+ self.lora_B[self.active_adapter].weight.squeeze(3).squeeze(2)
952
+ @ self.lora_A[self.active_adapter].weight.squeeze(3).squeeze(2)
953
+ ).unsqueeze(2).unsqueeze(3) * self.scaling[self.active_adapter]
954
+ else:
955
+ # conv2d 3x3
956
+ self.weight.data -= (
957
+ F.conv2d(
958
+ self.lora_A[self.active_adapter].weight.permute(1, 0, 2, 3),
959
+ self.lora_B[self.active_adapter].weight,
960
+ ).permute(1, 0, 2, 3)
961
+ * self.scaling[self.active_adapter]
962
+ )
963
+ self.merged = False
964
+
965
+ def forward(self, x: torch.Tensor):
966
+ previous_dtype = x.dtype
967
+
968
+ if self.active_adapter not in self.lora_A.keys():
969
+ return F.conv2d(
970
+ x,
971
+ self.weight,
972
+ bias=self.bias,
973
+ stride=self.stride,
974
+ padding=self.padding,
975
+ dilation=self.dilation,
976
+ groups=self.groups,
977
+ )
978
+ if self.disable_adapters:
979
+ if self.r[self.active_adapter] > 0 and self.merged:
980
+ self.unmerge()
981
+ result = F.conv2d(
982
+ x,
983
+ self.weight,
984
+ bias=self.bias,
985
+ stride=self.stride,
986
+ padding=self.padding,
987
+ dilation=self.dilation,
988
+ groups=self.groups,
989
+ )
990
+ elif self.r[self.active_adapter] > 0 and not self.merged:
991
+ result = F.conv2d(
992
+ x,
993
+ self.weight,
994
+ bias=self.bias,
995
+ stride=self.stride,
996
+ padding=self.padding,
997
+ dilation=self.dilation,
998
+ groups=self.groups,
999
+ )
1000
+
1001
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
1002
+
1003
+ result += (
1004
+ self.lora_B[self.active_adapter](
1005
+ self.lora_A[self.active_adapter](self.lora_dropout[self.active_adapter](x))
1006
+ )
1007
+ * self.scaling[self.active_adapter]
1008
+ )
1009
+ else:
1010
+ result = F.conv2d(
1011
+ x,
1012
+ self.weight,
1013
+ bias=self.bias,
1014
+ stride=self.stride,
1015
+ padding=self.padding,
1016
+ dilation=self.dilation,
1017
+ groups=self.groups,
1018
+ )
1019
+
1020
+ result = result.to(previous_dtype)
1021
+
1022
+ return result
1023
+
1024
+
1025
+ if is_bnb_available():
1026
+
1027
+ class Linear8bitLt(bnb.nn.Linear8bitLt, MoeLoraLayer):
1028
+ # Lora implemented in a dense layer
1029
+ def __init__(
1030
+ self,
1031
+ adapter_name: str,
1032
+ in_features: int,
1033
+ out_features: int,
1034
+ r: int = 0,
1035
+
1036
+ # extra MoE-specific parameters
1037
+ num_moe: int = 0,
1038
+ gating: str = "",
1039
+ global_user_embeds: List = [],
1040
+ loss_fn: str = "",
1041
+
1042
+ lora_alpha: int = 1,
1043
+ lora_dropout: float = 0.0,
1044
+ **kwargs,
1045
+ ):
1046
+ kwargs.setdefault('bias', True)
1047
+ bnb.nn.Linear8bitLt.__init__(
1048
+ self,
1049
+ in_features,
1050
+ out_features,
1051
+ # **kwargs
1052
+ )
1053
+ MoeLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
1054
+
1055
+ # Freezing the pre-trained weight matrix
1056
+ self.weight.requires_grad = False
1057
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
1058
+ # self.update_layer(adapter_name, r, num_moe, gating, loss_fn, lora_alpha, lora_dropout, init_lora_weights)
1059
+ # bnb.nn.Linear8bitLt.reset_parameters(self)
1060
+
1061
+ self.update_layer(adapter_name, r, num_moe, gating, lora_alpha, lora_dropout, init_lora_weights)
1062
+ self.active_adapter = adapter_name
1063
+ self.global_user_embeds = global_user_embeds
1064
+
1065
+ def calculate_B(self, A_out):
1066
+ # batch_size, seq_len, self.num_moe[self.active_adapter], -1
1067
+ batch_size, seq_len, n, r = A_out.size()
1068
+ weight = self.lora_B[self.active_adapter].weight.t().reshape(n, r, -1)
1069
+ return torch.einsum('ijkl, klm->ijkm', A_out, weight)
1070
+
1071
+ def forward(self, x: torch.Tensor):
1072
+ user_embeds = self.global_user_embeds[0]
1073
+ result = super().forward(x)
1074
+
1075
+ batch_size, seq_len, _ = x.size()
1076
+
1077
+ if self.disable_adapters or self.active_adapter not in self.lora_A.keys():
1078
+ return result
1079
+ elif self.r[self.active_adapter] > 0:
1080
+ if not torch.is_autocast_enabled():
1081
+ expected_dtype = result.dtype
1082
+
1083
+ if x.dtype != torch.float32:
1084
+ x = x.float()
1085
+ A_out = self.lora_A[self.active_adapter](
1086
+ self.lora_dropout[self.active_adapter](x)
1087
+ ).reshape(batch_size, seq_len, self.num_moe[self.active_adapter], -1)
1088
+ B_out = self.calculate_B(A_out)
1089
+ Gate = self.gating[self.active_adapter](user_embeds).unsqueeze(-1)
1090
+ output = ((B_out * Gate).sum(dim=-2).to(expected_dtype) * self.scaling[self.active_adapter])
1091
+
1092
+ else:
1093
+ A_out = self.lora_A[self.active_adapter](
1094
+ self.lora_dropout[self.active_adapter](x)
1095
+ ).reshape(batch_size, seq_len, self.num_moe[self.active_adapter], -1)
1096
+ B_out = self.calculate_B(A_out)
1097
+ Gate = self.gating[self.active_adapter](user_embeds).unsqueeze(-1)
1098
+ output = ((B_out * Gate).sum(dim=-2) * self.scaling[self.active_adapter])
1099
+
1100
+ result += output
1101
+ return result
1102
+
1103
+
1104
+ if is_bnb_4bit_available():
1105
+
1106
+ class Linear4bit(bnb.nn.Linear4bit, MoeLoraLayer):
1107
+ # Lora implemented in a dense layer
1108
+ def __init__(
1109
+ self,
1110
+ adapter_name,
1111
+ in_features,
1112
+ out_features,
1113
+ r: int = 0,
+
+ # extra MoE parameters (mirrors Linear8bitLt above)
+ num_moe: int = 0,
+ gating: str = "",
+ global_user_embeds: List = [],
+ loss_fn: str = "",
+
+ lora_alpha: int = 1,
+ lora_dropout: float = 0.0,
+ **kwargs,
+ ):
+ bnb.nn.Linear4bit.__init__(
+ self,
+ in_features,
+ out_features,
+ bias=kwargs.get("bias", True),
+ compute_dtype=kwargs.get("compute_dtype", torch.float32),
+ compress_statistics=kwargs.get("compress_statistics", True),
+ quant_type=kwargs.get("quant_type", "nf4"),
+ )
+ MoeLoraLayer.__init__(self, in_features=in_features, out_features=out_features)
+
+ # Freezing the pre-trained weight matrix
+ self.weight.requires_grad = False
+
+ init_lora_weights = kwargs.pop("init_lora_weights", True)
+ # pass the MoE arguments through, as in Linear8bitLt above
+ self.update_layer(adapter_name, r, num_moe, gating, lora_alpha, lora_dropout, init_lora_weights)
+ self.active_adapter = adapter_name
+ self.global_user_embeds = global_user_embeds
+
+ def calculate_B(self, A_out):
+ # A_out: (batch_size, seq_len, num_moe, r) -> per-expert outputs (batch_size, seq_len, num_moe, out)
+ batch_size, seq_len, n, r = A_out.size()
+ weight = self.lora_B[self.active_adapter].weight.t().reshape(n, r, -1)
+ return torch.einsum('ijkl, klm->ijkm', A_out, weight)
+
1136
+ def forward(self, x: torch.Tensor):
1137
+ result = super().forward(x)
1138
+ user_embeds = self.global_user_embeds[0]
1139
+
1140
+ if self.disable_adapters or self.active_adapter not in self.lora_A.keys():
1141
+ return result
1142
+ elif self.r[self.active_adapter] > 0:
1143
+ result = result.clone()
1144
+ if not torch.is_autocast_enabled():
1145
+ expected_dtype = result.dtype
1146
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
1147
+ batch_size, seq_len, _ = x.size()
1148
+ A_out = self.lora_A[self.active_adapter](
1149
+ self.lora_dropout[self.active_adapter](x)
1150
+ ).reshape(batch_size, seq_len, self.num_moe[self.active_adapter], -1)
1151
+ B_out = self.calculate_B(A_out)
1152
+ Gate = self.gating[self.active_adapter](user_embeds).unsqueeze(-1)
1153
+ output = ((B_out * Gate).sum(dim=-2).to(expected_dtype) * self.scaling[self.active_adapter])
1154
+ else:
1155
+ x = x.to(self.lora_A[self.active_adapter].weight.dtype)
1156
+ batch_size, seq_len, _ = x.size()
1157
+ A_out = self.lora_A[self.active_adapter](
1158
+ self.lora_dropout[self.active_adapter](x)
1159
+ ).reshape(batch_size, seq_len, self.num_moe[self.active_adapter], -1)
1160
+ B_out = self.calculate_B(A_out)
1161
+ Gate = self.gating[self.active_adapter](user_embeds).unsqueeze(-1)
1162
+ output = ((B_out * Gate).sum(dim=-2) * self.scaling[self.active_adapter])
1163
+ result += output
1164
+ return result
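Usage note: the forward passes above all reduce to the same gated MoE-LoRA update. A minimal, self-contained sketch of that math follows; the dimensions, the softmax gate, and the broadcast over the sequence axis are illustrative assumptions, not values taken from this repo's configs.

```python
import torch

batch, seq, d_in, d_out = 2, 5, 32, 64
num_moe, r, user_dim = 4, 8, 16

x = torch.randn(batch, seq, d_in)
user_embeds = torch.randn(batch, user_dim)

lora_A = torch.nn.Linear(d_in, r * num_moe, bias=False)    # all expert A matrices, fused
lora_B = torch.nn.Linear(r * num_moe, d_out, bias=False)   # all expert B matrices, fused
gating = torch.nn.Sequential(torch.nn.Linear(user_dim, num_moe), torch.nn.Softmax(dim=-1))

A_out = lora_A(x).reshape(batch, seq, num_moe, r)           # (b, s, n, r)
B_weight = lora_B.weight.t().reshape(num_moe, r, d_out)     # (n, r, d_out), as in calculate_B
B_out = torch.einsum('ijkl,klm->ijkm', A_out, B_weight)     # per-expert outputs (b, s, n, d_out)

gate = gating(user_embeds).unsqueeze(1).unsqueeze(-1)       # (b, 1, n, 1), broadcast over seq/out
delta = (B_out * gate).sum(dim=-2)                          # expert-weighted sum -> (b, s, d_out)
print(delta.shape)                                          # torch.Size([2, 5, 64])
```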
model/peft/tuners/p_tuning.py ADDED
@@ -0,0 +1,170 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import enum
17
+ import warnings
18
+ from dataclasses import dataclass, field
19
+ from typing import Union
20
+
21
+ import torch
22
+
23
+ from ..utils import PeftType, PromptLearningConfig
24
+
25
+
26
+ class PromptEncoderReparameterizationType(str, enum.Enum):
27
+ MLP = "MLP"
28
+ LSTM = "LSTM"
29
+
30
+
31
+ @dataclass
32
+ class PromptEncoderConfig(PromptLearningConfig):
33
+ """
34
+ This is the configuration class to store the configuration of a [`PromptEncoder`].
35
+
36
+ Args:
37
+ encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]):
38
+ The type of reparameterization to use.
39
+ encoder_hidden_size (`int`): The hidden size of the prompt encoder.
40
+ encoder_num_layers (`int`): The number of layers of the prompt encoder.
41
+ encoder_dropout (`float`): The dropout probability of the prompt encoder.
42
+ """
43
+
44
+ encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(
45
+ default=PromptEncoderReparameterizationType.MLP,
46
+ metadata={"help": "How to reparameterize the prompt encoder"},
47
+ )
48
+ encoder_hidden_size: int = field(
49
+ default=None,
50
+ metadata={"help": "The hidden size of the prompt encoder"},
51
+ )
52
+ encoder_num_layers: int = field(
53
+ default=2,
54
+ metadata={"help": "The number of layers of the prompt encoder"},
55
+ )
56
+ encoder_dropout: float = field(
57
+ default=0.0,
58
+ metadata={"help": "The dropout of the prompt encoder"},
59
+ )
60
+
61
+ def __post_init__(self):
62
+ self.peft_type = PeftType.P_TUNING
63
+
64
+
65
+ # Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py
66
+ # with some refactor
67
+ class PromptEncoder(torch.nn.Module):
68
+ """
69
+ The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.
70
+
71
+ Args:
72
+ config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.
73
+
74
+ Example:
75
+
76
+ ```py
77
+ >>> from peft import PromptEncoder, PromptEncoderConfig
78
+
79
+ >>> config = PromptEncoderConfig(
80
+ ... peft_type="P_TUNING",
81
+ ... task_type="SEQ_2_SEQ_LM",
82
+ ... num_virtual_tokens=20,
83
+ ... token_dim=768,
84
+ ... num_transformer_submodules=1,
85
+ ... num_attention_heads=12,
86
+ ... num_layers=12,
87
+ ... encoder_reparameterization_type="MLP",
88
+ ... encoder_hidden_size=768,
89
+ ... )
90
+
91
+ >>> prompt_encoder = PromptEncoder(config)
92
+ ```
93
+
94
+ **Attributes**:
95
+ - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.
96
+ - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.
97
+ - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and
98
+ `encoder_reparameterization_type="LSTM"`.
99
+ - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.
100
+ - **input_size** (`int`) -- The input size of the prompt encoder.
101
+ - **output_size** (`int`) -- The output size of the prompt encoder.
102
+ - **hidden_size** (`int`) -- The hidden size of the prompt encoder.
103
+ - **total_virtual_tokens** (`int`): The total number of virtual tokens of the
104
+ prompt encoder.
105
+ - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt
106
+ encoder.
107
+
108
+
109
+ Input shape: (`batch_size`, `total_virtual_tokens`)
110
+
111
+ Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
112
+ """
113
+
114
+ def __init__(self, config):
115
+ super().__init__()
116
+ self.token_dim = config.token_dim
117
+ self.input_size = self.token_dim
118
+ self.output_size = self.token_dim
119
+ self.hidden_size = config.encoder_hidden_size
120
+ self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
121
+ self.encoder_type = config.encoder_reparameterization_type
122
+
123
+ # embedding
124
+ self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
125
+ if not config.inference_mode:
126
+ if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
127
+ lstm_dropout = config.encoder_dropout
128
+ num_layers = config.encoder_num_layers
129
+ # LSTM
130
+ self.lstm_head = torch.nn.LSTM(
131
+ input_size=self.input_size,
132
+ hidden_size=self.hidden_size,
133
+ num_layers=num_layers,
134
+ dropout=lstm_dropout,
135
+ bidirectional=True,
136
+ batch_first=True,
137
+ )
138
+
139
+ self.mlp_head = torch.nn.Sequential(
140
+ torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),
141
+ torch.nn.ReLU(),
142
+ torch.nn.Linear(self.hidden_size * 2, self.output_size),
143
+ )
144
+
145
+ elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
146
+ warnings.warn(
147
+ f"for {self.encoder_type}, the `encoder_num_layers` is ignored. Exactly 2 MLP layers are used."
148
+ )
149
+ layers = [
150
+ torch.nn.Linear(self.input_size, self.hidden_size),
151
+ torch.nn.ReLU(),
152
+ torch.nn.Linear(self.hidden_size, self.hidden_size),
153
+ torch.nn.ReLU(),
154
+ torch.nn.Linear(self.hidden_size, self.output_size),
155
+ ]
156
+ self.mlp_head = torch.nn.Sequential(*layers)
157
+
158
+ else:
159
+ raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")
160
+
161
+ def forward(self, indices):
162
+ input_embeds = self.embedding(indices)
163
+ if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
164
+ output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
165
+ elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
166
+ output_embeds = self.mlp_head(input_embeds)
167
+ else:
168
+ raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")
169
+
170
+ return output_embeds
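A quick shape check for the MLP branch of the prompt encoder, assuming `PromptEncoderConfig` and `PromptEncoder` above are importable (config values mirror the docstring example):

```python
import torch

config = PromptEncoderConfig(
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=768,
)
encoder = PromptEncoder(config)
indices = torch.arange(config.num_virtual_tokens).unsqueeze(0)  # (1, total_virtual_tokens)
print(encoder(indices).shape)                                   # torch.Size([1, 20, 768])
```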
model/peft/tuners/prefix_tuning.py ADDED
@@ -0,0 +1,109 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from dataclasses import dataclass, field
18
+
19
+ import torch
20
+
21
+ from ..utils import PeftType, PromptLearningConfig
22
+
23
+
24
+ @dataclass
25
+ class PrefixTuningConfig(PromptLearningConfig):
26
+ """
27
+ This is the configuration class to store the configuration of a [`PrefixEncoder`].
28
+
29
+ Args:
30
+ encoder_hidden_size (`int`): The hidden size of the prompt encoder.
31
+ prefix_projection (`bool`): Whether to project the prefix embeddings.
32
+ """
33
+
34
+ encoder_hidden_size: int = field(
35
+ default=None,
36
+ metadata={"help": "The hidden size of the encoder"},
37
+ )
38
+ prefix_projection: bool = field(
39
+ default=False,
40
+ metadata={"help": "Whether to project the prefix tokens"},
41
+ )
42
+
43
+ def __post_init__(self):
44
+ self.peft_type = PeftType.PREFIX_TUNING
45
+
46
+
47
+ # Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py
48
+ # with some refactor
49
+ class PrefixEncoder(torch.nn.Module):
50
+ r"""
51
+ The `torch.nn` model to encode the prefix.
52
+
53
+ Args:
54
+ config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.
55
+
56
+ Example:
57
+
58
+ ```py
59
+ >>> from peft import PrefixEncoder, PrefixTuningConfig
60
+
61
+ >>> config = PrefixTuningConfig(
62
+ ... peft_type="PREFIX_TUNING",
63
+ ... task_type="SEQ_2_SEQ_LM",
64
+ ... num_virtual_tokens=20,
65
+ ... token_dim=768,
66
+ ... num_transformer_submodules=1,
67
+ ... num_attention_heads=12,
68
+ ... num_layers=12,
69
+ ... encoder_hidden_size=768,
70
+ ... )
71
+ >>> prefix_encoder = PrefixEncoder(config)
72
+ ```
73
+
74
+ **Attributes**:
75
+ - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
76
+ - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
77
+ `prefix_projection` is `True`.
78
+ - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.
79
+
80
+ Input shape: (`batch_size`, `num_virtual_tokens`)
81
+
82
+ Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
83
+ """
84
+
85
+ def __init__(self, config):
86
+ super().__init__()
87
+ self.prefix_projection = config.prefix_projection
88
+ token_dim = config.token_dim
89
+ num_layers = config.num_layers
90
+ encoder_hidden_size = config.encoder_hidden_size
91
+ num_virtual_tokens = config.num_virtual_tokens
92
+ if self.prefix_projection and not config.inference_mode:
93
+ # Use a two-layer MLP to encode the prefix
94
+ self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
95
+ self.transform = torch.nn.Sequential(
96
+ torch.nn.Linear(token_dim, encoder_hidden_size),
97
+ torch.nn.Tanh(),
98
+ torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
99
+ )
100
+ else:
101
+ self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)
102
+
103
+ def forward(self, prefix: torch.Tensor):
104
+ if self.prefix_projection:
105
+ prefix_tokens = self.embedding(prefix)
106
+ past_key_values = self.transform(prefix_tokens)
107
+ else:
108
+ past_key_values = self.embedding(prefix)
109
+ return past_key_values
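The flat `(batch, num_virtual_tokens, 2*layers*hidden)` output is later split into per-layer key/value states by the PEFT model wrapper; a hedged sketch of that reshape with illustrative dimensions, assuming `PrefixTuningConfig` and `PrefixEncoder` above are importable:

```python
import torch

config = PrefixTuningConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=10,
    token_dim=768,
    num_attention_heads=12,
    num_layers=12,
)
encoder = PrefixEncoder(config)
prefix = torch.arange(config.num_virtual_tokens).unsqueeze(0)   # (1, 10)
flat = encoder(prefix)                                          # (1, 10, 2 * num_layers * token_dim)
kv = flat.view(1, config.num_virtual_tokens, config.num_layers * 2,
               config.num_attention_heads, config.token_dim // config.num_attention_heads)
print(kv.shape)                                                 # torch.Size([1, 10, 24, 12, 64])
```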
model/peft/tuners/prompt_tuning.py ADDED
@@ -0,0 +1,130 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import enum
17
+ import math
18
+ from dataclasses import dataclass, field
19
+ from typing import Optional, Union
20
+
21
+ import torch
22
+
23
+ from ..utils import PeftType, PromptLearningConfig
24
+
25
+
26
+ class PromptTuningInit(str, enum.Enum):
27
+ TEXT = "TEXT"
28
+ RANDOM = "RANDOM"
29
+
30
+
31
+ @dataclass
32
+ class PromptTuningConfig(PromptLearningConfig):
33
+ """
34
+ This is the configuration class to store the configuration of a [`PromptEmbedding`].
35
+
36
+ Args:
37
+ prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.
38
+ prompt_tuning_init_text (`str`, *optional*):
39
+ The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.
40
+ tokenizer_name_or_path (`str`, *optional*):
41
+ The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.
42
+ """
43
+
44
+ prompt_tuning_init: Union[PromptTuningInit, str] = field(
45
+ default=PromptTuningInit.RANDOM,
46
+ metadata={"help": "How to initialize the prompt tuning parameters"},
47
+ )
48
+ prompt_tuning_init_text: Optional[str] = field(
49
+ default=None,
50
+ metadata={
51
+ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
52
+ },
53
+ )
54
+ tokenizer_name_or_path: Optional[str] = field(
55
+ default=None,
56
+ metadata={
57
+ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`"
58
+ },
59
+ )
60
+
61
+ def __post_init__(self):
62
+ self.peft_type = PeftType.PROMPT_TUNING
63
+
64
+
65
+ class PromptEmbedding(torch.nn.Module):
66
+ """
67
+ The model to encode virtual tokens into prompt embeddings.
68
+
69
+ Args:
70
+ config ([`PromptTuningConfig`]): The configuration of the prompt embedding.
71
+ word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.
72
+
73
+ **Attributes**:
74
+ - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.
75
+
76
+ Example:
77
+
78
+ ```py
79
+ >>> from peft import PromptEmbedding, PromptTuningConfig
80
+
81
+ >>> config = PromptTuningConfig(
82
+ ... peft_type="PROMPT_TUNING",
83
+ ... task_type="SEQ_2_SEQ_LM",
84
+ ... num_virtual_tokens=20,
85
+ ... token_dim=768,
86
+ ... num_transformer_submodules=1,
87
+ ... num_attention_heads=12,
88
+ ... num_layers=12,
89
+ ... prompt_tuning_init="TEXT",
90
+ ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral",
91
+ ... tokenizer_name_or_path="t5-base",
92
+ ... )
93
+
94
+ >>> # t5_model.shared is the word embeddings of the base model
95
+ >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)
96
+ ```
97
+
98
+ Input Shape: (`batch_size`, `total_virtual_tokens`)
99
+
100
+ Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
101
+ """
102
+
103
+ def __init__(self, config, word_embeddings):
104
+ super().__init__()
105
+
106
+ total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
107
+ self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)
108
+ if config.prompt_tuning_init == PromptTuningInit.TEXT:
109
+ from transformers import AutoTokenizer
110
+
111
+ tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path)
112
+ init_text = config.prompt_tuning_init_text
113
+ init_token_ids = tokenizer(init_text)["input_ids"]
114
+ # Trim or iterate until num_text_tokens matches total_virtual_tokens
115
+ num_text_tokens = len(init_token_ids)
116
+ if num_text_tokens > total_virtual_tokens:
117
+ init_token_ids = init_token_ids[:total_virtual_tokens]
118
+ elif num_text_tokens < total_virtual_tokens:
119
+ num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
120
+ init_token_ids = init_token_ids * num_reps
121
+ init_token_ids = init_token_ids[:total_virtual_tokens]
122
+
123
+ word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone()
124
+ word_embedding_weights = word_embedding_weights.to(torch.float32)
125
+ self.embedding.weight = torch.nn.Parameter(word_embedding_weights)
126
+
127
+ def forward(self, indices):
128
+ # Just get embeddings
129
+ prompt_embeddings = self.embedding(indices)
130
+ return prompt_embeddings
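A minimal sketch of the `RANDOM` initialization path, which avoids the tokenizer download the `TEXT` docstring example needs (assumes the classes above are importable; `word_embeddings` is unused on this path):

```python
import torch

config = PromptTuningConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=8,
    token_dim=768,
    num_transformer_submodules=1,
    prompt_tuning_init="RANDOM",
)
prompt_embedding = PromptEmbedding(config, word_embeddings=None)  # embeddings only needed for TEXT init
indices = torch.arange(8).unsqueeze(0)
print(prompt_embedding(indices).shape)                            # torch.Size([1, 8, 768])
```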
model/peft/tuners/test_moelora.py ADDED
@@ -0,0 +1,97 @@
1
+ import torch
2
+ import unittest
3
+ import math
4
+ import re
5
+ import warnings
6
+ from dataclasses import asdict, dataclass, field
7
+ from enum import Enum
8
+ from typing import List, Optional, Tuple, Union
9
+ import itertools
10
+ import copy
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+ from transformers.pytorch_utils import Conv1D
16
+ from .gating import GATING_TO_MODEL_MAPPING
17
+
18
+ from ..import_utils import is_bnb_4bit_available, is_bnb_available
19
+ from ..utils import (
20
+ COMMON_LAYERS_PATTERN,
21
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
22
+ ModulesToSaveWrapper,
23
+ PeftConfig,
24
+ PeftType,
25
+ _freeze_adapter,
26
+ _get_submodules,
27
+ transpose,
28
+ )
29
+
30
+ if is_bnb_available():
31
+ import bitsandbytes as bnb
32
+ from .moelora import *
33
+
34
+ class TestMoELoRA(unittest.TestCase):
35
+ def setUp(self):
36
+ self.model = MoELoRA() # Instantiate your MoELoRA model here
37
+
38
+ def test_forward_no_adapters(self):
39
+ x = torch.randn(10, 20, 30) # Create a random input tensor
40
+ output = self.model.forward(x)
41
+ self.assertEqual(output.shape, (10, 20, 30)) # Assert the output shape is correct
42
+
43
+ def test_forward_with_adapters(self):
44
+ x = torch.randn(10, 20, 30) # Create a random input tensor
45
+ self.model.active_adapter = 'adapter1' # Set the active adapter
46
+ output = self.model.forward(x)
47
+ self.assertEqual(output.shape, (10, 20, 30)) # Assert the output shape is correct
48
+
49
+ def test_forward_with_global_user_embeds(self):
50
+ x = torch.randn(10, 20, 30) # Create a random input tensor
51
+ self.model.active_adapter = 'adapter1' # Set the active adapter
52
+ self.model.global_user_embeds = [torch.randn(10, 30)] # Set the global_user_embeds
53
+ output = self.model.forward(x)
54
+ self.assertEqual(output.shape, (10, 20, 30)) # Assert the output shape is correct
55
+
82
+ def test_forward_with_global_user_embeds_exception(self):
83
+ x = torch.randn(10, 20, 30) # Create a random input tensor
84
+ self.model.active_adapter = 'adapter1' # Set the active adapter
85
+ self.model.global_user_embeds = [torch.randn(5, 30)] # Set the global_user_embeds with incompatible shape
86
+ output = self.model.forward(x)
87
+ self.assertEqual(output.shape, (10, 20, 30)) # Assert the output shape is correct
88
+
89
+ def test_forward_no_global_user_embeds(self):
90
+ x = torch.randn(10, 20, 30) # Create a random input tensor
91
+ self.model.active_adapter = 'adapter1' # Set the active adapter
92
+ self.model.global_user_embeds = [] # Set an empty global_user_embeds
93
+ output = self.model.forward(x)
94
+ self.assertEqual(output.shape, (10, 20, 30)) # Assert the output shape is correct
95
+
96
+ if __name__ == '__main__':
97
+ unittest.main()
model/peft/utils/__init__.py ADDED
@@ -0,0 +1,42 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all
4
+
5
+ # coding=utf-8
6
+ # Copyright 2023-present the HuggingFace Inc. team.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType
21
+ from .other import (
22
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
23
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
24
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
25
+ COMMON_LAYERS_PATTERN,
26
+ CONFIG_NAME,
27
+ WEIGHTS_NAME,
28
+ SAFETENSORS_WEIGHTS_NAME,
29
+ _set_trainable,
30
+ add_library_to_model_card,
31
+ bloom_model_postprocess_past_key_value,
32
+ prepare_model_for_int8_training,
33
+ prepare_model_for_kbit_training,
34
+ shift_tokens_right,
35
+ transpose,
36
+ _get_submodules,
37
+ _set_adapter,
38
+ _freeze_adapter,
39
+ ModulesToSaveWrapper,
40
+ )
41
+ from .hub_utils import hub_file_exists
42
+ from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict
model/peft/utils/config.py ADDED
@@ -0,0 +1,222 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import enum
16
+ import inspect
17
+ import json
18
+ import os
19
+ from dataclasses import asdict, dataclass, field
20
+ from typing import Optional, Union
21
+
22
+ from huggingface_hub import hf_hub_download
23
+ from transformers.utils import PushToHubMixin
24
+
25
+ from .other import CONFIG_NAME
26
+
27
+
28
+ class PeftType(str, enum.Enum):
29
+ PROMPT_TUNING = "PROMPT_TUNING"
30
+ P_TUNING = "P_TUNING"
31
+ PREFIX_TUNING = "PREFIX_TUNING"
32
+ LORA = "LORA"
33
+ ADALORA = "ADALORA"
34
+ ADAPTION_PROMPT = "ADAPTION_PROMPT"
35
+ MOELORA = "MOELORA"
36
+
37
+
38
+ class TaskType(str, enum.Enum):
39
+ SEQ_CLS = "SEQ_CLS"
40
+ SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM"
41
+ CAUSAL_LM = "CAUSAL_LM"
42
+ TOKEN_CLS = "TOKEN_CLS"
43
+ QUESTION_ANS = "QUESTION_ANS"
44
+
45
+
46
+ @dataclass
47
+ class PeftConfigMixin(PushToHubMixin):
48
+ r"""
49
+ This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all
50
+ PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to
51
+ push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a
52
+ directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.
53
+
54
+ Args:
55
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
56
+ """
57
+ peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
58
+
59
+ @property
60
+ def __dict__(self):
61
+ return asdict(self)
62
+
63
+ def to_dict(self):
64
+ return self.__dict__
65
+
66
+ def save_pretrained(self, save_directory, **kwargs):
67
+ r"""
68
+ This method saves the configuration of your adapter model in a directory.
69
+
70
+ Args:
71
+ save_directory (`str`):
72
+ The directory where the configuration will be saved.
73
+ kwargs (additional keyword arguments, *optional*):
74
+ Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]
75
+ method.
76
+ """
77
+ if os.path.isfile(save_directory):
78
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
79
+
80
+ os.makedirs(save_directory, exist_ok=True)
81
+
82
+ output_dict = self.__dict__
83
+ output_path = os.path.join(save_directory, CONFIG_NAME)
84
+
85
+ # save it
86
+ with open(output_path, "w") as writer:
87
+ writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
88
+
89
+ @classmethod
90
+ def from_pretrained(cls, pretrained_model_name_or_path, subfolder=None, **kwargs):
91
+ r"""
92
+ This method loads the configuration of your adapter model from a directory.
93
+
94
+ Args:
95
+ pretrained_model_name_or_path (`str`):
96
+ The directory or the Hub repository id where the configuration is saved.
97
+ kwargs (additional keyword arguments, *optional*):
98
+ Additional keyword arguments passed along to the child class initialization.
99
+ """
100
+ path = (
101
+ os.path.join(pretrained_model_name_or_path, subfolder)
102
+ if subfolder is not None
103
+ else pretrained_model_name_or_path
104
+ )
105
+
106
+ hf_hub_download_kwargs, class_kwargs, other_kwargs = cls._split_kwargs(kwargs)
107
+
108
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
109
+ config_file = os.path.join(path, CONFIG_NAME)
110
+ else:
111
+ try:
112
+ config_file = hf_hub_download(
113
+ pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs
114
+ )
115
+ except Exception:
116
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'")
117
+
118
+ loaded_attributes = cls.from_json_file(config_file)
119
+
120
+ config = cls(**class_kwargs)
121
+
122
+ for key, value in loaded_attributes.items():
123
+ if hasattr(config, key):
124
+ setattr(config, key, value)
125
+
126
+ return config
127
+
128
+ @classmethod
129
+ def from_json_file(cls, path_json_file, **kwargs):
130
+ r"""
131
+ Loads a configuration file from a json file.
132
+
133
+ Args:
134
+ path_json_file (`str`):
135
+ The path to the json file.
136
+ """
137
+ with open(path_json_file, "r") as file:
138
+ json_object = json.load(file)
139
+
140
+ return json_object
141
+
142
+ @classmethod
143
+ def _split_kwargs(cls, kwargs):
144
+ hf_hub_download_kwargs = {}
145
+ class_kwargs = {}
146
+ other_kwargs = {}
147
+
148
+ for key, value in kwargs.items():
149
+ if key in inspect.signature(hf_hub_download).parameters:
150
+ hf_hub_download_kwargs[key] = value
151
+ elif key in list(cls.__annotations__):
152
+ class_kwargs[key] = value
153
+ else:
154
+ other_kwargs[key] = value
155
+
156
+ return hf_hub_download_kwargs, class_kwargs, other_kwargs
157
+
158
+ @classmethod
159
+ def _get_peft_type(
160
+ cls,
161
+ model_id,
162
+ subfolder: Optional[str] = None,
163
+ revision: Optional[str] = None,
164
+ cache_dir: Optional[str] = None,
165
+ ):
166
+ path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
167
+
168
+ if os.path.isfile(os.path.join(path, CONFIG_NAME)):
169
+ config_file = os.path.join(path, CONFIG_NAME)
170
+ else:
171
+ try:
172
+ config_file = hf_hub_download(
173
+ model_id, CONFIG_NAME, subfolder=subfolder, revision=revision, cache_dir=cache_dir
174
+ )
175
+ except Exception:
176
+ raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
177
+
178
+ loaded_attributes = cls.from_json_file(config_file)
179
+ return loaded_attributes["peft_type"]
180
+
181
+
182
+ @dataclass
183
+ class PeftConfig(PeftConfigMixin):
184
+ """
185
+ This is the base configuration class to store the configuration of a [`PeftModel`].
186
+
187
+ Args:
188
+ peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
189
+ task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.
190
+ inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.
191
+ """
192
+
193
+ base_model_name_or_path: str = field(default=None, metadata={"help": "The name of the base model to use."})
194
+ revision: str = field(default=None, metadata={"help": "The specific model version to use."})
195
+ peft_type: Union[str, PeftType] = field(default=None, metadata={"help": "Peft type"})
196
+ task_type: Union[str, TaskType] = field(default=None, metadata={"help": "Task type"})
197
+ inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
198
+
199
+
200
+ @dataclass
201
+ class PromptLearningConfig(PeftConfig):
202
+ """
203
+ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
204
+ [`PromptTuning`].
205
+
206
+ Args:
207
+ num_virtual_tokens (`int`): The number of virtual tokens to use.
208
+ token_dim (`int`): The hidden embedding dimension of the base transformer model.
209
+ num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
210
+ num_attention_heads (`int`): The number of attention heads in the base transformer model.
211
+ num_layers (`int`): The number of layers in the base transformer model.
212
+ """
213
+
214
+ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
215
+ token_dim: int = field(
216
+ default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
217
+ )
218
+ num_transformer_submodules: Optional[int] = field(
219
+ default=None, metadata={"help": "Number of transformer submodules"}
220
+ )
221
+ num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
222
+ num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
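A round-trip sketch for the config mixin: `save_pretrained` writes `adapter_config.json` and `from_pretrained` reads it back (`./tmp_adapter` is a throwaway path used only for illustration):

```python
cfg = PeftConfig(peft_type="LORA", task_type="CAUSAL_LM", inference_mode=True)
cfg.save_pretrained("./tmp_adapter")                  # writes ./tmp_adapter/adapter_config.json
reloaded = PeftConfig.from_pretrained("./tmp_adapter")
print(reloaded.peft_type, reloaded.inference_mode)    # LORA True
```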
model/peft/utils/hub_utils.py ADDED
@@ -0,0 +1,29 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from huggingface_hub import get_hf_file_metadata, hf_hub_url
17
+ from huggingface_hub.utils import EntryNotFoundError
18
+
19
+
20
+ def hub_file_exists(repo_id: str, filename: str, revision: str = None, repo_type: str = None) -> bool:
21
+ r"""
22
+ Checks if a file exists in a remote Hub repository.
23
+ """
24
+ url = hf_hub_url(repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision)
25
+ try:
26
+ get_hf_file_metadata(url)
27
+ return True
28
+ except EntryNotFoundError:
29
+ return False
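Usage sketch (needs network access; the repo id is only an example):

```python
print(hub_file_exists("bert-base-uncased", "config.json"))        # True: file is on the Hub
print(hub_file_exists("bert-base-uncased", "adapter_model.bin"))  # False: no adapter weights there
```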
model/peft/utils/other.py ADDED
@@ -0,0 +1,271 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import copy
17
+ import os
18
+ import warnings
19
+
20
+ import torch
21
+
22
+
23
+ # Add or edit model card to have `library_name: peft`
24
+ def add_library_to_model_card(output_dir):
25
+ if os.path.exists(os.path.join(output_dir, "README.md")):
26
+ with open(os.path.join(output_dir, "README.md"), "r") as f:
27
+ lines = f.readlines()
28
+ # check if the first line is `---`
29
+ if len(lines) > 0 and lines[0].startswith("---"):
30
+ for i, line in enumerate(lines[1:]):
31
+ # check if line starts with `library_name`, if yes, update it
32
+ if line.startswith("library_name"):
33
+ lines[i + 1] = "library_name: peft\n"
34
+ break
35
+ elif line.startswith("---"):
36
+ # insert `library_name: peft` before the last `---`
37
+ lines.insert(i + 1, "library_name: peft\n")
38
+ break
39
+ else:
40
+ lines = ["---\n", "library_name: peft\n", "---\n"] + lines
41
+ else:
42
+ lines = ["---\n", "library_name: peft\n", "---\n"]
43
+ # write the lines back to README.md
44
+ with open(os.path.join(output_dir, "README.md"), "w") as f:
45
+ f.writelines(lines)
46
+
47
+
48
+ # needed for prefix-tuning of bloom model
49
+ def bloom_model_postprocess_past_key_value(past_key_values):
50
+ past_key_values = torch.cat(past_key_values)
51
+ total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape
52
+ keys = past_key_values[: total_layers // 2]
53
+ keys = keys.transpose(2, 3).reshape(
54
+ total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens
55
+ )
56
+ values = past_key_values[total_layers // 2 :]
57
+ values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim)
58
+
59
+ return tuple(zip(keys, values))
60
+
61
+
62
+ def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True):
63
+ r"""
64
+ This method wraps the entire protocol for preparing a model before running a training. This includes:
65
+ 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm
66
+ head to fp32
67
+
68
+ Args:
69
+ model, (`transformers.PreTrainedModel`):
70
+ The loaded model from `transformers`
71
+ """
72
+ loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)
73
+
74
+ for name, param in model.named_parameters():
75
+ # freeze base model's layers
76
+ param.requires_grad = False
77
+
78
+ # cast all non INT8 parameters to fp32
79
+ for param in model.parameters():
80
+ if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
81
+ param.data = param.data.to(torch.float32)
82
+
83
+ if loaded_in_kbit and use_gradient_checkpointing:
84
+ # For backward compatibility
85
+ if hasattr(model, "enable_input_require_grads"):
86
+ model.enable_input_require_grads()
87
+ else:
88
+
89
+ def make_inputs_require_grad(module, input, output):
90
+ output.requires_grad_(True)
91
+
92
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
93
+
94
+ # enable gradient checkpointing for memory efficiency
95
+ model.gradient_checkpointing_enable()
96
+
97
+ return model
98
+
99
+
100
+ # For backward compatibility
101
+ def prepare_model_for_int8_training(*args, **kwargs):
102
+ warnings.warn(
103
+ "prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.",
104
+ FutureWarning,
105
+ )
106
+ return prepare_model_for_kbit_training(*args, **kwargs)
107
+
108
+
109
+ # copied from transformers.models.bart.modeling_bart
110
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
111
+ """
112
+ Shift input ids one token to the right.
113
+
114
+ Args:
115
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids
116
+ pad_token_id (`int`): The id of the `padding` token.
117
+ decoder_start_token_id (`int`): The id of the `start` token.
118
+ """
119
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
120
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
121
+ shifted_input_ids[:, 0] = decoder_start_token_id
122
+
123
+ if pad_token_id is None:
124
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
125
+ # replace possible -100 values in labels by `pad_token_id`
126
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
127
+
128
+ return shifted_input_ids
129
+
130
+
131
+ class ModulesToSaveWrapper(torch.nn.Module):
132
+ def __init__(self, module_to_save, adapter_name):
133
+ super().__init__()
134
+ self.original_module = module_to_save
135
+ self.modules_to_save = torch.nn.ModuleDict({})
136
+ self.update(adapter_name)
137
+ self.active_adapter = adapter_name
138
+
139
+ def update(self, adapter_name):
140
+ self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)}))
141
+
142
+ def forward(self, *args, **kwargs):
143
+ if self.active_adapter not in self.modules_to_save:
144
+ return self.original_module(*args, **kwargs)
145
+ return self.modules_to_save[self.active_adapter](*args, **kwargs)
146
+
147
+
148
+ def _get_submodules(model, key):
149
+ parent = model.get_submodule(".".join(key.split(".")[:-1]))
150
+ target_name = key.split(".")[-1]
151
+ target = model.get_submodule(key)
152
+ return parent, target, target_name
153
+
154
+
155
+ def _freeze_adapter(model, adapter_name):
156
+ for n, p in model.named_parameters():
157
+ if adapter_name in n:
158
+ p.requires_grad = False
159
+
160
+
161
+ def _set_trainable(model, adapter_name):
162
+ key_list = [key for key, _ in model.named_modules()]
163
+ for key in key_list:
164
+ target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)
165
+ if target_module_found:
166
+ parent, target, target_name = _get_submodules(model, key)
167
+ if isinstance(target, ModulesToSaveWrapper):
168
+ target.update(adapter_name)
169
+ else:
170
+ for param in target.parameters():
171
+ param.requires_grad = True
172
+ setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name))
173
+
174
+
175
+ def _set_adapter(model, adapter_name):
176
+ for module in model.modules():
177
+ if isinstance(module, ModulesToSaveWrapper):
178
+ module.active_adapter = adapter_name
179
+
180
+
181
+ def fsdp_auto_wrap_policy(model):
182
+ import functools
183
+ import os
184
+
185
+ from accelerate import FullyShardedDataParallelPlugin
186
+ from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy
187
+
188
+ from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder
189
+
190
+ def lambda_policy_fn(module):
191
+ if (
192
+ len(list(module.named_children())) == 0
193
+ and getattr(module, "weight", None) is not None
194
+ and module.weight.requires_grad
195
+ ):
196
+ return True
197
+ return False
198
+
199
+ lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn)
200
+ transformer_wrap_policy = functools.partial(
201
+ transformer_auto_wrap_policy,
202
+ transformer_layer_cls=(
203
+ PrefixEncoder,
204
+ PromptEncoder,
205
+ PromptEmbedding,
206
+ FullyShardedDataParallelPlugin.get_module_class_from_name(
207
+ model, os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "")
208
+ ),
209
+ ),
210
+ )
211
+
212
+ auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy])
213
+ return auto_wrap_policy
214
+
215
+
216
+ def transpose(weight, fan_in_fan_out):
217
+ return weight.T if fan_in_fan_out else weight
218
+
219
+
220
+ TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = {
221
+ "t5": ["q", "v"],
222
+ "mt5": ["q", "v"],
223
+ "bart": ["q_proj", "v_proj"],
224
+ "gpt2": ["c_attn"],
225
+ "bloom": ["query_key_value"],
226
+ "blip-2": ["q", "v", "q_proj", "v_proj"],
227
+ "opt": ["q_proj", "v_proj"],
228
+ "gptj": ["q_proj", "v_proj"],
229
+ "gpt_neox": ["query_key_value"],
230
+ "gpt_neo": ["q_proj", "v_proj"],
231
+ "bert": ["query", "value"],
232
+ "roberta": ["query", "value"],
233
+ "xlm-roberta": ["query", "value"],
234
+ "electra": ["query", "value"],
235
+ "deberta-v2": ["query_proj", "value_proj"],
236
+ "deberta": ["in_proj"],
237
+ "layoutlm": ["query", "value"],
238
+ "llama": ["q_proj", "v_proj"],
239
+ "chatglm": ["query_key_value"],
240
+ "gpt_bigcode": ["c_attn"],
241
+ "mpt": ["Wqkv"],
242
+ }
243
+
244
+ COMMON_LAYERS_PATTERN = ["layers", "h", "block", "blocks"]
245
+
246
+ TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = {
247
+ "t5": ["q", "k", "v", "o", "wi", "wo"],
248
+ "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"],
249
+ "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"],
250
+ # "gpt2": ["c_attn"],
251
+ # "bloom": ["query_key_value"],
252
+ "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"],
253
+ # "gptj": ["q_proj", "v_proj"],
254
+ # "gpt_neox": ["query_key_value"],
255
+ # "gpt_neo": ["q_proj", "v_proj"],
256
+ # "bert": ["query", "value"],
257
+ "roberta": ["query", "key", "value", "dense"],
258
+ # "xlm-roberta": ["query", "value"],
259
+ # "electra": ["query", "value"],
260
+ "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"],
261
+ # "deberta": ["in_proj"],
262
+ # "layoutlm": ["query", "value"],
263
+ }
264
+
265
+ TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {
266
+ "bloom": bloom_model_postprocess_past_key_value,
267
+ }
268
+
269
+ WEIGHTS_NAME = "adapter_model.bin"
270
+ SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors"
271
+ CONFIG_NAME = "adapter_config.json"
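The LoRA-style tuners typically fall back to this mapping when no `target_modules` is given; for the Llama2 backbone used in this repo that means:

```python
default_targets = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING["llama"]
print(default_targets)  # ['q_proj', 'v_proj']
```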
model/peft/utils/save_and_load.py ADDED
@@ -0,0 +1,137 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from .config import PeftType, PromptLearningConfig
17
+
18
+
19
+ def get_peft_model_state_dict(model, state_dict=None, adapter_name="default"):
20
+ """
21
+ Get the state dict of the Peft model.
22
+
23
+ Args:
24
+ model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,
25
+ the model should be the underlying model/unwrapped model (i.e. model.module).
26
+ state_dict (`dict`, *optional*, defaults to `None`):
27
+ The state dict of the model. If not provided, the state dict of the model
28
+ will be used.
29
+ """
30
+ config = model.peft_config[adapter_name]
31
+ if state_dict is None:
32
+ state_dict = model.state_dict()
33
+ if config.peft_type in (PeftType.LORA, PeftType.ADALORA, PeftType.MOELORA):
34
+ # to_return = lora_state_dict(model, bias=model.peft_config.bias)
35
+ # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`
36
+ # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP
37
+ bias = config.bias
38
+ if bias == "none":
39
+ to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "gating" in k}
40
+ elif bias == "all":
41
+ to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k or "gating" in k}
42
+ elif bias == "lora_only":
43
+ to_return = {}
44
+ for k in state_dict:
45
+ if "lora_" in k:
46
+ to_return[k] = state_dict[k]
47
+ bias_name = k.split("lora_")[0] + "bias"
48
+ if bias_name in state_dict:
49
+ to_return[bias_name] = state_dict[bias_name]
50
+ if "gating" in k:
51
+ to_return[k] = state_dict[k]
52
+ else:
53
+ raise NotImplementedError
54
+ to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k) or ("gating" in k))}
55
+ if config.peft_type == PeftType.ADALORA:
56
+ rank_pattern = config.rank_pattern
57
+ if rank_pattern is not None:
58
+ rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()}
59
+ config.rank_pattern = rank_pattern
60
+ to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)
61
+
62
+ elif config.peft_type == PeftType.ADAPTION_PROMPT:
63
+ to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")}
64
+ elif isinstance(config, PromptLearningConfig):
65
+ to_return = {}
66
+ if config.inference_mode:
67
+ prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
68
+ else:
69
+ prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)
70
+ to_return["prompt_embeddings"] = prompt_embeddings
71
+ else:
72
+ raise NotImplementedError
73
+ if model.modules_to_save is not None:
74
+ for key, value in state_dict.items():
75
+ if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save):
76
+ to_return[key.replace("modules_to_save.", "")] = value
77
+
78
+ to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()}
79
+ return to_return
80
+
81
+
82
+ def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"):
83
+ """
84
+ Set the state dict of the Peft model.
85
+
86
+ Args:
87
+ model ([`PeftModel`]): The Peft model.
88
+ peft_model_state_dict (`dict`): The state dict of the Peft model.
89
+ """
90
+ config = model.peft_config[adapter_name]
91
+ state_dict = {}
92
+ if model.modules_to_save is not None:
93
+ for key, value in peft_model_state_dict.items():
94
+ if any(module_name in key for module_name in model.modules_to_save):
95
+ for module_name in model.modules_to_save:
96
+ if module_name in key:
97
+ key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}")
98
+ break
99
+ state_dict[key] = value
100
+ else:
101
+ state_dict = peft_model_state_dict
102
+
103
+ if config.peft_type in (PeftType.LORA, PeftType.ADALORA, PeftType.MOELORA):
104
+ peft_model_state_dict = {}
105
+ for k, v in state_dict.items():
106
+ if "lora_A" in k:
107
+ k = k.replace("lora_A", f"lora_A.{adapter_name}")
108
+ peft_model_state_dict[k] = v
109
+ # suffix = k.split("lora_")[1]
110
+ # if "." in suffix:
111
+ # suffix_to_replace = ".".join(suffix.split(".")[1:])
112
+ # k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}")
113
+ # else:
114
+ # k = f"{k}.{adapter_name}"
115
+ # peft_model_state_dict[k] = v
116
+ elif "lora_B" in k:
117
+ k = k.replace("lora_B", f"lora_B.{adapter_name}")
118
+ peft_model_state_dict[k] = v
119
+ elif "gating" in k:
120
+ k = k.replace("gating", f"gating.{adapter_name}")
121
+ peft_model_state_dict[k] = v
122
+ else:
123
+ peft_model_state_dict[k] = v
124
+ if config.peft_type == PeftType.ADALORA:
125
+ rank_pattern = config.rank_pattern
126
+ if rank_pattern is not None:
127
+ model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)
128
+ elif isinstance(config, PromptLearningConfig) or config.peft_type == PeftType.ADAPTION_PROMPT:
129
+ peft_model_state_dict = state_dict
130
+ else:
131
+ raise NotImplementedError
132
+ load_result = model.load_state_dict(peft_model_state_dict, strict=False)
133
+ if isinstance(config, PromptLearningConfig):
134
+ model.prompt_encoder[adapter_name].embedding.load_state_dict(
135
+ {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True
136
+ )
137
+ return load_result
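A hedged usage sketch: exporting only the adapter tensors (LoRA A/B and gating) and loading them back. `peft_model` is assumed to be an already-built PEFT-wrapped model; the file name follows `WEIGHTS_NAME`:

```python
import torch

# peft_model: assumed PeftModel instance (e.g. the MoE-LoRA-wrapped Llama2 backbone)
adapter_state = get_peft_model_state_dict(peft_model)   # lora_A / lora_B / gating tensors only
torch.save(adapter_state, "adapter_model.bin")

restored = torch.load("adapter_model.bin", map_location="cpu")
set_peft_model_state_dict(peft_model, restored)          # loaded with strict=False on the full model
```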
model/router/_init_.py ADDED
File without changes
model/router/cvr.py ADDED
@@ -0,0 +1,73 @@
1
+ import torch.nn as nn
2
+ import torch.nn.functional as F
3
+
4
+
5
+ class LambdaLayer(nn.Module):
6
+ def __init__(self, lambd):
7
+ super(LambdaLayer, self).__init__()
8
+ self.lambd = lambd
9
+
10
+ def forward(self, x):
11
+ return self.lambd(x)
12
+
13
+
14
+ class Block(nn.Module):
15
+ expansion = 1
16
+
17
+ def __init__(self, in_planes, planes, conv_layer, stride=1):
18
+ super(Block, self).__init__()
19
+ self.conv1 = conv_layer(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
20
+ self.bn1 = nn.BatchNorm2d(planes)
21
+ self.conv2 = conv_layer(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
22
+ self.bn2 = nn.BatchNorm2d(planes)
23
+
24
+ self.shortcut = nn.Sequential()
25
+ if stride != 1 or in_planes != planes:
26
+ diff = planes - in_planes
27
+ self.shortcut = LambdaLayer(
28
+ lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, int(diff * 0.5), int((diff + 1) * 0.5)), "constant", 0))
+
29
+ def forward(self, x):
30
+ out = F.relu(self.bn1(self.conv1(x)))
31
+ out = self.bn2(self.conv2(out))
32
+ out += self.shortcut(x)
33
+ out = F.relu(out)
34
+ return out
35
+
36
+
37
+ class Router(nn.Module):
38
+ def __init__(self, block, num_blocks, num_experts=2):
39
+ super(Router, self).__init__()
40
+ self.in_planes = 16
41
+ self.conv_layer = nn.Conv2d
42
+
43
+ self.conv1 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=1, padding=1, bias=False)
44
+ self.bn1 = nn.BatchNorm2d(self.in_planes)
45
+ self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
46
+ self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
47
+ self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
48
+ self.fc = nn.Linear(64, num_experts)
49
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
50
+
51
+ def _make_layer(self, block, planes, num_blocks, stride):
52
+ planes = planes
53
+ strides = [stride] + [1] * (num_blocks - 1)
54
+ layers = []
55
+ for stride in strides:
56
+ layers.append(block(self.in_planes, planes, self.conv_layer, stride))
57
+ self.in_planes = planes * block.expansion
58
+
59
+ return nn.Sequential(*layers)
60
+
61
+ def forward(self, x):
62
+ out = F.relu(self.bn1(self.conv1(x)))
63
+ out = self.layer1(out)
64
+ out = self.layer2(out)
65
+ out = self.layer3(out)
66
+ out = self.avgpool(out)
67
+ out = out.view(out.size(0), -1)
68
+ out = self.fc(out)
69
+ return out
70
+
71
+
72
+ def build_router(**kwargs):
73
+ return Router(Block, [3, 3, 3], **kwargs)
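Smoke-test sketch for the convolutional router (the CIFAR-sized input is chosen only for illustration):

```python
import torch

router = build_router(num_experts=2)        # ResNet-style router over image inputs
logits = router(torch.randn(4, 3, 32, 32))  # (batch, channels, height, width)
print(logits.shape)                         # torch.Size([4, 2])
```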
model/router/nlpr.py ADDED
@@ -0,0 +1,84 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+
6
+ class LambdaLayer(nn.Module):
7
+ def __init__(self, lambd):
8
+ super(LambdaLayer, self).__init__()
9
+ self.lambd = lambd
10
+
11
+ def forward(self, x):
12
+ return self.lambd(x)
13
+
14
+
15
+ class ResidualBlock(nn.Module):
16
+ expansion = 1
17
+
18
+ def __init__(self, in_planes, planes, conv_layer, stride=1):
19
+ super(ResidualBlock, self).__init__()
20
+ self.conv1 = conv_layer(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
21
+ self.bn1 = nn.BatchNorm1d(planes)
22
+ self.conv2 = conv_layer(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
23
+ self.bn2 = nn.BatchNorm1d(planes)
24
+
25
+ self.shortcut = nn.Sequential()
26
+ if stride != 1 or in_planes != planes:
27
+ diff = planes - in_planes
28
+ self.shortcut = LambdaLayer(
29
+ lambda x: F.pad(x[:, :, ::2], (0, 0, int(diff * 0.5), int((diff + 1) * 0.5)), "constant", 0))
30
+
31
+ def forward(self, x):
32
+ out = F.relu(self.bn1(self.conv1(x)))
33
+ out = self.bn2(self.conv2(out))
34
+ out += self.shortcut(x)
35
+ out = F.relu(out)
36
+ return out
37
+
38
+
39
+ class GateFunction(nn.Module):
40
+ def __init__(self, input_size, output_size):
41
+ super(GateFunction, self).__init__()
42
+ self.fc = nn.Linear(input_size, output_size)
43
+
44
+ def forward(self, x):
45
+ return F.softmax(self.fc(x), dim=-1)
46
+
47
+
48
+ class NLPRecommendationRouter(nn.Module):
49
+ def __init__(self, block, num_blocks, input_size=64, num_experts=4):
50
+ super(NLPRecommendationRouter, self).__init__()
51
+ self.in_planes = 16
52
+ self.conv_layer = nn.Conv1d
53
+
54
+ self.conv1 = nn.Conv1d(1, self.in_planes, kernel_size=3, stride=1, padding=1, bias=False)
55
+ self.bn1 = nn.BatchNorm1d(self.in_planes)
56
+ self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
57
+ self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
58
+ self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
59
+ self.avgpool = nn.AdaptiveAvgPool1d(1)
60
+
61
+ # Gate function
62
+ self.gate = GateFunction(input_size, num_experts)
63
+
64
+ def _make_layer(self, block, planes, num_blocks, stride):
65
+ strides = [stride] + [1] * (num_blocks - 1)
66
+ layers = []
67
+ for stride in strides:
68
+ layers.append(block(self.in_planes, planes, self.conv_layer, stride))
69
+ self.in_planes = planes * block.expansion
70
+ return nn.Sequential(*layers)
71
+
72
+ def forward(self, x):
73
+ out = F.relu(self.bn1(self.conv1(x)))
74
+ out = self.layer1(out)
75
+ out = self.layer2(out)
76
+ out = self.layer3(out)
77
+ out = self.avgpool(out)
78
+ out = out.view(out.size(0), -1)
79
+ out = self.gate(out)
80
+ return out.unsqueeze(1)
81
+
82
+
83
+ def build_router(**kwargs):
84
+ return NLPRecommendationRouter(ResidualBlock, [3, 3, 3], **kwargs)
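Smoke-test sketch for the 1-D router (the `(batch, 1, length)` input shape is an assumption about how the user-sequence embedding is fed in):

```python
import torch

router = build_router()                 # defaults: 4 experts, 64-dim pooled features
gate = router(torch.randn(4, 1, 256))   # (batch, 1 channel, embedding length)
print(gate.shape)                       # torch.Size([4, 1, 4]); softmax weights over experts
```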
optims.py ADDED
@@ -0,0 +1,43 @@
1
+ import math
2
+
3
+ class LinearWarmupCosineLRScheduler:
4
+ def __init__(
5
+ self,
6
+ optimizer,
7
+ min_lr_list,
8
+ init_lr_list,
9
+ warmup_steps=0,
10
+ warmup_start_lr_list=None,
11
+ **kwargs
12
+ ):
13
+ self.optimizer = optimizer
14
+
15
+ self.min_lr_list = min_lr_list
16
+ self.init_lr_list = init_lr_list
17
+ self.warmup_steps = warmup_steps
18
+ self.warmup_start_lr_list = warmup_start_lr_list if warmup_start_lr_list is not None else init_lr_list
19
+
20
+ def step(self, cur_step, cur_epoch, max_step):
21
+ for i, param_group in enumerate(self.optimizer.param_groups):
22
+ if cur_epoch == 0 and cur_step < self.warmup_steps:
23
+ lr = self.warmup_lr_schedule(cur_step, self.warmup_start_lr_list[i], self.init_lr_list[i])
24
+ else:
25
+ lr = self.cosine_lr_schedule(cur_step - self.warmup_steps, max_step - self.warmup_steps, self.init_lr_list[i], self.min_lr_list[i])
26
+ param_group["lr"] = lr
27
+
28
+ def cosine_lr_schedule(self, step, max_step, init_lr, min_lr):
29
+ """Decay the learning rate using cosine schedule"""
30
+ lr = (init_lr - min_lr) * 0.5 * (1 + math.cos(math.pi * step / max_step)) + min_lr
31
+ return lr
32
+
33
+ def warmup_lr_schedule(self, step, init_lr, max_lr):
34
+ """Warmup the learning rate"""
35
+ lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(self.warmup_steps, 1))
36
+ return lr
37
+
38
+ def state_dict(self):
39
+ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
40
+
41
+ def load_state_dict(self, state_dict):
42
+ self.__dict__.update(state_dict)
43
+