repo (stringlengths 1-99) | file (stringlengths 13-215) | code (stringlengths 12-59.2M) | file_length (int64 12-59.2M) | avg_line_length (float64 3.82-1.48M) | max_line_length (int64 12-2.51M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 4):
ABC = abc.ABC
else:
ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
""" Parent of all LRSchedules here. """
warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense
def __init__(self, warmup=0.002, t_total=-1, **kw):
"""
:param warmup: what fraction of t_total steps will be used for linear warmup
:param t_total: how many training steps (updates) are planned
:param kw:
"""
super(_LRSchedule, self).__init__(**kw)
if t_total < 0:
logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
warmup = max(warmup, 0.)
self.warmup, self.t_total = float(warmup), float(t_total)
self.warned_for_t_total_at_progress = -1
def get_lr(self, step, nowarn=False):
"""
:param step: which of t_total steps we're on
:param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
:return: learning rate multiplier for current update
"""
if self.t_total < 0:
return 1.
progress = float(step) / self.t_total
ret = self.get_lr_(progress)
# warning for exceeding t_total (only active for schedules with warn_t_total, e.g. warmup_linear)
if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
logger.warning(
"Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
.format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
# end warning
return ret
@abc.abstractmethod
def get_lr_(self, progress):
"""
:param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
:return: learning rate multiplier for current update
"""
return 1.
class ConstantLR(_LRSchedule):
def get_lr_(self, progress):
return 1.
class WarmupCosineSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
If `cycles` (default=0.5) is different from the default, the learning rate follows a cosine function with `cycles` full periods over the post-warmup steps.
"""
warn_t_total = True
def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
"""
:param warmup: see LRSchedule
:param t_total: see LRSchedule
:param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
:param kw:
"""
super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
self.cycles = cycles
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
If `cycles` (default=1.) is different from the default, the learning rate then follows `cycles` successive cosine decay cycles (with hard restarts) over the post-warmup steps.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
assert(cycles >= 1.)
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
return ret
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
"""
All training progress is divided in `cycles` (default=1.) parts of equal length.
Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
followed by a learning rate decreasing from 1. to 0. following a cosine curve.
"""
def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
assert(warmup * cycles < 1.)
warmup = warmup * cycles if warmup >= 0 else warmup
super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
def get_lr_(self, progress):
progress = progress * self.cycles % 1.
if progress < self.warmup:
return progress / self.warmup
else:
progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup
ret = 0.5 * (1. + math.cos(math.pi * progress))
return ret
class WarmupConstantSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Keeps learning rate equal to 1. after warmup.
"""
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return 1.
class WarmupLinearSchedule(_LRSchedule):
"""
Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
"""
warn_t_total = True
def get_lr_(self, progress):
if progress < self.warmup:
return progress / self.warmup
return max((progress - 1.) / (self.warmup - 1.), 0.)
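# Worked example (illustrative comment, not part of the original file): with warmup=0.1 and
# t_total=1000, WarmupLinearSchedule ramps the multiplier from 0 to 1 over the first 100
# updates, then decays it linearly back to 0:
#
#     schedule = WarmupLinearSchedule(warmup=0.1, t_total=1000)
#     schedule.get_lr(50)     # 0.5 (halfway through warmup)
#     schedule.get_lr(100)    # 1.0 (warmup finished)
#     schedule.get_lr(550)    # 0.5 (halfway through the linear decay)
#     schedule.get_lr(1000)   # 0.0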
SCHEDULES = {
None: ConstantLR,
"none": ConstantLR,
"warmup_cosine": WarmupCosineSchedule,
"warmup_constant": WarmupConstantSchedule,
"warmup_linear": WarmupLinearSchedule
}
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
schedule: schedule to use for the warmup (see above).
Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
If `None` or `'none'`, learning rate is always kept constant.
Default : `'warmup_linear'`
b1: Adam's b1 (first moment decay rate). Default: 0.9
b2: Adam's b2 (second moment decay rate). Default: 0.999
e: Adam's epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
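# Illustrative usage (a sketch added for clarity, not part of the original file): a common
# pattern with this optimizer is to exclude biases and LayerNorm weights from weight decay
# via parameter groups; `model` and `num_train_steps` are placeholders.
#
#     no_decay = ['bias', 'LayerNorm.weight']
#     grouped_params = [
#         {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
#          'weight_decay': 0.01},
#         {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
#          'weight_decay': 0.0},
#     ]
#     optimizer = BertAdam(grouped_params, lr=5e-5, warmup=0.1, t_total=num_train_steps)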
| 13,028 | 42 | 139 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/optimization_openai.py | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for OpenAI GPT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
from .optimization import SCHEDULES, _LRSchedule, WarmupCosineWithWarmupRestartsSchedule, \
WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule, WarmupLinearSchedule, WarmupConstantSchedule
logger = logging.getLogger(__name__)
class OpenAIAdam(Optimizer):
"""Implements Open AI version of Adam algorithm with weight decay fix.
"""
def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1,
b1=0.9, b2=0.999, e=1e-8, weight_decay=0,
vector_l2=False, max_grad_norm=-1, **kwargs):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
# initialize schedule object
if not isinstance(schedule, _LRSchedule):
schedule_type = SCHEDULES[schedule]
schedule = schedule_type(warmup=warmup, t_total=t_total)
else:
if warmup != -1 or t_total != -1:
logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
"Please specify custom warmup and t_total in _LRSchedule object.")
defaults = dict(lr=lr, schedule=schedule,
b1=b1, b2=b2, e=e, weight_decay=weight_decay, vector_l2=vector_l2,
max_grad_norm=max_grad_norm)
super(OpenAIAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['b1'], group['b2']
state['step'] += 1
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['e'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
lr_scheduled = group['lr']
lr_scheduled *= group['schedule'].get_lr(state['step'])
step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
# Add weight decay at the end (fixed version)
if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
p.data.add_(-lr_scheduled * group['weight_decay'], p.data)
return loss
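# Illustrative usage (a sketch, not part of the original file; `model` and `num_train_steps`
# are placeholders):
#
#     optimizer = OpenAIAdam(model.parameters(), lr=6.25e-5, warmup=0.002,
#                            t_total=num_train_steps, weight_decay=0.01, max_grad_norm=1.0)
#
# Unlike BertAdam above, this variant applies Adam bias correction (bias_correction1/2) and,
# unless vector_l2=True, skips weight decay for 1-D parameters such as biases and LayerNorm
# weights (the `len(p.size()) > 1` check).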
| 5,517 | 42.109375 | 134 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/__main__.py | # coding: utf8
def main():
import sys
if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
"convert_tf_checkpoint_to_pytorch",
"convert_openai_checkpoint",
"convert_transfo_xl_checkpoint",
"convert_gpt2_checkpoint",
]:
print(
"Should be used as one of: \n"
">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
else:
if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
try:
from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if len(sys.argv) != 5:
# pylint: disable=line-too-long
print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
else:
PYTORCH_DUMP_OUTPUT = sys.argv.pop()
TF_CONFIG = sys.argv.pop()
TF_CHECKPOINT = sys.argv.pop()
convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_openai_checkpoint":
from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
OPENAI_GPT_CONFIG = sys.argv[4]
else:
OPENAI_GPT_CONFIG = ""
convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
OPENAI_GPT_CONFIG,
PYTORCH_DUMP_OUTPUT)
elif sys.argv[1] == "convert_transfo_xl_checkpoint":
try:
from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
if 'ckpt' in sys.argv[2].lower():
TF_CHECKPOINT = sys.argv[2]
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = sys.argv[2]
TF_CHECKPOINT = ""
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
else:
try:
from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
except ImportError:
print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
TF_CHECKPOINT = sys.argv[2]
PYTORCH_DUMP_OUTPUT = sys.argv[3]
if len(sys.argv) == 5:
TF_CONFIG = sys.argv[4]
else:
TF_CONFIG = ""
convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
if __name__ == '__main__':
main()
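# Example invocations (illustrative; the checkpoint/config/output paths are placeholders,
# not from the original file):
#
#     pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch /path/to/bert_model.ckpt /path/to/bert_config.json /path/to/pytorch_model.bin
#     pytorch_pretrained_bert convert_gpt2_checkpoint /path/to/gpt2/model.ckpt /path/to/output_dir /path/to/gpt2_config.json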
| 4,393 | 51.309524 | 145 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME,
GPT2Config,
GPT2Model,
load_tf_weights_in_gpt2)
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config(gpt2_config_file)
model = GPT2Model(config)
# Load weights from numpy
load_tf_weights_in_gpt2(model, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--gpt2_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--gpt2_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path,
args.gpt2_config_file,
args.pytorch_dump_folder_path)
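# Example invocation (illustrative; the paths are placeholders, not from the original file):
#
#     python convert_gpt2_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#         --pytorch_dump_folder_path /path/to/output_dir \
#         --gpt2_config_file /path/to/gpt2_config.json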
| 3,017 | 40.342466 | 111 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
from __future__ import absolute_import, division, print_function
import argparse
from io import open
import torch
from pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
OpenAIGPTConfig,
OpenAIGPTModel,
load_tf_weights_in_openai_gpt)
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--openai_checkpoint_folder_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
parser.add_argument("--openai_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture.")
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
args.openai_config_file,
args.pytorch_dump_folder_path)
| 3,106 | 41.561644 | 118 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/tokenization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]",
"[MASK]", "[BOS]", "[EOS]", "[SPEAKER1]", "[SPEAKER2]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_basic_tokenize=True
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
def encode(self, text):
return self.convert_tokens_to_ids(self.tokenize(text))
def decode(self, tokens, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = ' '.join(self.convert_ids_to_tokens(tokens))
if clean_up_tokenization_spaces:
text = text.replace('<unk>', '')
text = text.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ", "'").replace(
" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(
" 're", "'re")
return text
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
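# Illustrative usage (a sketch, not part of the original file; assumes the vocabulary file
# can be downloaded or is available locally):
#
#     tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
#     tokens = tokenizer.tokenize("Hello, world!")      # e.g. ['hello', ',', 'world', '!']
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     text = tokenizer.decode(ids)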
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
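# Worked trace (illustrative comment, not part of the original file): for the token
# "unaffable" with a vocab containing "un", "##aff" and "##able", the greedy
# longest-match loop first tries the whole string "unaffable", shrinks `end` until it
# finds "un" in the vocab, then restarts at start=2 with the "##" prefix, matching
# "##aff" and finally "##able", producing ["un", "##aff", "##able"].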
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 18,169 | 40.770115 | 135 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
#from pytorch_pretrained_bert.modeling_openai import OpenAIGPTModel, OpenAIGPTLMHead
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
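# Dimension sketch (illustrative, not part of the original file): pruning a 768->768
# nn.Linear down to 64 output rows (dim=0) keeps 64 rows of the weight and bias:
#
#     layer = nn.Linear(768, 768)
#     index = torch.arange(64)
#     pruned = prune_linear_layer(layer, index, dim=0)
#     # pruned.weight.shape == (64, 768); pruned.bias.shape == (64,)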
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
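# Quick numeric check (illustrative, not part of the original file): the erf-based gelu above
# and OpenAI GPT's tanh approximation agree closely, e.g. for x = 1.0 both give ~0.841:
#
#     gelu(torch.tensor(1.0))                                                          # ~0.8413
#     0.5 * 1.0 * (1 + math.tanh(math.sqrt(2 / math.pi) * (1.0 + 0.044715 * 1.0**3)))  # ~0.8412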
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
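# In equation form (illustrative comment, not part of the original file), the fallback
# BertLayerNorm above computes, over the last (hidden) dimension:
#     y = weight * (x - mean(x)) / sqrt(var(x) + eps) + bias
# i.e. the TF-style LayerNorm with epsilon inside the square root.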
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = output_attentions
self.keep_multihead_output = keep_multihead_output
self.multihead_output = None
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
if self.keep_multihead_output:
self.multihead_output = context_layer
self.multihead_output.retain_grad()
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if self.output_attentions:
return attention_probs, context_layer
return context_layer
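# Shape sketch (illustrative comment, not part of the original file), with batch size B,
# sequence length S, H = num_attention_heads and d = attention_head_size (H * d = hidden_size):
#
#     hidden_states:      (B, S, H*d)
#     query/key/value:    (B, H, S, d)   after transpose_for_scores
#     attention_scores:   (B, H, S, S)   scaled by 1/sqrt(d) and masked additively
#     context_layer:      (B, S, H*d)    after the final permute and view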
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertAttention, self).__init__()
self.output_attentions = output_attentions
self.self = BertSelfAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_output = self.self(input_tensor, attention_mask, head_mask)
if self.output_attentions:
attentions, self_output = self_output
attention_output = self.output(self_output, input_tensor)
if self.output_attentions:
return attentions, attention_output
return attention_output
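# Hedged sketch of what the `prune_linear_layer` helper used in prune_heads above does
# (illustrative only; the actual helper may differ): keep just the features selected by
# `index`, along rows (dim=0) or columns (dim=1) of a linear layer's weight matrix.
def _index_select_linear_sketch(layer, index, dim=0):
    new_weight = layer.weight.index_select(dim, index)
    new_layer = nn.Linear(new_weight.size(1), new_weight.size(0), bias=layer.bias is not None)
    new_layer.weight.data.copy_(new_weight)
    if layer.bias is not None:
        # the bias follows the output features, so it only shrinks when dim == 0
        new_layer.bias.data.copy_(layer.bias if dim == 1 else layer.bias[index])
    return new_layer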
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertLayer, self).__init__()
self.output_attentions = output_attentions
self.attention = BertAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
if self.output_attentions:
attentions, attention_output = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if self.output_attentions:
return attentions, layer_output
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertEncoder, self).__init__()
self.output_attentions = output_attentions
layer = BertLayer(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
all_encoder_layers = []
all_attentions = []
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])
if self.output_attentions:
attentions, hidden_states = hidden_states
all_attentions.append(attentions)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if self.output_attentions:
return all_attentions, all_encoder_layers
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
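# Note on BertLMPredictionHead above: the decoder weight is tied to the input word embedding
# matrix, so the LM head only adds a per-token output bias. A minimal standalone sketch of the
# same weight-tying pattern (names here are illustrative):
def _tied_decoder_sketch(word_embeddings):
    decoder = nn.Linear(word_embeddings.weight.size(1), word_embeddings.weight.size(0), bias=False)
    decoder.weight = word_embeddings.weight  # share the same Parameter; no copy is made
    return decoder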
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
. `bert-base-german-cased`
. `bert-large-uncased-whole-word-masking`
. `bert-large-cased-whole-word-masking`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, BERT_CONFIG_NAME)
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()),
config_file))
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = BertConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
            return load_tf_weights_in_bert(model, resolved_archive_file)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
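# Hedged usage sketch for the loader above (not part of the original file): the first call
# downloads and caches the named checkpoint; a local directory containing `bert_config.json`
# and `pytorch_model.bin` can be passed instead of a model name.
def _load_pretrained_bert_sketch(model_name_or_path='bert-base-uncased'):
    model = BertModel.from_pretrained(model_name_or_path)
    model.eval()  # disable dropout for evaluation / feature extraction
    return model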
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertModel, self).__init__(config)
self.output_attentions = output_attentions
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_multihead_outputs(self):
""" Gather all multi-head outputs.
Return: list (layers) of multihead module outputs with gradients
"""
return [layer.attention.self.multihead_output for layer in self.encoder.layer]
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(
-1) # We can specify head_mask for each layer
head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
head_mask=head_mask)
if self.output_attentions:
all_attentions, encoded_layers = encoded_layers
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if self.output_attentions:
return all_attentions, encoded_layers, pooled_output
return encoded_layers, pooled_output
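# Illustrative sketch of the additive attention mask built in BertModel.forward above: a
# [batch_size, seq_length] mask of 1s (attend) and 0s (ignore) becomes a broadcastable
# [batch_size, 1, 1, seq_length] tensor whose masked positions hold -10000.0, so the softmax
# over attention scores effectively zeroes them out.
def _extended_attention_mask_sketch(attention_mask, dtype=torch.float32):
    extended_mask = attention_mask.unsqueeze(1).unsqueeze(2).to(dtype=dtype)
    return (1.0 - extended_mask) * -10000.0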
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForPreTraining, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False, head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, pooled_output = outputs
else:
sequence_output, pooled_output = outputs
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
elif self.output_attentions:
return all_attentions, prediction_scores, seq_relationship_score
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForMaskedLM, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):
#todo added by rooh
input_shape = input_ids.size() # (B, C, F)
input_ids = input_ids.view(-1, input_ids.size(-1))
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
#todo
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
# #todo
# prediction_scores = prediction_scores[..., :-1, :].contiguous()
# masked_lm_labels = masked_lm_labels[..., 1:].contiguous()
# #todo
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
elif self.output_attentions:
return all_attentions, prediction_scores
return prediction_scores
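# Hedged sketch of the masked-LM label convention used above (token ids are made up for
# illustration): positions that should not contribute to the loss carry the label -1, which
# CrossEntropyLoss(ignore_index=-1) skips, so only masked positions are supervised.
def _masked_lm_labels_sketch():
    input_ids = torch.tensor([[31, 103, 99]])        # 103 standing in for a masked position
    masked_lm_labels = torch.tensor([[-1, 51, -1]])  # supervise only the masked position
    return input_ids, masked_lm_labels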
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForNextSentencePrediction, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
elif self.output_attentions:
return all_attentions, seq_relationship_score
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super(BertForSequenceClassification, self).__init__(config)
self.output_attentions = output_attentions
self.num_labels = num_labels
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
elif self.output_attentions:
return all_attentions, logits
return logits
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices=2, output_attentions=False, keep_multihead_output=False):
super(BertForMultipleChoice, self).__init__(config)
self.output_attentions = output_attentions
self.num_choices = num_choices
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
elif self.output_attentions:
return all_attentions, reshaped_logits
return reshaped_logits
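# Shape bookkeeping for the multiple-choice forward above (sizes are illustrative): inputs of
# shape [batch_size, num_choices, seq_length] are flattened to [batch_size * num_choices,
# seq_length] before running BERT, and the per-choice scores are reshaped back to
# [batch_size, num_choices] for the CrossEntropy loss over choices.
def _multiple_choice_shapes_sketch(batch_size=2, num_choices=2, seq_length=3):
    input_ids = torch.zeros(batch_size, num_choices, seq_length, dtype=torch.long)
    flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # [batch * choices, seq]
    logits = torch.zeros(flat_input_ids.size(0), 1)          # one score per (example, choice)
    return logits.view(-1, num_choices)                      # [batch, choices]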
class BertMultipleChoice(BertPreTrainedModel):
def __init__(self, config, num_choices=2, output_attentions=False, keep_multihead_output=False):
super(BertMultipleChoice, self).__init__(config)
self.output_attentions = output_attentions
self.num_choices = num_choices
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, pooled_output, num_choices=2, labels=None, head_mask=None):
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super(BertForTokenClassification, self).__init__(config)
self.output_attentions = output_attentions
self.num_labels = num_labels
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
elif self.output_attentions:
return all_attentions, logits
return logits
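# Hedged sketch of the "active parts" masking used in the token-classification loss above:
# only positions where the attention mask is 1 contribute to the loss.
def _active_token_loss_sketch(logits, labels, attention_mask, num_labels):
    active = attention_mask.view(-1) == 1
    loss_fct = CrossEntropyLoss()
    return loss_fct(logits.view(-1, num_labels)[active], labels.view(-1)[active])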
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is fully masked.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertForQuestionAnswering, self).__init__(config)
self.output_attentions = output_attentions
self.bert = BertModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split adds an extra dimension that we squeeze here
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
elif self.output_attentions:
return all_attentions, start_logits, end_logits
return start_logits, end_logits
class BertLMHeadModel(BertPreTrainedModel):
def __init__(self, config, output_attentions=False):
super(BertLMHeadModel, self).__init__(config)
self.bert = BertModel(config, output_attentions=output_attentions)
self.lm_head = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, input_mask=None, lm_labels=None, token_type_ids=None, position_ids=None):
input_shape = input_ids.size() # (B, C, F)
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
hidden_states, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
output_all_encoded_layers=False)
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
return loss
return lm_logits
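# Worked sketch of the shift used in the LM loss above: the logit at position t is scored
# against the token at position t + 1, so the last logit and the first label are dropped.
# Tensors are illustrative; labels of -1 are ignored, as elsewhere in this file.
def _shifted_lm_loss_sketch(lm_logits, lm_labels):
    shift_logits = lm_logits[..., :-1, :].contiguous()
    shift_labels = lm_labels[..., 1:].contiguous()
    loss_fct = CrossEntropyLoss(ignore_index=-1)
    return loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))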
class BertDoubleHeadsModel(BertPreTrainedModel):
def __init__(self, config, output_attentions=False):
super(BertDoubleHeadsModel, self).__init__(config)
self.bert = BertModel(config, output_attentions=output_attentions)
self.lm_head = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.multiple_choice_head = BertMultipleChoice(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, mc_token_ids, input_mask=None, lm_labels=None, mc_labels=None, token_type_ids=None,
position_ids=None):
input_shape = input_ids.size() # (B, C, F)
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
hidden_states, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
output_all_encoded_layers=False)
num_choices = input_shape[1]
output_shape = (input_shape) + (hidden_states.size(-1),)
hidden_states = hidden_states.view(*output_shape)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(pooled_output, num_choices=num_choices)
losses = []
if lm_labels is not None:
            # BERT is not a causal language model, so a left-to-right LM loss is not strictly
            # well-defined here; it is used anyway and works reasonably well in practice.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
return lm_logits, mc_logits
# class BertOpenAIDoubleHeadsModel(BertPreTrainedModel):
# def __init__(self, config, output_attentions=False):
# super(BertOpenAIDoubleHeadsModel, self).__init__(config)
# self.bert = BertModel(config, output_attentions=output_attentions)
# self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
# self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
# self.multiple_choice_head = BertMultipleChoice(config)
# self.apply(self.init_bert_weights)
#
# def forward(self, input_ids, mc_token_ids, input_mask=None, lm_labels=None, mc_labels=None, token_type_ids=None,
# position_ids=None):
# input_shape = input_ids.size() # (B, C, F)
# flat_input_ids = input_ids.view(-1, input_ids.size(-1))
# flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
# flat_attention_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
# bert_hidden_states, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
# output_all_encoded_layers=False)
#
# transformer_hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
#
# num_choices = input_shape[1]
# # output_shape = (input_shape) + (hidden_states.size(-1),)
# # hidden_states = hidden_states.view(*output_shape)
#
# lm_logits = self.lm_head(transformer_hidden_states)
# mc_logits = self.multiple_choice_head(pooled_output, num_choices=num_choices)
# losses = []
# if lm_labels is not None:
# shift_logits = lm_logits[..., :-1, :].contiguous()
# shift_labels = lm_labels[..., 1:].contiguous()
# loss_fct = CrossEntropyLoss(ignore_index=-1)
# losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
# if mc_labels is not None:
# loss_fct = CrossEntropyLoss()
# losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
# if losses:
# return losses
# return lm_logits, mc_logits | 85,247 | 51.524954 | 187 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json",
"gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json"}
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'w' or l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'wpe' or l[0] == 'wte':
pointer = getattr(pointer, l[0])
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
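    # Tanh approximation of the Gaussian Error Linear Unit (GELU) activation, as used in the original GPT-2 code.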
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class GPT2Config(object):
"""Configuration class to store the configuration of a `GPT2Model`.
"""
def __init__(
self,
vocab_size_or_config_json_file=50257,
n_special=0,
n_positions=1024,
n_ctx=1024,
n_embd=768,
n_layer=12,
n_head=12,
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
predict_special_tokens=True
):
"""Constructs GPT2Config.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `GPT2Model` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            predict_special_tokens: whether to predict special tokens (when the model has an LM head)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_special = n_special
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.predict_special_tokens = predict_special_tokens
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
def total_tokens_embeddings(self):
return self.vocab_size + self.n_special
@classmethod
def from_dict(cls, json_object):
"""Constructs a `GPT2Config` from a Python dictionary of parameters."""
config = GPT2Config(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class Conv1D(nn.Module):
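    """A 1x1 "convolution" as used in the original GPT-2 code: functionally a linear projection
    from nx to nf features, with the weight stored as (nx, nf) (transposed relative to nn.Linear)
    to match the layout of the OpenAI TF checkpoints."""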
def __init__(self, nf, nx):
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
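        # Lower-triangular matrix used as a causal attention mask; registered as a buffer so it
        # is saved with the model but never updated by the optimizer.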
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = output_attentions
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
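        # Crop the causal mask to the current query/key lengths, then push masked (future)
        # positions to a large negative value so they vanish after the softmax.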
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns-nd:ns, :ns]
w = w * b - 1e4 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
if self.output_attentions:
return w, torch.matmul(w, v)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x, layer_past=None):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
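        # For incremental decoding: `layer_past` holds cached keys/values from previous steps,
        # which are concatenated with the current ones so attention spans the whole prefix.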
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking
a = self._attn(query, key, value)
if self.output_attentions:
attentions, a = a
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
if self.output_attentions:
return attentions, a, present
return a, present
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = gelu
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False, output_attentions=False):
super(Block, self).__init__()
nx = config.n_embd
self.output_attentions = output_attentions
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.attn = Attention(nx, n_ctx, config, scale, output_attentions)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
def forward(self, x, layer_past=None):
output_attn = self.attn(self.ln_1(x), layer_past=layer_past)
if self.output_attentions:
attentions, a, present = output_attn
else:
a, present = output_attn
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
if self.output_attentions:
return attentions, x, present
return x, present
class GPT2LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(GPT2LMHead, self).__init__()
self.n_embd = config.n_embd
self.vocab_size = config.vocab_size
self.predict_special_tokens = config.predict_special_tokens
embed_shape = model_embeddings_weights.shape
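        # The output projection shares (ties) its weight with the input token embedding matrix,
        # set in set_embeddings_weights below.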
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
self.predict_special_tokens = predict_special_tokens
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
lm_logits = self.decoder(hidden_state)
if not self.predict_special_tokens:
lm_logits = lm_logits[..., :self.vocab_size]
return lm_logits
class GPT2MultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(GPT2MultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class GPT2PreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(GPT2PreTrainedModel, self).__init__()
if not isinstance(config, GPT2Config):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `gpt2`
- a path or url to a pretrained model archive containing:
. `gpt2_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
- a path or url to a pretrained model archive containing:
. `gpt2_config.json` a configuration file for the model
. a TensorFlow checkpoint with trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific GPT2 class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
num_special_tokens = kwargs.get('num_special_tokens', None)
kwargs.pop('num_special_tokens', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = GPT2Config.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_gpt2(model, resolved_archive_file)
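        # Map legacy parameter name suffixes from older checkpoints (.g/.b/.w) to the
        # PyTorch naming convention (.weight/.bias).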
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading the weights
model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
return model
class GPT2Model(GPT2PreTrainedModel):
"""OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
    GPT-2 uses a single embedding matrix to store the word and special token embeddings.
    Special token embeddings are additional embeddings for tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs a tuple consisting of:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
        (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2Model(config)
hidden_states, presents = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(GPT2Model, self).__init__(config)
self.output_attentions = output_attentions
self.wte = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.wte
self.wte = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.wte.to(old_embed.weight.device)
self.init_weights(self.wte)
# Copy word embeddings from the previous weights
self.wte.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
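        # `past` is a per-layer list of cached key/value tensors from previous forward passes;
        # `past_length` offsets the position ids so new tokens are placed after the cached prefix.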
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.wte(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
presents = []
all_attentions = []
for block, layer_past in zip(self.h, past):
if self.output_attentions:
attentions, hidden_states, present = block(hidden_states, layer_past)
all_attentions.append(attentions)
else:
hidden_states, present = block(hidden_states, layer_past)
presents.append(present)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
if self.output_attentions:
return all_attentions, hidden_states.view(*output_shape), presents
return hidden_states.view(*output_shape), presents
class GPT2LMHeadModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else a tuple:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
        (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2LMHeadModel(config)
lm_logits, presents = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(GPT2LMHeadModel, self).__init__(config)
self.transformer = GPT2Model(config, output_attentions=output_attentions)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
transformer_output = self.transformer(input_ids, position_ids, token_type_ids, past)
if self.transformer.output_attentions:
all_attentions, hidden_states, presents = transformer_output
else:
hidden_states, presents = transformer_output
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
return loss
if self.transformer.output_attentions:
return all_attentions, lm_logits, presents
return lm_logits, presents
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
"""OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
Params:
config: a GPT2Config class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, config.vocab_size[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., config.vocab_size]
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
`past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
(key and values in the attention blocks) to speed up sequential decoding
(this is the presents output of the model, cf. below).
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
`presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
torch.FloatTensors. They can be reused to speed up sequential decoding.
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]]) # (bsz, number of choice, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)
config = modeling_gpt2.GPT2Config()
model = modeling_gpt2.GPT2DoubleHeadsModel(config)
lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(GPT2DoubleHeadsModel, self).__init__(config)
self.transformer = GPT2Model(config, output_attentions=output_attentions)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
self.multiple_choice_head = GPT2MultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
transformer_output = self.transformer(input_ids, position_ids, token_type_ids, past)
if self.transformer.output_attentions:
all_attentions, hidden_states, presents = transformer_output
else:
hidden_states, presents = transformer_output
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
if lm_labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits, presents
return lm_logits, mc_logits, presents
| 38,587 | 45.944039 | 146 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
print("Loading weights...")
names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'w':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
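    # Tanh approximation of the Gaussian Error Linear Unit (GELU) activation.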
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu}
class OpenAIGPTConfig(object):
"""Configuration class to store the configuration of a `OpenAIGPTModel`.
"""
def __init__(
self,
vocab_size_or_config_json_file=40478,
n_special=0,
n_positions=512,
n_ctx=512,
n_embd=768,
n_layer=12,
n_head=12,
afn="gelu",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
predict_special_tokens=True
):
"""Constructs OpenAIGPTConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
n_embd: Dimensionality of the embeddings and hidden states.
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
afn: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            predict_special_tokens: whether to predict special tokens (when the model has an LM head)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.n_special = n_special
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.predict_special_tokens = predict_special_tokens
else:
raise ValueError(
"First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)"
)
@property
def total_tokens_embeddings(self):
return self.vocab_size + self.n_special
@classmethod
def from_dict(cls, json_object):
"""Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `OpenAIGPTConfig` from a json file of parameters."""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class Conv1D(nn.Module):
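    """A 1x1 "convolution" (rf == 1): a linear projection from nx to nf features with the weight
    stored as (nx, nf), transposed relative to nn.Linear, to match the original TF checkpoint
    layout. The rf != 1 case from the original LM training code is not ported."""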
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = Parameter(w)
self.bias = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
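        # Lower-triangular causal attention mask, registered as a (non-trainable) buffer.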
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.output_attentions = output_attentions
self.c_attn = Conv1D(n_state * 3, 1, nx) # (out_channels, size_conv, in_channels)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e9 * (1 - b)
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
if self.output_attentions:
return w, torch.matmul(w, v)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
if self.output_attentions:
attentions, a = a
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
if self.output_attentions:
return attentions, a
return a
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False, output_attentions=False):
super(Block, self).__init__()
nx = config.n_embd
self.output_attentions = output_attentions
self.attn = Attention(nx, n_ctx, config, scale, output_attentions)
self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x):
a = self.attn(x)
if self.output_attentions:
attentions, a = a
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
if self.output_attentions:
return attentions, h
return h
class OpenAIGPTLMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(OpenAIGPTLMHead, self).__init__()
self.n_embd = config.n_embd
self.vocab_size = config.vocab_size
self.predict_special_tokens = config.predict_special_tokens
embed_shape = model_embeddings_weights.shape
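        # The output projection shares (ties) its weight with the input token embedding matrix,
        # set in set_embeddings_weights below.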
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
self.predict_special_tokens = predict_special_tokens
embed_shape = model_embeddings_weights.shape
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
lm_logits = self.decoder(hidden_state)
if not self.predict_special_tokens:
lm_logits = lm_logits[..., :self.vocab_size]
return lm_logits
class OpenAIGPTMultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTMultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# multiple_choice_h (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
class OpenAIGPTPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(OpenAIGPTPreTrainedModel, self).__init__()
if not isinstance(config, OpenAIGPTConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, num_special_tokens=None, *inputs, **kwargs):
"""
Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `openai-gpt`
- a path or url to a pretrained model archive containing:
. `openai_gpt_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
- a path or url to a pretrained model archive containing:
. `openai-gpt-config.json` a configuration file for the model
. a series of NumPy files containing OpenAI TensorFlow trained weights
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
                state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific OpenAI-GPT class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
archive_file, config_file
)
)
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = OpenAIGPTConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint (stored as NumPy array)
return load_tf_weights_in_openai_gpt(model, resolved_archive_file)
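        # Map legacy parameter name suffixes (.g/.b/.w) to the PyTorch naming convention
        # (.weight/.bias).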
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if key.endswith(".g"):
new_key = key[:-2] + ".weight"
elif key.endswith(".b"):
new_key = key[:-2] + ".bias"
elif key.endswith(".w"):
new_key = key[:-2] + ".weight"
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
start_model = model
if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
start_model = model.transformer
load(start_model, prefix="")
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
)
# Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading the weights
model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
return model
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special token embeddings.
    Special token embeddings are additional embeddings for tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
Outputs:
`hidden_states`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTModel(config)
hidden_states = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(OpenAIGPTModel, self).__init__(config)
self.output_attentions = output_attentions
self.tokens_embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initialize all new embeddings (in particular the special tokens)
old_embed = self.tokens_embed
self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
self.tokens_embed.to(old_embed.weight.device)
self.init_weights(self.tokens_embed)
# Copy word embeddings from the previous weights
self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
def forward(self, input_ids, position_ids=None, token_type_ids=None, token_emotion_ids=None, token_action_ids=None):
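        # `token_emotion_ids` and `token_action_ids` are extra per-token category ids added in
        # EmpTransfo on top of the original OpenAI GPT inputs; like `token_type_ids`, they are
        # embedded with the shared token embedding matrix and summed into the input representation.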
if position_ids is None:
# This was used when we had a single embedding matrice from position and token embeddings
# start = self.config.vocab_size + self.config.n_special
# end = start + input_ids.size(-1)
# position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
position_ids = position_ids.view(-1, position_ids.size(-1))
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
if token_emotion_ids is not None:
token_emotion_ids = token_emotion_ids.view(-1, token_emotion_ids.size(-1))
token_emotion_embeds = self.tokens_embed(token_emotion_ids)
else:
token_emotion_embeds = 0
if token_action_ids is not None:
token_action_ids = token_action_ids.view(-1, token_action_ids.size(-1))
token_action_embeds = self.tokens_embed(token_action_ids)
else:
token_action_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds + token_emotion_embeds + token_action_embeds
hidden_states = self.drop(hidden_states)
all_attentions = []
for block in self.h:
if self.output_attentions:
attentions, hidden_states = block(hidden_states)
all_attentions.append(attentions)
else:
hidden_states = block(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
if self.output_attentions:
return all_attentions, hidden_states.view(*output_shape)
return hidden_states.view(*output_shape)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").
    OpenAI GPT uses a single embedding matrix to store the word and special token embeddings.
    Special token embeddings are additional embeddings for tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
    The embeddings are ordered as follows in the token embedding matrix:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `lm_labels` is not `None`:
Outputs the language modeling loss.
else:
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
            (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTLMHeadModel(config)
lm_logits = model(input_ids)
```
"""
def __init__(self, config, output_attentions=False):
super(OpenAIGPTLMHeadModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
if lm_labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
return loss
if self.transformer.output_attentions:
return all_attentions, lm_logits
return lm_logits
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
"""OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").
OpenAI GPT use a single embedding matrix to store the word and special embeddings.
Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
Special tokens need to be trained during the fine-tuning if you use them.
The number of special embeddings can be controled using the `set_num_special_tokens(num_special_tokens)` function.
The embeddings are ordered as follow in the token embeddings matrice:
[0, ----------------------
... -> word embeddings
config.vocab_size - 1, ______________________
config.vocab_size,
... -> special embeddings
config.vocab_size + config.n_special - 1] ______________________
where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.
Params:
config: a OpenAIGPTConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
indices selected in the range [0, total_tokens_embeddings[
`mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
`position_ids`: an optional torch.LongTensor with the same shape as input_ids
with the position indices (selected in the range [0, config.n_positions - 1[.
`token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
You can use it to add a third type of embedding to each input token in the sequence
(the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first
self-attention block.
`lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., total_tokens_embeddings]
`multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].
Outputs:
if `lm_labels` and `multiple_choice_labels` are not `None`:
Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
else: a tuple with
`lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
`multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
Example usage:
```python
# Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)
config = modeling_openai.OpenAIGPTConfig()
model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
```
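    A minimal training sketch (illustrative; the label values below are assumptions):
    ```python
    lm_labels = input_ids.clone()       # language modeling targets; use -1 to mask positions
    mc_labels = torch.LongTensor([0])   # index of the correct candidate, shape (bsz,)
    lm_loss, mc_loss = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
    (lm_loss + mc_loss).backward()
    ```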
"""
def __init__(self, config, output_attentions=False):
super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None,
token_type_ids=None, token_emotion_ids=None, token_action_ids=None, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids, token_action_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
losses = []
        if lm_labels is not None:  # rows of lm_labels set entirely to -1 mark wrong (negative) candidates; CrossEntropyLoss(ignore_index=-1) skips those positions
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits
return lm_logits, mc_logits
###############################################################################
class OpenAIGPTEmotionChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTEmotionChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
num_emotions = 7
self.linear = nn.Linear(config.n_embd, num_emotions)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, seq_length, hidden_size)
# mc_token_ids (bsz,)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, 1, hidden_size)
multiple_choice_h = hidden_states.gather(1, mc_token_ids).squeeze(1)
# multiple_choice_h (bsz, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h)
multiple_choice_logits = self.linear(multiple_choice_h)
        # (bsz, num_emotions)
return multiple_choice_logits
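# A minimal usage sketch for the head above (illustrative; every size and index below is an assumption):
#
#     head = OpenAIGPTEmotionChoiceHead(config)
#     hidden_states = torch.rand(2, 5, config.n_embd)   # (bsz, seq_length, hidden_size)
#     mc_token_ids = torch.LongTensor([4, 3])           # (bsz,) position of the classification token
#     logits = head(hidden_states, mc_token_ids)        # (bsz, num_emotions) == (2, 7)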
class OpenAIGPTBatchedEmotionChoiceHead(nn.Module):
def __init__(self, config):
super(OpenAIGPTBatchedEmotionChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
num_emotions = 7
self.linear = nn.Linear(config.n_embd, num_emotions)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# multiple_choice_h (bsz, num_choices, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
        # (bsz, num_choices, num_emotions)
return multiple_choice_logits
class OpenAIGPTEmotionHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(OpenAIGPTEmotionHead, self).__init__()
self.n_embd = config.n_embd
self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation
        num_classes = 2 # this probably needs to be 1
self.linear = nn.Linear(config.n_embd, num_classes)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, seq_length, hidden_size)
# mc_token_ids (bsz,)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
# mc_token_ids (bsz, 1, hidden_size)
multiple_choice_h = hidden_states.gather(1, mc_token_ids).squeeze(1)
# multiple_choice_h (bsz, hidden_size)
multiple_choice_h = self.dropout(multiple_choice_h)
multiple_choice_logits = self.linear(multiple_choice_h)
        # (bsz, num_classes)
return multiple_choice_logits
class OpenAIGPTDoubleHeadLMEmotionRecognitionModel(OpenAIGPTPreTrainedModel):
def __init__(self, config, output_attentions=False):
super(OpenAIGPTDoubleHeadLMEmotionRecognitionModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.emotion_choice_head = OpenAIGPTEmotionChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None,
token_emotion_ids=None, position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
mc_logits = self.emotion_choice_head(hidden_states, mc_token_ids)
losses = []
        if lm_labels is not None:  # rows of lm_labels set entirely to -1 mark wrong (negative) candidates; CrossEntropyLoss(ignore_index=-1) skips those positions
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
# loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits
return lm_logits, mc_logits
class OpenAIGPTForEmotionDetection(OpenAIGPTPreTrainedModel):
def __init__(self, config, output_attentions=False):
super(OpenAIGPTForEmotionDetection, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.emotion_classification_head = OpenAIGPTEmotionHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, mc_token_ids, lm_labels=None,
mc_labels=None, token_type_ids=None, position_ids=None, token_emotion_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids=token_emotion_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
mc_logits = self.emotion_classification_head(hidden_states, mc_token_ids)
losses = []
        if lm_labels is not None:  # rows of lm_labels set entirely to -1 mark wrong (negative) candidates; CrossEntropyLoss(ignore_index=-1) skips those positions
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if mc_labels is not None:
# loss_fct = CrossEntropyLoss(ignore_index=-1)
loss_fct = CrossEntropyLoss()
losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, mc_logits
return lm_logits, mc_logits
class OpenAIGPTMultiHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config, output_attentions=False):
super(OpenAIGPTMultiHeadModel, self).__init__(config)
self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
self.emotion_choice_head = OpenAIGPTBatchedEmotionChoiceHead(config)
self.sentence_choice_head = OpenAIGPTMultipleChoiceHead(config)
self.apply(self.init_weights)
def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
""" Update input and output embeddings with new embedding matrice
Make sure we are sharing the embeddings
"""
self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight,
predict_special_tokens=predict_special_tokens)
def forward(self, input_ids, ec_token_ids, sc_token_ids, lm_labels=None,
ec_labels=None, sc_labels=None, token_type_ids=None,
token_emotion_ids=None, token_action_ids=None,
position_ids=None):
hidden_states = self.transformer(input_ids, position_ids, token_type_ids, token_emotion_ids)
if self.transformer.output_attentions:
all_attentions, hidden_states = hidden_states
lm_logits = self.lm_head(hidden_states)
emotion_logits = self.emotion_choice_head(hidden_states, ec_token_ids)
sentence_logits = self.sentence_choice_head(hidden_states, sc_token_ids)
losses = []
        if lm_labels is not None:  # rows of lm_labels set entirely to -1 mark wrong (negative) candidates; CrossEntropyLoss(ignore_index=-1) skips those positions
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = lm_labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
if ec_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(emotion_logits.view(-1, emotion_logits.size(-1)), ec_labels.view(-1)))
if sc_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
losses.append(loss_fct(sentence_logits.view(-1, sentence_logits.size(-1)), sc_labels.view(-1)))
if losses:
return losses
if self.transformer.output_attentions:
return all_attentions, lm_logits, emotion_logits, sentence_logits
return lm_logits, emotion_logits, sentence_logits
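# A minimal usage sketch for OpenAIGPTMultiHeadModel (illustrative; every shape and value below is an assumption):
#
#     model = OpenAIGPTMultiHeadModel(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 2, 6))  # (bsz, num_choices, seq_length)
#     ec_token_ids = torch.LongTensor([[5, 5]])                   # (bsz, num_choices) emotion-classification token index
#     sc_token_ids = torch.LongTensor([[5, 5]])                   # (bsz, num_choices) sentence-choice token index
#     lm_logits, emotion_logits, sentence_logits = model(input_ids, ec_token_ids, sc_token_ids)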
| 53,002 | 47.626606 | 186 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
from io import open
import torch
import pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
from pytorch_pretrained_bert.modeling_transfo_xl import (CONFIG_NAME,
WEIGHTS_NAME,
TransfoXLConfig,
TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
VOCAB_NAME)
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
transfo_xl_config_file,
pytorch_dump_folder_path,
transfo_xl_dataset_file):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop('vocab', None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path",
default = None,
type = str,
required = True,
help = "Path to the folder to store the PyTorch model or dataset/vocab.")
parser.add_argument("--tf_checkpoint_path",
default = "",
type = str,
help = "An optional path to a TensorFlow checkpoint path to be converted.")
parser.add_argument("--transfo_xl_config_file",
default = "",
type = str,
help = "An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--transfo_xl_dataset_file",
default = "",
type = str,
help = "An optional dataset file to be converted in a vocabulary.")
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file)
| 5,671 | 47.478632 | 121 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_pretrained_bert')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
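# Illustrative sketch of the naming scheme above (the URL and ETag are placeholders; the actual
# sha256 digests are not reproduced here):
#
#     url_to_filename("https://example.com/vocab.txt")              -> "<sha256 of url>"
#     url_to_filename("https://example.com/vocab.txt", etag='"x"')  -> "<sha256 of url>.<sha256 of etag>"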
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| 9,347 | 32.385714 | 98 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import argparse
import tensorflow as tf
import torch
import numpy as np
from pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = BertConfig.from_json_file(bert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--tf_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path the TensorFlow checkpoint path.")
parser.add_argument("--bert_config_file",
default = None,
type = str,
required = True,
help = "The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--pytorch_dump_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
args.bert_config_file,
args.pytorch_dump_path)
| 2,593 | 37.716418 | 101 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import collections
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling import BertLayerNorm as LayerNorm
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin",
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json",
}
TF_WEIGHTS_NAME = 'model.ckpt'
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, 'transformer'):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update({
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias})
for i, (out_l, proj_l, tie_proj) in enumerate(zip(
model.crit.out_layers,
model.crit.out_projs,
config.tie_projs)):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({
layer_str + 'b': out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({
layer_str + 'lookup_table': out_l.weight,
layer_str + 'b': out_l.bias})
if not tie_proj:
tf_to_pt_map.update({
layer_str + 'proj': proj_l
})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({
layer_str + 'lookup_table': embed_l.weight,
layer_str + 'proj_W': proj_l
})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update({
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
})
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({
'transformer/r_r_bias': r_r_list,
'transformer/r_w_bias': r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if 'kernel' in name or 'proj' in name:
array = np.transpose(array)
if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:
            # Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
print("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + '/Adam', None)
tf_weights.pop(name + '/Adam_1', None)
print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
class TransfoXLConfig(object):
"""Configuration class to store the configuration of a `TransfoXLModel`.
"""
def __init__(self,
vocab_size_or_config_json_file=267735,
cutoffs=[20000, 40000, 200000],
d_model=1024,
d_embed=1024,
n_head=16,
d_head=64,
d_inner=4096,
div_val=4,
pre_lnorm=False,
n_layer=18,
tgt_len=128,
ext_len=0,
mem_len=1600,
clamp_len=1000,
same_length=True,
proj_share_all_but_first=True,
attn_type=0,
sample_softmax=-1,
adaptive=True,
tie_weight=True,
dropout=0.1,
dropatt=0.0,
untie_r=True,
init="normal",
init_range=0.01,
proj_init_std=0.01,
init_std=0.02):
"""Constructs TransfoXLConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `TransfoXLModel` or a configuration json file.
cutoffs: cutoffs for the adaptive softmax
d_model: Dimensionality of the model's hidden states.
d_embed: Dimensionality of the embeddings
d_head: Dimensionality of the model's heads.
            div_val: divisor used to shrink the embedding size of the adaptive input and softmax clusters
pre_lnorm: apply LayerNorm to the input instead of the output
d_inner: Inner dimension in FF
n_layer: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
tgt_len: number of tokens to predict
ext_len: length of the extended context
            mem_len: length of the retained previous hidden states (memory)
same_length: use the same attn length for all tokens
proj_share_all_but_first: True to share all but first projs, False not to share.
attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
clamp_len: use the same pos embeddings after clamp_len
sample_softmax: number of samples in sampled softmax
adaptive: use adaptive softmax
tie_weight: tie the word embedding and softmax weights
            dropout: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
dropatt: The dropout ratio for the attention probabilities.
untie_r: untie relative position biases
init: parameter initializer to use
init_range: parameters initialized by U(-init_range, init_range).
            proj_init_std: parameters initialized by N(0, proj_init_std)
init_std: parameters initialized by N(0, init_std)
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.n_token = vocab_size_or_config_json_file
self.cutoffs = []
self.cutoffs.extend(cutoffs)
self.tie_weight = tie_weight
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs)
else:
self.tie_projs = [False] + [False] * len(self.cutoffs)
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.tgt_len = tgt_len
self.ext_len = ext_len
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `TransfoXLConfig` from a Python dictionary of parameters."""
config = TransfoXLConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `TransfoXLConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
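# A minimal configuration round-trip sketch (illustrative; the file name is a placeholder):
#
#     config = TransfoXLConfig(vocab_size_or_config_json_file=267735)
#     config.to_json_file("transfo_xl_config.json")
#     config = TransfoXLConfig.from_json_file("transfo_xl_config.json")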
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:,None,:].expand(-1, bsz, -1)
else:
return pos_emb[:,None,:]
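# Layout note for the encoding above: with inv_freq[i] = 10000^(-2i/demb), the first demb/2 channels of
# pos_emb are sin(pos * inv_freq) and the last demb/2 channels are cos(pos * inv_freq), i.e. the sines
# and cosines are concatenated rather than interleaved.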
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
##### residual connection
output = core_out + inp
else:
##### positionwise feed-forward
core_out = self.CoreNet(inp)
##### residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False, r_r_bias=None, r_w_bias=None):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [hlen x bsz x n_head x d_head]
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
##### layer normalization
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
# [qlen x klen x bsz x n_head]
attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = h + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False,
r_r_bias=None, r_w_bias=None):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m,:m] = torch.triu(mask[:m,:m])
mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:,:,None,None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score = attn_score.float().masked_fill(
attn_mask[None,:,:,None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
attn_score = attn_score.float().masked_fill(
attn_mask[:,:,:,None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
**kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout,
**kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, r,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
sample_softmax=False):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
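# Worked example of the adaptive sizes above (a sketch using the defaults from TransfoXLConfig):
# with d_embed=1024 and div_val=4, the per-cluster embedding widths d_emb_i = d_embed // (div_val ** i)
# are 1024, 256, 64 and 16 for the four vocabulary clusters defined by cutoffs=[20000, 40000, 200000],
# and each cluster's embedding is projected back up to d_proj before being returned.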
class TransfoXLPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(TransfoXLPreTrainedModel, self).__init__()
if not isinstance(config, TransfoXLConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weight(self, weight):
if self.config.init == 'uniform':
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == 'normal':
nn.init.normal_(weight, 0.0, self.config.init_std)
def init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def init_weights(self, m):
""" Initialize the weights.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
if hasattr(m, 'weight') and m.weight is not None:
self.init_weight(m.weight)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('AdaptiveEmbedding') != -1:
if hasattr(m, 'emb_projs'):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('Embedding') != -1:
if hasattr(m, 'weight'):
self.init_weight(m.weight)
elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
self.init_weight(m.cluster_weight)
if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
self.init_bias(m.cluster_bias)
if hasattr(m, 'out_projs'):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find('LayerNorm') != -1:
if hasattr(m, 'weight'):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, 'bias') and m.bias is not None:
self.init_bias(m.bias)
elif classname.find('TransformerLM') != -1:
if hasattr(m, 'r_emb'):
self.init_weight(m.r_emb)
if hasattr(m, 'r_w_bias'):
self.init_weight(m.r_w_bias)
if hasattr(m, 'r_r_bias'):
self.init_weight(m.r_r_bias)
if hasattr(m, 'r_bias'):
self.init_bias(m.r_bias)
def set_num_special_tokens(self, num_special_tokens):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `transfo-xl-wt103`
- a path or url to a pretrained model archive containing:
. `transfo_xl_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
- a path or url to a pretrained model archive containing:
. `transfo_xl_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
*inputs, **kwargs: additional input for the specific TransformerXL class
"""
state_dict = kwargs.get('state_dict', None)
kwargs.pop('state_dict', None)
cache_dir = kwargs.get('cache_dir', None)
kwargs.pop('cache_dir', None)
from_tf = kwargs.get('from_tf', False)
kwargs.pop('from_tf', None)
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} and {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
archive_file, config_file))
return None
if resolved_archive_file == archive_file and resolved_config_file == config_file:
logger.info("loading weights file {}".format(archive_file))
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = TransfoXLConfig.from_json_file(resolved_config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()):
start_prefix = 'transformer.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
# Make sure we are still sharing the input and output embeddings
if hasattr(model, 'tie_weights'):
model.tie_weights()
return model
class TransfoXLModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
        - you don't need to specify positioning embeddings indices
        - the tokens in the vocabulary have to be sorted in decreasing order of frequency.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
        `mems`: optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
A tuple of (last_hidden_state, new_mems)
`last_hidden_state`: the encoded-hidden-states at the top of the model
as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
model = TransfoXLModel(config)
last_hidden_state, new_mems = model(input_ids)
# Another time on input_ids_next using the memory:
last_hidden_state, new_mems = model(input_ids_next, new_mems)
```
"""
def __init__(self, config):
super(TransfoXLModel, self).__init__(config)
self.n_token = config.n_token
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs,
div_val=config.div_val)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type == 1: # learnable embeddings
for i in range(config.n_layer):
self.layers.append(
RelLearnableDecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
elif config.attn_type in [2, 3]: # absolute embeddings
for i in range(config.n_layer):
self.layers.append(
DecoderLayer(
config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias)
)
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.r_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head))
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.apply(self.init_weights)
def backward_compatible(self):
self.sample_softmax = -1
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self, data):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model,
dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1+mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, self.r_w_bias[i],
r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, qlen, mlen)
return core_out, new_mems
def forward(self, input_ids, mems=None):
""" Params:
input_ids :: [bsz, len]
                mems :: optional mems from previous forward passes (or init_mems)
list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Returns:
tuple (last_hidden, new_mems) where:
new_mems: list (num layers) of mem states at the entry of each layer
shape :: [self.config.mem_len, bsz, self.config.d_model]
last_hidden: output of the last layer:
shape :: [bsz, len, self.config.d_model]
"""
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
input_ids = input_ids.transpose(0, 1).contiguous()
if mems is None:
mems = self.init_mems(input_ids)
last_hidden, new_mems = self._forward(input_ids, mems=mems)
# We transpose back here to shape [bsz, len, hidden_dim]
last_hidden = last_hidden.transpose(0, 1).contiguous()
return (last_hidden, new_mems)
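# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Minimal example of loading the published 'transfo-xl-wt103' weights through
# from_pretrained() and running two forward passes chained through the memory.
# Calling this function downloads (or reads from cache) the pretrained archive;
# the helper name and the toy input ids are illustrative assumptions.
def _example_pretrained_forward(cache_dir=None):
    model = TransfoXLModel.from_pretrained('transfo-xl-wt103', cache_dir=cache_dir)
    model.eval()
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) # already BPE-encoded ids
    with torch.no_grad():
        last_hidden, mems = model(input_ids) # first segment, fresh memory
        last_hidden, mems = model(input_ids, mems=mems) # second segment reuses memory
    return last_hidden.shape # [batch_size, seq_len, d_model]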
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
"""Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context").
    This model adds an (adaptive) softmax head on top of the TransfoXLModel.
    Transformer-XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that:
        - you don't need to specify positioning embeddings indices
        - the tokens in the vocabulary have to be sorted in decreasing order of frequency.
Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied.
Params:
config: a TransfoXLConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the token indices selected in the range [0, self.config.n_token[
`target`: an optional torch.LongTensor of shape [batch_size, sequence_length]
with the target token indices selected in the range [0, self.config.n_token[
`mems`: an optional memory of hidden states from previous forward passes
as a list (num layers) of hidden states at the entry of each layer
each hidden states has shape [self.config.mem_len, bsz, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Outputs:
        A tuple of (softmax_output, new_mems)
        `softmax_output`: output of the (adaptive) softmax:
            if target is not None:
                Negative log likelihood of shape [batch_size, sequence_length]
            else:
                log probabilities of tokens, shape [batch_size, sequence_length, n_tokens]
`new_mems`: list (num layers) of updated mem states at the entry of each layer
each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model]
Note that the first two dimensions are transposed in `mems` with regards to `input_ids` and `target`
Example usage:
```python
# Already been converted into BPE token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]])
config = TransfoXLConfig()
    model = TransfoXLLMHeadModel(config)
    softmax_output, new_mems = model(input_ids)
    # Another time on input_ids_next using the memory:
    softmax_output, new_mems = model(input_ids_next, mems=new_mems)
```
"""
def __init__(self, config):
super(TransfoXLLMHeadModel, self).__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
# use sampled softmax
if config.sample_softmax > 0:
self.out_layer = nn.Linear(config.d_model, config.n_token)
self.sampler = LogUniformSampler(config.n_token, config.sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model,
config.cutoffs, div_val=config.div_val)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Run this to be sure output and input (adaptive) softmax weights are tied """
# sampled softmax
if self.sample_softmax > 0:
if self.config.tie_weight:
self.out_layer.weight = self.transformer.word_emb.weight
# adaptive softmax (including standard softmax)
else:
if self.config.tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_length(self, tgt_len, ext_len, mem_len):
self.transformer.reset_length(tgt_len, ext_len, mem_len)
def init_mems(self, data):
return self.transformer.init_mems(data)
def forward(self, input_ids, target=None, mems=None):
""" Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
softmax_output: output of the (adaptive) softmax:
                        if target is not None:
                            Negative log likelihood of shape :: [bsz, len]
                        else:
                            log probabilities of tokens, shape :: [bsz, len, n_tokens]
"""
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
# We transpose back
return (softmax_output, new_mems)
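# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows how the LM head returns per-token negative log likelihood when `target` is
# given and full log probabilities when it is not. Uses the default (wt103-sized)
# TransfoXLConfig exactly as the docstring example does; the helper name and the
# toy ids are illustrative assumptions.
def _example_lm_head_outputs():
    config = TransfoXLConfig()
    model = TransfoXLLMHeadModel(config)
    model.eval()
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    target = torch.LongTensor([[51, 99, 0], [5, 0, 21]]) # next-token targets
    with torch.no_grad():
        nll, mems = model(input_ids, target=target) # nll :: [bsz, len]
        log_probs, mems = model(input_ids, mems=mems) # log_probs :: [bsz, len, n_token]
    return nll.mean(), log_probs.shape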
| 59,065 | 41.40201 | 131 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import logging
import os
import sys
from collections import Counter, OrderedDict
from io import open
import unicodedata
import torch
import numpy as np
from .file_utils import cached_path
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
}
VOCAB_NAME = 'vocab.bin'
PRETRAINED_CORPUS_ARCHIVE_MAP = {
'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
}
CORPUS_NAME = 'corpus.bin'
class TransfoXLTokenizer(object):
"""
Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl
"""
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a TransfoXLTokenizer.
        Download and cache the pre-trained vocabulary file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
if os.path.isdir(pretrained_model_name_or_path):
vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
else:
vocab_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(*inputs, **kwargs)
vocab_dict = torch.load(resolved_vocab_file)
for key, value in vocab_dict.items():
tokenizer.__dict__[key] = value
return tokenizer
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False,
delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
self.never_split = never_split
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
if '<UNK>' in self.sym2idx:
self.unk_idx = self.sym2idx['<UNK>']
elif '<unk>' in self.sym2idx:
self.unk_idx = self.sym2idx['<unk>']
else:
            raise ValueError('No <unk> token in vocabulary')
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        else:
            vocab_file = vocab_path
        torch.save(self.__dict__, vocab_file)
        return vocab_file
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
# assert '<eos>' not in sym
if hasattr(self, 'unk_idx'):
return self.sym2idx.get(sym, self.unk_idx)
# Backward compatibility with pre-trained models
elif '<unk>' in self.sym2idx:
return self.sym2idx['<unk>']
elif '<UNK>' in self.sym2idx:
return self.sym2idx['<UNK>']
else:
raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
def convert_ids_to_tokens(self, indices):
"""Converts a sequence of indices in symbols using the vocab."""
return [self.get_sym(idx) for idx in indices]
def convert_tokens_to_ids(self, symbols):
"""Converts a sequence of symbols into ids using the vocab."""
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.convert_tokens_to_ids(symbols))
def decode(self, indices, exclude=None):
"""Converts a sequence of indices in a string."""
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
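# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Builds a tiny vocabulary from counted text and round-trips a sentence through
# tokenize -> ids -> decode. Words never counted fall back to the '<unk>' index.
# The helper name and the toy sentences are illustrative assumptions.
def _example_tokenizer_roundtrip():
    tokenizer = TransfoXLTokenizer(special=['<unk>', '<eos>'], lower_case=True)
    tokenizer.counter.update(tokenizer.tokenize('hello world hello'))
    tokenizer.build_vocab()
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('Hello there'))
    return tokenizer.decode(ids) # 'hello <unk>'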
class LMOrderedIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
"""
data -- LongTensor -- the LongTensor is strictly ordered
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
# Work out how cleanly we can divide the dataset into bsz parts.
self.n_step = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, self.n_step * bsz)
# Evenly divide the data across the bsz batches.
self.data = data.view(bsz, -1).t().contiguous().to(device)
# Number of mini-batches
self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
def get_batch(self, i, bptt=None):
if bptt is None: bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
data = self.data[beg_idx:end_idx]
target = self.data[i+1:i+1+seq_len]
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
return data_out, target_out, seq_len
def get_fixlen_iter(self, start=0):
for i in range(start, self.data.size(0) - 1, self.bptt):
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_len = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
data, target, seq_len = self.get_batch(i, bptt)
i += seq_len
yield data, target, seq_len
if i >= self.data.size(0) - 2:
break
def __iter__(self):
return self.get_fixlen_iter()
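# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Demonstrates how LMOrderedIterator slices one long, strictly ordered id stream
# into [bsz, bptt] batches whose targets are the inputs shifted by one position.
# The helper name and the toy corpus length are illustrative assumptions.
def _example_ordered_iterator():
    corpus_ids = torch.arange(1000) # toy ordered corpus of token ids
    it = LMOrderedIterator(corpus_ids, bsz=4, bptt=10)
    data, target, seq_len = it.get_batch(0)
    assert torch.equal(data[:, 1:], target[:, :-1]) # target is data shifted by one
    return data.shape, target.shape, seq_len # ([4, 10], [4, 10], 10)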
class LMShuffledIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
else np.array(range(len(self.data)))
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.bsz
data = torch.LongTensor(self.bptt, self.bsz)
target = torch.LongTensor(self.bptt, self.bsz)
n_retain = 0
while True:
# data : [n_retain+bptt x bsz]
# target : [bptt x bsz]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.bsz):
n_filled = 0
try:
while n_filled < self.bptt:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
# first n_retain tokens are retained from last batch
data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
streams[i][:n_new]
target[n_filled:n_filled+n_new, i] = \
streams[i][1:n_new+1]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data_out = data.transpose(0, 1).contiguous().to(self.device)
target_out = target.transpose(0, 1).contiguous().to(self.device)
yield data_out, target_out, self.bptt
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.bptt, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
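# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Streams a handful of independent "sentences" (unordered LongTensors) into
# fixed-size [bsz, bptt] batches; the final partial batch is dropped when the
# sentence stream runs out. Helper name and toy sentences are illustrative assumptions.
def _example_shuffled_iterator():
    sents = [torch.arange(20), torch.arange(20, 50), torch.arange(50, 65)]
    it = LMShuffledIterator(sents, bsz=2, bptt=8)
    return [data.shape for data, target, seq_len in it] # each batch is [2, 8]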
class LMMultiFileIterator(LMShuffledIterator):
def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
shuffle=False):
self.paths = paths
self.vocab = vocab
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
class TransfoXLCorpus(object):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a pre-processed corpus.
"""
vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
# redirect to the cache, if necessary
try:
resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Corpus '{}' was not found in corpus list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
corpus_file))
return None
if resolved_corpus_file == corpus_file:
logger.info("loading corpus file {}".format(corpus_file))
else:
logger.info("loading corpus file {} from cache at {}".format(
corpus_file, resolved_corpus_file))
# Instantiate tokenizer.
corpus = cls(*inputs, **kwargs)
corpus_dict = torch.load(resolved_corpus_file)
for key, value in corpus_dict.items():
corpus.__dict__[key] = value
corpus.vocab = vocab
if corpus.train is not None:
corpus.train = torch.tensor(corpus.train, dtype=torch.long)
if corpus.valid is not None:
corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
if corpus.test is not None:
corpus.test = torch.tensor(corpus.test, dtype=torch.long)
return corpus
def __init__(self, *args, **kwargs):
self.vocab = TransfoXLTokenizer(*args, **kwargs)
self.dataset = None
self.train = None
self.valid = None
self.test = None
def build_corpus(self, path, dataset):
self.dataset = dataset
if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
self.vocab.count_file(os.path.join(path, 'train.txt'))
self.vocab.count_file(os.path.join(path, 'valid.txt'))
self.vocab.count_file(os.path.join(path, 'test.txt'))
elif self.dataset == 'wt103':
self.vocab.count_file(os.path.join(path, 'train.txt'))
elif self.dataset == 'lm1b':
train_path_pattern = os.path.join(
path, '1-billion-word-language-modeling-benchmark-r13output',
'training-monolingual.tokenized.shuffled', 'news.en-*')
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ['ptb', 'wt2', 'wt103']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True)
elif self.dataset in ['enwik8', 'text8']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
elif self.dataset == 'lm1b':
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
def get_iterator(self, split, *args, **kwargs):
if split == 'train':
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == 'lm1b':
kwargs['shuffle'] = True
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
elif split in ['valid', 'test']:
data = self.valid if split == 'valid' else self.test
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == 'lm1b':
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset):
fn = os.path.join(datadir, 'cache.pt')
fn_pickle = os.path.join(datadir, 'cache.pkl')
    if os.path.exists(fn):
        print('Loading cached dataset...')
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        print('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
else:
print('Producing dataset {}...'.format(dataset))
kwargs = {}
if dataset in ['wt103', 'wt2']:
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = False
elif dataset == 'ptb':
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = True
elif dataset == 'lm1b':
kwargs['special'] = []
kwargs['lower_case'] = False
kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
elif dataset in ['enwik8', 'text8']:
pass
corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
torch.save(corpus, fn)
return corpus
| 22,060 | 36.582624 | 110 | py |
EmpTransfo | EmpTransfo-master/pytorch_pretrained_bert/modeling_transfo_xl_utilities.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utilities for PyTorch Transformer XL model.
Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
keep_order=False):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers.append(nn.Linear(d_embed, n_token))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.out_projs.append(
nn.Parameter(torch.Tensor(d_proj, d_emb_i))
)
self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
proj_hid = F.linear(hidden, proj.t().contiguous())
logit = F.linear(proj_hid, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def forward(self, hidden, target=None, keep_order=False):
'''
Params:
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
Return:
            if target is not None:
                out :: [len*bsz] Negative log likelihood
            else:
                out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
            We could replace this implementation with the native PyTorch one
            if it had an option to set a bias on all clusters;
            see: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
'''
if target is not None:
target = target.view(-1)
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
if target is not None:
out = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
out = F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
if target is None:
out = hidden.new_empty((head_logit.size(0), self.n_token))
else:
out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if target is not None:
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
hidden_i = hidden.index_select(0, indices_i)
else:
hidden_i = hidden
if i == 0:
if target is not None:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
if target is not None:
logprob_i = head_logprob_i[:, cluster_prob_idx] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
out[:, l_idx:r_idx] = logprob_i
if target is not None:
if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
out.index_copy_(0, indices_i, -logprob_i)
else:
out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def log_prob(self, hidden):
r""" Computes log probabilities for all :math:`n\_classes`
From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
Args:
hidden (Tensor): a minibatch of examples
Returns:
log-probabilities of for each class :math:`c`
in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, in\_features)`
- Output: :math:`(N, n\_classes)`
"""
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers[0].weight,
self.out_layers[0].bias, self.out_projs[0])
return F.log_softmax(logit, dim=-1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
out = hidden.new_empty((head_logit.size(0), self.n_token))
head_logprob = F.log_softmax(head_logit, dim=1)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
return out
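# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Computes per-token negative log likelihood with a small adaptive softmax.
# Projection parameters are created uninitialized by the constructor, so the demo
# fills them in explicitly. All sizes, cutoffs and target ids are illustrative assumptions.
def _example_projected_adaptive_softmax():
    n_token, d_embed, d_proj = 1000, 64, 64
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj,
                                       cutoffs=[100, 500], div_val=2)
    for proj in crit.out_projs:
        if proj is not None:
            nn.init.normal_(proj, 0.0, 0.02)
    hidden = torch.randn(8, d_proj) # [len*bsz, d_proj]
    # targets chosen so that every cluster receives more than one token
    target = torch.tensor([1, 2, 3, 150, 151, 600, 601, 602])
    nll = crit(hidden, target) # [len*bsz]
    log_probs = crit(hidden) # [len*bsz, n_token]
    return nll.mean(), log_probs.shape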
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
"""
Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
expected count can be approximated by 1 - (1 - p)^n
and we use a numerically stable version -expm1(num_tries * log1p(-p))
Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
"""
with torch.no_grad():
self.range_max = range_max
log_indices = torch.arange(1., range_max+2., 1.).log_()
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# print('P', self.dist.numpy().tolist()[-30:])
self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
self.n_sample = n_sample
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[: -n_sample].view(b1, b2, -1)
sample_w = all_w[- n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[: -n_sample].view(b1, b2)
sample_b = all_b[- n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
true_logits = torch.einsum('ijk,ijk->ij',
[true_w, inputs]) + true_b - true_log_probs
sample_logits = torch.einsum('lk,ijk->ijl',
[sample_w, inputs]) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
S, B = 3, 4
n_vocab = 10000
n_sample = 5
H = 32
labels = torch.LongTensor(S, B).random_(0, n_vocab)
# sampler = LogUniformSampler(n_vocab, unique=False)
# new_labels, sample, sample_prob = sampler.sample(n_sample, labels)
sampler = LogUniformSampler(n_vocab, n_sample)#, unique=True)
# true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)
# print('true_probs', true_probs.numpy().tolist())
# print('samp_probs', samp_probs.numpy().tolist())
# print('neg_samples', neg_samples.numpy().tolist())
# print('sum', torch.sum(sampler.dist).item())
# assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()
embedding = nn.Embedding(n_vocab, H)
bias = torch.zeros(n_vocab)
inputs = torch.Tensor(S, B, H).normal_()
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
| 16,108 | 38.972705 | 132 | py |
potapov_interpolation | potapov_interpolation-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Potapov_interpolation documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 25 15:36:39 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# from recommonmark.parser import CommonMarkParser
#
# source_parsers = {
# '.md': CommonMarkParser,
# }
source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../Potapov_Code'))
import mock
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot',
'numpy.linalg','scipy.constants','scipy.integrate',
'scipy.optimize',
'numpy.testing',
'matplotlib.patches',
'qnet.algebra.circuit_algebra',
'sympy.utilities.autowrap'
#'mpmath','mpmath.libmp','mpmath.libmp.backend',
#'mpmath.libmp.libmpc','mpmath.libmp.libmpf',
#'mpmath.libmp.gammazeta',
]
sys.modules.update((mod_name, mock.Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Potapov_interpolation'
copyright = u'2016, Gil Tabak'
author = u'Gil Tabak'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Potapov_interpolationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Potapov_interpolation.tex', u'Potapov\\_interpolation Documentation',
u'Gil Tabak', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'potapov_interpolation', u'Potapov_interpolation Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Potapov_interpolation', u'Potapov_interpolation Documentation',
author, 'Potapov_interpolation', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 10,438 | 30.923547 | 85 | py |
gnn_cff | gnn_cff-main/graphconv.py | """Torch modules for graph convolutions(GCN)."""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from torch.nn import init
from .... import function as fn
from ....base import DGLError
from ....utils import expand_as_pair
from ....transform import reverse
from ....convert import block_to_graph
from ....heterograph import DGLBlock
class EdgeWeightNorm(nn.Module):
r"""
Description
-----------
This module normalizes positive scalar edge weights on a graph
following the form in `GCN <https://arxiv.org/abs/1609.02907>`__.
Mathematically, setting ``norm='both'`` yields the following normalization term:
    .. math::
      c_{ji} = \sqrt{\sum_{k\in\mathcal{N}(j)}e_{jk}} \sqrt{\sum_{k\in\mathcal{N}(i)}e_{ki}}
And, setting ``norm='right'`` yields the following normalization term:
    .. math::
      c_{ji} = \sum_{k\in\mathcal{N}(i)}e_{ki}
where :math:`e_{ji}` is the scalar weight on the edge from node :math:`j` to node :math:`i`.
The module returns the normalized weight :math:`e_{ji} / c_{ji}`.
Parameters
----------
norm : str, optional
The normalizer as specified above. Default is `'both'`.
eps : float, optional
A small offset value in the denominator. Default is 0.
Examples
--------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import EdgeWeightNorm, GraphConv
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> edge_weight = th.tensor([0.5, 0.6, 0.4, 0.7, 0.9, 0.1, 1, 1, 1, 1, 1, 1])
>>> norm = EdgeWeightNorm(norm='both')
>>> norm_edge_weight = norm(g, edge_weight)
>>> conv = GraphConv(10, 2, norm='none', weight=True, bias=True)
>>> res = conv(g, feat, edge_weight=norm_edge_weight)
>>> print(res)
tensor([[-1.1849, -0.7525],
[-1.3514, -0.8582],
[-1.2384, -0.7865],
[-1.9949, -1.2669],
[-1.3658, -0.8674],
[-0.8323, -0.5286]], grad_fn=<AddBackward0>)
"""
def __init__(self, norm='both', eps=0.):
super(EdgeWeightNorm, self).__init__()
self._norm = norm
self._eps = eps
def forward(self, graph, edge_weight):
r"""
Description
-----------
Compute normalized edge weight for the GCN model.
Parameters
----------
graph : DGLGraph
The graph.
edge_weight : torch.Tensor
Unnormalized scalar weights on the edges.
The shape is expected to be :math:`(|E|)`.
Returns
-------
torch.Tensor
The normalized edge weight.
Raises
------
DGLError
Case 1:
The edge weight is multi-dimensional. Currently this module
only supports a scalar weight on each edge.
Case 2:
The edge weight has non-positive values with ``norm='both'``.
This will trigger square root and division by a non-positive number.
"""
with graph.local_scope():
if isinstance(graph, DGLBlock):
graph = block_to_graph(graph)
if len(edge_weight.shape) > 1:
raise DGLError('Currently the normalization is only defined '
'on scalar edge weight. Please customize the '
'normalization for your high-dimensional weights.')
if self._norm == 'both' and th.any(edge_weight <= 0).item():
raise DGLError('Non-positive edge weight detected with `norm="both"`. '
'This leads to square root of zero or negative values.')
dev = graph.device
graph.srcdata['_src_out_w'] = th.ones((graph.number_of_src_nodes())).float().to(dev)
graph.dstdata['_dst_in_w'] = th.ones((graph.number_of_dst_nodes())).float().to(dev)
graph.edata['_edge_w'] = edge_weight
if self._norm == 'both':
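                # For 'both', the source-side factor 1/sqrt(sum_k e_jk) is obtained by summing each
                # node's *outgoing* edge weights on the reversed graph; the destination-side factor
                # is computed from the weighted in-degree further below.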
reversed_g = reverse(graph)
reversed_g.edata['_edge_w'] = edge_weight
reversed_g.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'out_weight'))
degs = reversed_g.dstdata['out_weight'] + self._eps
norm = th.pow(degs, -0.5)
graph.srcdata['_src_out_w'] = norm
if self._norm != 'none':
graph.update_all(fn.copy_edge('_edge_w', 'm'), fn.sum('m', 'in_weight'))
degs = graph.dstdata['in_weight'] + self._eps
if self._norm == 'both':
norm = th.pow(degs, -0.5)
else:
norm = 1.0 / degs
graph.dstdata['_dst_in_w'] = norm
graph.apply_edges(lambda e: {'_norm_edge_weights': e.src['_src_out_w'] * \
e.dst['_dst_in_w'] * \
e.data['_edge_w']})
return graph.edata['_norm_edge_weights']
# # pylint: disable=W0235
# class GraphConv(nn.Module):
# r"""
#
# Description
# -----------
# Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
# and mathematically is defined as follows:
#
# .. math::
# h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ji}}h_j^{(l)}W^{(l)})
#
# where :math:`\mathcal{N}(i)` is the set of neighbors of node :math:`i`,
# :math:`c_{ji}` is the product of the square root of node degrees
# (i.e., :math:`c_{ji} = \sqrt{|\mathcal{N}(j)|}\sqrt{|\mathcal{N}(i)|}`),
# and :math:`\sigma` is an activation function.
#
# If a weight tensor on each edge is provided, the weighted graph convolution is defined as:
#
# .. math::
# h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{e_{ji}}{c_{ji}}h_j^{(l)}W^{(l)})
#
# where :math:`e_{ji}` is the scalar weight on the edge from node :math:`j` to node :math:`i`.
# This is NOT equivalent to the weighted graph convolutional network formulation in the paper.
#
# To customize the normalization term :math:`c_{ji}`, one can first set ``norm='none'`` for
# the model, and send the pre-normalized :math:`e_{ji}` to the forward computation. We provide
# :class:`~dgl.nn.pytorch.EdgeWeightNorm` to normalize scalar edge weight following the GCN paper.
#
# Parameters
# ----------
# in_feats : int
# Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.
# out_feats : int
# Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
# norm : str, optional
# How to apply the normalizer. If is `'right'`, divide the aggregated messages
# by each node's in-degrees, which is equivalent to averaging the received messages.
# If is `'none'`, no normalization is applied. Default is `'both'`,
# where the :math:`c_{ji}` in the paper is applied.
# weight : bool, optional
# If True, apply a linear layer. Otherwise, aggregating the messages
# without a weight matrix.
# bias : bool, optional
# If True, adds a learnable bias to the output. Default: ``True``.
# activation : callable activation function/layer or None, optional
# If not None, applies an activation function to the updated node features.
# Default: ``None``.
# allow_zero_in_degree : bool, optional
# If there are 0-in-degree nodes in the graph, output for those nodes will be invalid
# since no message will be passed to those nodes. This is harmful for some applications
# causing silent performance regression. This module will raise a DGLError if it detects
# 0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
# and let the users handle it by themselves. Default: ``False``.
#
# Attributes
# ----------
# weight : torch.Tensor
# The learnable weight tensor.
# bias : torch.Tensor
# The learnable bias tensor.
#
# Note
# ----
# Zero in-degree nodes will lead to invalid output value. This is because no message
#     will be passed to those nodes, and the aggregation function will be applied on empty input.
# A common practice to avoid this is to add a self-loop for each node in the graph if
# it is homogeneous, which can be achieved by:
#
# >>> g = ... # a DGLGraph
# >>> g = dgl.add_self_loop(g)
#
# Calling ``add_self_loop`` will not work for some graphs, for example, heterogeneous graph
# since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``
# to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
#     A common practice to handle this is to filter out the nodes with zero in-degree when using
#     the output after conv.
#
# Examples
# --------
# >>> import dgl
# >>> import numpy as np
# >>> import torch as th
# >>> from dgl.nn import GraphConv
#
# >>> # Case 1: Homogeneous graph
# >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
# >>> g = dgl.add_self_loop(g)
# >>> feat = th.ones(6, 10)
# >>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True)
# >>> res = conv(g, feat)
# >>> print(res)
# tensor([[ 1.3326, -0.2797],
# [ 1.4673, -0.3080],
# [ 1.3326, -0.2797],
# [ 1.6871, -0.3541],
# [ 1.7711, -0.3717],
# [ 1.0375, -0.2178]], grad_fn=<AddBackward0>)
# >>> # allow_zero_in_degree example
# >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
# >>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True, allow_zero_in_degree=True)
# >>> res = conv(g, feat)
# >>> print(res)
# tensor([[-0.2473, -0.4631],
# [-0.3497, -0.6549],
# [-0.3497, -0.6549],
# [-0.4221, -0.7905],
# [-0.3497, -0.6549],
# [ 0.0000, 0.0000]], grad_fn=<AddBackward0>)
#
# >>> # Case 2: Unidirectional bipartite graph
# >>> u = [0, 1, 0, 0, 1]
# >>> v = [0, 1, 2, 3, 2]
# >>> g = dgl.heterograph({('_U', '_E', '_V') : (u, v)})
# >>> u_fea = th.rand(2, 5)
# >>> v_fea = th.rand(4, 5)
# >>> conv = GraphConv(5, 2, norm='both', weight=True, bias=True)
# >>> res = conv(g, (u_fea, v_fea))
# >>> res
# tensor([[-0.2994, 0.6106],
# [-0.4482, 0.5540],
# [-0.5287, 0.8235],
# [-0.2994, 0.6106]], grad_fn=<AddBackward0>)
# """
# def __init__(self,
# in_feats,
# out_feats,
# norm='both',
# weight=True,
# bias=True,
# activation=None,
# allow_zero_in_degree=False):
# super(GraphConv, self).__init__()
# if norm not in ('none', 'both', 'right'):
# raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
# ' But got "{}".'.format(norm))
# self._in_feats = in_feats
# self._out_feats = out_feats
# self._norm = norm
# self._allow_zero_in_degree = allow_zero_in_degree
#
# if weight:
# self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
# else:
# self.register_parameter('weight', None)
#
# if bias:
# self.bias = nn.Parameter(th.Tensor(out_feats))
# else:
# self.register_parameter('bias', None)
#
# self.reset_parameters()
#
# self._activation = activation
#
# def reset_parameters(self):
# r"""
#
# Description
# -----------
# Reinitialize learnable parameters.
#
# Note
# ----
# The model parameters are initialized as in the
# `original implementation <https://github.com/tkipf/gcn/blob/master/gcn/layers.py>`__
# where the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization
# and the bias is initialized to be zero.
#
# """
# if self.weight is not None:
# init.xavier_uniform_(self.weight)
# if self.bias is not None:
# init.zeros_(self.bias)
#
# def set_allow_zero_in_degree(self, set_value):
# r"""
#
# Description
# -----------
# Set allow_zero_in_degree flag.
#
# Parameters
# ----------
# set_value : bool
# The value to be set to the flag.
# """
# self._allow_zero_in_degree = set_value
#
# def forward(self, graph, feat, weight=None, edge_weight=None):
# r"""
#
# Description
# -----------
# Compute graph convolution.
#
# Parameters
# ----------
# graph : DGLGraph
# The graph.
# feat : torch.Tensor or pair of torch.Tensor
# If a torch.Tensor is given, it represents the input feature of shape
# :math:`(N, D_{in})`
# where :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
# If a pair of torch.Tensor is given, which is the case for bipartite graph, the pair
# must contain two tensors of shape :math:`(N_{in}, D_{in_{src}})` and
# :math:`(N_{out}, D_{in_{dst}})`.
# weight : torch.Tensor, optional
# Optional external weight tensor.
# edge_weight : torch.Tensor, optional
# Optional tensor on the edge. If given, the convolution will weight
# with regard to the message.
#
# Returns
# -------
# torch.Tensor
# The output feature
#
# Raises
# ------
# DGLError
# Case 1:
# If there are 0-in-degree nodes in the input graph, it will raise DGLError
# since no message will be passed to those nodes. This will cause invalid output.
# The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.
#
# Case 2:
# External weight is provided while at the same time the module
# has defined its own weight parameter.
#
# Note
# ----
# * Input shape: :math:`(N, *, \text{in_feats})` where * means any number of additional
# dimensions, :math:`N` is the number of nodes.
# * Output shape: :math:`(N, *, \text{out_feats})` where all but the last dimension are
# the same shape as the input.
# * Weight shape: :math:`(\text{in_feats}, \text{out_feats})`.
# """
# with graph.local_scope():
# if not self._allow_zero_in_degree:
# if (graph.in_degrees() == 0).any():
# raise DGLError('There are 0-in-degree nodes in the graph, '
# 'output for those nodes will be invalid. '
# 'This is harmful for some applications, '
# 'causing silent performance regression. '
# 'Adding self-loop on the input graph by '
# 'calling `g = dgl.add_self_loop(g)` will resolve '
# 'the issue. Setting ``allow_zero_in_degree`` '
# 'to be `True` when constructing this module will '
# 'suppress the check and let the code run.')
# aggregate_fn = fn.copy_src('h', 'm')
# if edge_weight is not None:
# assert edge_weight.shape[0] == graph.number_of_edges()
# graph.edata['_edge_weight'] = edge_weight
# aggregate_fn = fn.u_mul_e('h', '_edge_weight', 'm')
#
# # (BarclayII) For RGCN on heterogeneous graphs we need to support GCN on bipartite.
# feat_src, feat_dst = expand_as_pair(feat, graph)
# if self._norm == 'both':
# degs = graph.out_degrees().float().clamp(min=1)
# norm = th.pow(degs, -0.5)
# shp = norm.shape + (1,) * (feat_src.dim() - 1)
# norm = th.reshape(norm, shp)
# feat_src = feat_src * norm
#
# if weight is not None:
# if self.weight is not None:
# raise DGLError('External weight is provided while at the same time the'
# ' module has defined its own weight parameter. Please'
# ' create the module with flag weight=False.')
# else:
# weight = self.weight
#
# if self._in_feats > self._out_feats:
# # mult W first to reduce the feature size for aggregation.
# if weight is not None:
# feat_src = th.matmul(feat_src, weight)
# graph.srcdata['h'] = feat_src
# graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
# rst = graph.dstdata['h']
# else:
# # aggregate first then mult W
# graph.srcdata['h'] = feat_src
# graph.update_all(aggregate_fn, fn.sum(msg='m', out='h'))
# rst = graph.dstdata['h']
# if weight is not None:
# rst = th.matmul(rst, weight)
#
# if self._norm != 'none':
# degs = graph.in_degrees().float().clamp(min=1)
# if self._norm == 'both':
# norm = th.pow(degs, -0.5)
# else:
# norm = 1.0 / degs
# shp = norm.shape + (1,) * (feat_dst.dim() - 1)
# norm = th.reshape(norm, shp)
# rst = rst * norm
#
# if self.bias is not None:
# rst = rst + self.bias
#
# if self._activation is not None:
# rst = self._activation(rst)
#
# return rst
#
# def extra_repr(self):
# """Set the extra representation of the module,
# which will come into effect when printing the model.
# """
# summary = 'in={_in_feats}, out={_out_feats}'
# summary += ', normalization={_norm}'
# if '_activation' in self.__dict__:
# summary += ', activation={_activation}'
# return summary.format(**self.__dict__)
class GraphConv(nn.Module):
r"""
Description
-----------
Graph convolution was introduced in `GCN <https://arxiv.org/abs/1609.02907>`__
and mathematically is defined as follows:
.. math::
h_i^{(l+1)} = \sigma(b^{(l)} + \sum_{j\in\mathcal{N}(i)}\frac{1}{c_{ij}}h_j^{(l)}W^{(l)})
where :math:`\mathcal{N}(i)` is the set of neighbors of node :math:`i`,
:math:`c_{ij}` is the product of the square root of node degrees
(i.e., :math:`c_{ij} = \sqrt{|\mathcal{N}(i)|}\sqrt{|\mathcal{N}(j)|}`),
and :math:`\sigma` is an activation function.
Parameters
----------
in_feats : int
Input feature size; i.e, the number of dimensions of :math:`h_j^{(l)}`.
out_feats : int
Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
norm : str, optional
How to apply the normalizer. If is `'right'`, divide the aggregated messages
by each node's in-degrees, which is equivalent to averaging the received messages.
If is `'none'`, no normalization is applied. Default is `'both'`,
where the :math:`c_{ij}` in the paper is applied.
weight : bool, optional
If True, apply a linear layer. Otherwise, aggregating the messages
without a weight matrix.
bias : bool, optional
If True, adds a learnable bias to the output. Default: ``True``.
activation : callable activation function/layer or None, optional
If not None, applies an activation function to the updated node features.
Default: ``None``.
allow_zero_in_degree : bool, optional
If there are 0-in-degree nodes in the graph, output for those nodes will be invalid
since no message will be passed to those nodes. This is harmful for some applications
causing silent performance regression. This module will raise a DGLError if it detects
0-in-degree nodes in input graph. By setting ``True``, it will suppress the check
and let the users handle it by themselves. Default: ``False``.
Attributes
----------
weight : torch.Tensor
The learnable weight tensor.
bias : torch.Tensor
The learnable bias tensor.
Note
----
Zero in-degree nodes will lead to invalid output value. This is because no message
    will be passed to those nodes, and the aggregation function will be applied on empty input.
A common practice to avoid this is to add a self-loop for each node in the graph if
it is homogeneous, which can be achieved by:
>>> g = ... # a DGLGraph
>>> g = dgl.add_self_loop(g)
    Calling ``add_self_loop`` will not work for some graphs, for example heterogeneous graphs,
    since the edge type can not be decided for self_loop edges. Set ``allow_zero_in_degree``
    to ``True`` for those cases to unblock the code and handle zero-in-degree nodes manually.
    A common practice to handle this is to filter out the nodes with zero in-degree when using
    the output after conv.
Examples
--------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import GraphConv
>>> # Case 1: Homogeneous graph
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True)
    >>> res = conv(g, feat, th.ones(g.number_of_edges()))
>>> print(res)
tensor([[ 1.3326, -0.2797],
[ 1.4673, -0.3080],
[ 1.3326, -0.2797],
[ 1.6871, -0.3541],
[ 1.7711, -0.3717],
[ 1.0375, -0.2178]], grad_fn=<AddBackward0>)
>>> # allow_zero_in_degree example
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> conv = GraphConv(10, 2, norm='both', weight=True, bias=True, allow_zero_in_degree=True)
    >>> res = conv(g, feat, th.ones(g.number_of_edges()))
>>> print(res)
tensor([[-0.2473, -0.4631],
[-0.3497, -0.6549],
[-0.3497, -0.6549],
[-0.4221, -0.7905],
[-0.3497, -0.6549],
[ 0.0000, 0.0000]], grad_fn=<AddBackward0>)
>>> # Case 2: Unidirectional bipartite graph
>>> u = [0, 1, 0, 0, 1]
>>> v = [0, 1, 2, 3, 2]
>>> g = dgl.bipartite((u, v))
>>> u_fea = th.rand(2, 5)
>>> v_fea = th.rand(4, 5)
>>> conv = GraphConv(5, 2, norm='both', weight=True, bias=True)
    >>> res = conv(g, (u_fea, v_fea), th.ones(g.number_of_edges()))
>>> res
tensor([[-0.2994, 0.6106],
[-0.4482, 0.5540],
[-0.5287, 0.8235],
[-0.2994, 0.6106]], grad_fn=<AddBackward0>)
"""
def __init__(self,
in_feats,
out_feats,
norm='both',
weight=True,
bias=True,
activation=None,
allow_zero_in_degree=False):
super(GraphConv, self).__init__()
if norm not in ('none', 'both', 'right'):
raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
' But got "{}".'.format(norm))
self._in_feats = in_feats
self._out_feats = out_feats
self._norm = norm
self._allow_zero_in_degree = allow_zero_in_degree
if weight:
self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
else:
self.register_parameter('weight', None)
if bias:
self.bias = nn.Parameter(th.Tensor(out_feats))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self._activation = activation
def reset_parameters(self):
r"""
Description
-----------
Reinitialize learnable parameters.
Note
----
The model parameters are initialized as in the
`original implementation <https://github.com/tkipf/gcn/blob/master/gcn/layers.py>`__
where the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization
and the bias is initialized to be zero.
"""
if self.weight is not None:
init.xavier_uniform_(self.weight)
if self.bias is not None:
init.zeros_(self.bias)
def set_allow_zero_in_degree(self, set_value):
r"""
Description
-----------
Set allow_zero_in_degree flag.
Parameters
----------
set_value : bool
The value to be set to the flag.
"""
self._allow_zero_in_degree = set_value
def forward(self, graph, feat, eweight, weight=None):
r"""
Description
-----------
Compute graph convolution.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, it represents the input feature of shape
:math:`(N, D_{in})`
where :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, which is the case for bipartite graph, the pair
must contain two tensors of shape :math:`(N_{in}, D_{in_{src}})` and
:math:`(N_{out}, D_{in_{dst}})`.
        eweight : torch.Tensor
            Edge weights of shape :math:`(E,)` or :math:`(E, 1)`, where :math:`E` is the number of edges.
weight : torch.Tensor, optional
Optional external weight tensor.
Returns
-------
torch.Tensor
The output feature
Raises
------
DGLError
Case 1:
If there are 0-in-degree nodes in the input graph, it will raise DGLError
since no message will be passed to those nodes. This will cause invalid output.
The error can be ignored by setting ``allow_zero_in_degree`` parameter to ``True``.
Case 2:
External weight is provided while at the same time the module
has defined its own weight parameter.
Note
----
* Input shape: :math:`(N, *, \text{in_feats})` where * means any number of additional
dimensions, :math:`N` is the number of nodes.
* Output shape: :math:`(N, *, \text{out_feats})` where all but the last dimension are
the same shape as the input.
* Weight shape: :math:`(\text{in_feats}, \text{out_feats})`.
"""
with graph.local_scope():
if not self._allow_zero_in_degree:
if (graph.in_degrees() == 0).any():
raise DGLError('There are 0-in-degree nodes in the graph, '
'output for those nodes will be invalid. '
'This is harmful for some applications, '
'causing silent performance regression. '
'Adding self-loop on the input graph by '
'calling `g = dgl.add_self_loop(g)` will resolve '
'the issue. Setting ``allow_zero_in_degree`` '
'to be `True` when constructing this module will '
'suppress the check and let the code run.')
# (BarclayII) For RGCN on heterogeneous graphs we need to support GCN on bipartite.
feat_src, feat_dst = expand_as_pair(feat, graph)
if self._norm == 'both':
degs = graph.out_degrees().float().clamp(min=1)
norm = th.pow(degs, -0.5)
shp = norm.shape + (1,) * (feat_src.dim() - 1)
norm = th.reshape(norm, shp)
feat_src = feat_src * norm
if weight is not None:
if self.weight is not None:
raise DGLError('External weight is provided while at the same time the'
' module has defined its own weight parameter. Please'
' create the module with flag weight=False.')
else:
weight = self.weight
# Set edge weights
graph.edata['w'] = eweight
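            # Unlike the stock DGL GraphConv, this variant always multiplies every message by the
            # per-edge weight `eweight` (via fn.u_mul_e below), which is what lets callers such as
            # the explainer models pass a differentiable soft edge mask.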
if self._in_feats > self._out_feats:
# mult W first to reduce the feature size for aggregation.
if weight is not None:
feat_src = th.matmul(feat_src, weight)
graph.srcdata['h'] = feat_src
# Changed from fn.copy_src to fn.u_mul_e
graph.update_all(fn.u_mul_e(lhs_field='h', rhs_field='w', out='m'),
fn.sum(msg='m', out='h'))
rst = graph.dstdata['h']
else:
# aggregate first then mult W
graph.srcdata['h'] = feat_src
# Changed from fn.copy_src to fn.u_mul_e
graph.update_all(fn.u_mul_e(lhs_field='h', rhs_field='w', out='m'),
fn.sum(msg='m', out='h'))
rst = graph.dstdata['h']
if weight is not None:
rst = th.matmul(rst, weight)
if self._norm != 'none':
degs = graph.in_degrees().float().clamp(min=1)
if self._norm == 'both':
norm = th.pow(degs, -0.5)
else:
norm = 1.0 / degs
shp = norm.shape + (1,) * (feat_dst.dim() - 1)
norm = th.reshape(norm, shp)
rst = rst * norm
if self.bias is not None:
rst = rst + self.bias
if self._activation is not None:
rst = self._activation(rst)
return rst
def extra_repr(self):
"""Set the extra representation of the module,
which will come into effect when printing the model.
"""
summary = 'in={_in_feats}, out={_out_feats}'
summary += ', normalization={_norm}'
if '_activation' in self.__dict__:
summary += ', activation={_activation}'
return summary.format(**self.__dict__) | 30,598 | 40.072483 | 102 | py |
gnn_cff | gnn_cff-main/models/gcn.py | import numpy as np
from dgl.nn.pytorch import GraphConv
import dgl
import torch
# class GCNGraphNew(torch.nn.Module):
# def __init__(self, in_feats, h_feats):
# super(GCNGraphNew, self).__init__()
# self.conv1 = GraphConv(in_feats, h_feats)
# self.conv2 = GraphConv(h_feats, h_feats)
# self.conv3 = GraphConv(h_feats, h_feats)
# self.dense = torch.nn.Linear(h_feats, 1)
# self.maxpool = dgl.nn.pytorch.glob.MaxPooling()
# def forward(self, g, in_feat, e_weight):
# h = self.conv1(g, in_feat, e_weight)
# h = torch.nn.functional.relu(h)
# h = self.conv2(g, h, e_weight)
# h = torch.nn.functional.relu(h)
# h = self.conv3(g, h, e_weight)
# h = torch.nn.functional.relu(h)
# g.ndata['h'] = h
# h = self.maxpool(g, h) # pooling
# h = self.dense(h)
# h = torch.nn.functional.sigmoid(h)
# return h
class GCNGraph(torch.nn.Module):
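    # Three GraphConv layers with a mean-node-pooling readout and a small MLP head;
    # the final sigmoid yields a single probability for binary graph classification.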
def __init__(self, in_feats, h_feats):
super(GCNGraph, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, h_feats)
self.conv3 = GraphConv(h_feats, h_feats)
self.dense1 = torch.nn.Linear(h_feats, 16)
self.dense2 = torch.nn.Linear(16, 8)
self.dense3 = torch.nn.Linear(8, 1)
def forward(self, g, in_feat, e_weight):
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv3(g, h, e_weight)
h = torch.nn.functional.relu(h)
g.ndata['h'] = h
h = dgl.mean_nodes(g, 'h') # pooling
h = self.dense1(h)
h = torch.nn.functional.relu(h)
h = self.dense2(h)
h = torch.nn.functional.relu(h)
h = self.dense3(h)
h = torch.nn.functional.sigmoid(h)
return h
class GCNNodeBAShapes(torch.nn.Module):
# TODO
def __init__(self, in_feats, h_feats, num_classes, device, if_exp=False):
super(GCNNodeBAShapes, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, h_feats)
self.conv3 = GraphConv(h_feats, num_classes)
self.if_exp = if_exp
self.device = device
def forward(self, g, in_feat, e_weight, target_node):
# map target node index
x = torch.cat((torch.tensor([0]).to(self.device), torch.cumsum(g.batch_num_nodes(), dim=0)), dim=0)[:-1]
target_node = target_node + x
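        # The two lines above shift the per-graph target index by the cumulative node counts of
        # the preceding graphs, since node IDs are offset in a batched DGLGraph.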
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv3(g, h, e_weight)
        if self.if_exp:  # in explanation mode, add a softmax layer so the scores are class probabilities
h = torch.nn.functional.softmax(h)
g.ndata['h'] = h
return g.ndata['h'][target_node]
class GCNNodeTreeCycles(torch.nn.Module):
# TODO
def __init__(self, in_feats, h_feats, num_classes, if_exp=False):
super(GCNNodeTreeCycles, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, h_feats)
self.conv3 = GraphConv(h_feats, num_classes)
self.if_exp = if_exp
def forward(self, g, in_feat, e_weight, target_node):
# map target node index
x = torch.cat((torch.tensor([0]), torch.cumsum(g.batch_num_nodes(), dim=0)), dim=0)[:-1]
target_node = target_node + x
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv3(g, h, e_weight)
        if self.if_exp:  # in explanation mode, squash the class scores with a sigmoid
h = torch.nn.functional.sigmoid(h)
g.ndata['h'] = h
return g.ndata['h'][target_node]
class GCNNodeCiteSeer(torch.nn.Module):
# TODO
def __init__(self, in_feats, h_feats, num_classes, if_exp=False):
super(GCNNodeCiteSeer, self).__init__()
self.conv1 = GraphConv(in_feats, h_feats)
self.conv2 = GraphConv(h_feats, num_classes)
self.if_exp = if_exp
def forward(self, g, in_feat, e_weight, target_node):
# map target node index
x = torch.cat((torch.tensor([0]), torch.cumsum(g.batch_num_nodes(), dim=0)), dim=0)[:-1]
target_node = target_node + x
h = self.conv1(g, in_feat, e_weight)
h = torch.nn.functional.relu(h)
h = self.conv2(g, h, e_weight)
        if self.if_exp:  # in explanation mode, add a softmax layer so the scores are class probabilities
h = torch.nn.functional.softmax(h)
g.ndata['h'] = h
return g.ndata['h'][target_node]
| 4,777 | 36.622047 | 112 | py |
gnn_cff | gnn_cff-main/models/explainer_models.py | from re import S
import numpy as np
import torch
import math
import tqdm
import sys
import matplotlib.pyplot as plt
import networkx as nx
from utils.common_utils import mutag_dgl_to_networkx, get_mutag_color_dict, ba_shapes_dgl_to_networkx
class GraphExplainerEdge(torch.nn.Module):
def __init__(self, base_model, G_dataset, test_indices, args, fix_exp=None):
super(GraphExplainerEdge, self).__init__()
self.base_model = base_model
self.base_model.eval()
self.G_dataset = G_dataset
self.test_indices = test_indices
self.args = args
if fix_exp:
self.fix_exp = fix_exp * 2
else:
self.fix_exp = None
def explain_nodes_gnn_stats(self):
exp_dict = {} # {'gid': masked_adj, 'gid': mask_adj}
num_dict = {} # {'gid': exp_num, 'gid': exp_num}
num=200
for gid in tqdm.tqdm(self.test_indices[:num]):
ori_pred = self.base_model(self.G_dataset.graphs[gid],
self.G_dataset.graphs[gid].ndata['feat'].float(),
self.G_dataset.graphs[gid].edata['weight'])[0, 0]
pred_label = torch.round(ori_pred)
ori_label = self.G_dataset.labels[gid]
if pred_label == 1 and ori_label == 1: # only explain why the graph is predicted as mutagenic
masked_adj, exp_num = self.explain(gid, ori_pred)
exp_dict[gid] = masked_adj
num_dict[gid] = exp_num
print('average number of exps:', sum(num_dict.values()) / len(num_dict.keys()))
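        # PN: probability of necessity (removing the explanation edges flips the prediction);
        # PS: probability of sufficiency (the explanation edges alone preserve the prediction);
        # FNS is their harmonic mean.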
PN = self.compute_pn(exp_dict)
PS = self.compute_ps(exp_dict)
acc, pre, rec, f1 = self.compute_precision_recall(exp_dict)
print('PN', PN)
print('PS', PS)
print('FNS', 2 * PN * PS / (PN + PS))
print('acc: ', acc, ' pre: ', pre, ' rec: ', rec, ' f1: ', f1)
return PN, PS, 2 * PN * PS / (PN + PS), sum(num_dict.values()) / len(num_dict.keys()), acc, pre, rec, f1
def explain(self, gid, ori_pred):
# only generate exps for the correct predictions for now (to be consistent with GNN Explainer).
explainer = ExplainModelGraph(
graph=self.G_dataset.graphs[gid],
base_model=self.base_model,
args=self.args
)
if self.args.gpu:
explainer = explainer.cuda()
# train explainer
optimizer = torch.optim.Adam(explainer.parameters(), lr=self.args.lr, weight_decay=0)
explainer.train()
for epoch in range(self.args.num_epochs):
explainer.zero_grad()
pred1, pred2 = explainer()
bpr1, bpr2, l1, loss = explainer.loss(
pred1[0, 0], pred2[0, 0], ori_pred, self.args.gam, self.args.lam, self.args.alp)
# if epoch % 200 == 0:
# print('bpr1: ', self.args.lam * self.args.alp * bpr1,
# 'bpr2:', self.args.lam * (1 - self.args.alp) * bpr2,
# 'l1', l1,
# 'loss', loss)
# print('bpr: ', 50 * bpr, 'l1', l1, 'loss', loss)
loss.backward()
optimizer.step()
        masked_adj = explainer.get_masked_adj()
new_edge_num = len(masked_adj[masked_adj > self.args.mask_thresh])
exp_num = new_edge_num / 2
return masked_adj, exp_num
def compute_pn(self, exp_dict):
pn_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
pn_adj = graph.edata['weight'] - ps_adj
new_pre = self.base_model(graph, graph.ndata['feat'].float(), pn_adj)[0, 0]
if new_pre < 0.5:
pn_count += 1
pn = pn_count / len(exp_dict.keys())
return pn
def compute_ps(self, exp_dict):
ps_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
new_pre = self.base_model(graph, graph.ndata['feat'].float(), ps_adj)[0, 0]
if new_pre > 0.5:
ps_count += 1
ps = ps_count / len(exp_dict.keys())
return ps
def compute_precision_recall(self, exp_dict):
pres = []
recalls = []
f1s = []
accs = []
for gid, masked_adj in exp_dict.items():
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
e_labels = self.G_dataset[gid][0].edata['label']
new_edges = [masked_adj > thresh][0].numpy()
old_edges = [self.G_dataset[gid][0].edata['weight'] > thresh][0].numpy()
int_map = map(int, new_edges)
new_edges = list(int_map)
int_map = map(int, old_edges)
old_edges = list(int_map)
exp_list = np.array(new_edges)
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(e_labels)):
if exp_list[i] == 1:
if e_labels[i] == 1:
TP += 1
else:
FP += 1
else:
if e_labels[i] == 1:
FN += 1
else:
TN += 1
if TP != 0:
pre = TP / (TP + FP)
rec = TP / (TP + FN)
acc = (TP + TN) / (TP + FP + TN + FN)
f1 = 2 * pre * rec / (pre + rec)
else:
pre = 0
rec = 0
f1 = 0
acc = (TP + TN) / (TP + FP + TN + FN)
pres.append(pre)
recalls.append(rec)
f1s.append(f1)
accs.append(acc)
return np.mean(accs), np.mean(pres), np.mean(recalls), np.mean(f1s)
class ExplainModelGraph(torch.nn.Module):
def __init__(self, graph, base_model, args):
super(ExplainModelGraph, self).__init__()
self.graph = graph
self.num_nodes = len(self.graph.nodes())
self.base_model = base_model
self.args = args
self.adj_mask = self.construct_adj_mask()
# For masking diagonal entries
self.diag_mask = torch.ones(self.num_nodes, self.num_nodes) - torch.eye(self.num_nodes)
if self.args.gpu:
self.diag_mask = self.diag_mask.cuda()
def forward(self):
masked_adj = self.get_masked_adj()
# should be reversed in the future
pred1 = self.base_model(self.graph, self.graph.ndata['feat'].float(), masked_adj) # factual
pred2 = self.base_model(self.graph, self.graph.ndata['feat'].float(), self.graph.edata['weight'] - masked_adj) # counterfactual
return pred1, pred2
def loss(self, pred1, pred2, ori_pred, gam, lam, alp):
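        # bpr1 (factual): hinge that is zero once the masked sub-graph is still predicted
        # positive with margin gam; bpr2 (counterfactual): hinge that is zero once the
        # complement graph is predicted negative with the same margin. The L1 norm of the
        # masked adjacency encourages a sparse explanation.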
relu = torch.nn.ReLU()
bpr1 = relu(gam + 0.5 - pred1) # factual
bpr2 = relu(gam + pred2 - 0.5) # counterfactual
masked_adj = self.get_masked_adj()
L1 = torch.linalg.norm(masked_adj, ord=1)
loss = L1 + lam * (alp * bpr1 + (1 - alp) * bpr2)
return bpr1, bpr2, L1, loss
def construct_adj_mask(self):
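        # Free N x N mask parameter, initialised around 1.0 with a Xavier-style std, so that
        # sigmoid(mask) starts near 0.73 and optimisation begins from (almost) the full graph.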
mask = torch.nn.Parameter(torch.FloatTensor(self.num_nodes, self.num_nodes))
std = torch.nn.init.calculate_gain("relu") * math.sqrt(
2.0 / (self.num_nodes + self.num_nodes)
)
with torch.no_grad():
mask.normal_(1.0, std)
return mask
def get_masked_adj(self):
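        # Squash the mask to (0, 1) with a sigmoid, symmetrise it so both directions of an edge
        # share one value, then multiply it elementwise with the stored edge weights. This assumes
        # the graphs keep all N x N node pairs as (0/1-weighted) edges, as the preprocessing in
        # this repo appears to do, so the flattened N x N mask lines up with edata['weight'].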
sym_mask = torch.sigmoid(self.adj_mask)
sym_mask = (sym_mask + sym_mask.t()) / 2
adj = self.graph.edata['weight']
flatten_sym_mask = torch.reshape(sym_mask, (-1, ))
masked_adj = adj * flatten_sym_mask
# masked_adj = masked_adj * self.diag_mask
return masked_adj
class NodeExplainerEdgeMulti(torch.nn.Module):
def __init__(self, base_model, G_dataset, test_indices, args, fix_exp=None):
super(NodeExplainerEdgeMulti, self).__init__()
self.base_model = base_model
self.base_model.eval()
self.G_dataset = G_dataset
self.test_indices = test_indices
self.args = args
if fix_exp:
self.fix_exp = fix_exp * 2
else:
self.fix_exp = None
def explain_nodes_gnn_stats(self):
exp_dict = {} # {'gid': masked_adj, 'gid': mask_adj}
num_dict = {} # {'gid': exp_num, 'gid': exp_num}
pred_label_dict = {}
t_gid = []
for gid in tqdm.tqdm(self.test_indices):
ori_pred = self.base_model(self.G_dataset.graphs[gid],
self.G_dataset.graphs[gid].ndata['feat'].float(),
self.G_dataset.graphs[gid].edata['weight'], self.G_dataset.targets[gid])[0]
ori_pred_label = torch.argmax(ori_pred)
if self.args.dataset == 'citeseer':
ori_label = self.G_dataset.labels[gid]
else:
ori_label = torch.argmax(self.G_dataset.labels[gid])
if self.args.dataset == 'citeseer' or (ori_pred_label != 0 and ori_label != 0):
t_gid.append(gid)
masked_adj, exp_num = self.explain(gid, ori_pred_label)
exp_dict[gid] = masked_adj
num_dict[gid] = exp_num
pred_label_dict[gid] = ori_pred_label
print('average number of exps:', sum(num_dict.values()) / len(num_dict.keys()))
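        # PN / PS: probability of necessity / sufficiency of the explanation (as in
        # GraphExplainerEdge above); PNS is their harmonic mean.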
PN = self.compute_pn(exp_dict, pred_label_dict)
PS = self.compute_ps(exp_dict, pred_label_dict)
if self.args.dataset == 'citeseer':
acc = -1
pre = -1
rec = -1
f1 = -1
else:
acc, pre, rec, f1 = self.compute_precision_recall(exp_dict)
print('PN', PN)
print('PS', PS)
print('PNS', 2 * PN * PS / (PN + PS))
print('ave exp', sum(num_dict.values()) / len(num_dict.keys()))
print('acc: ', acc, ' pre: ', pre, ' rec: ', rec, ' f1: ', f1)
return PN, PS, 2 * PN * PS / (PN + PS), sum(num_dict.values()) / len(num_dict.keys()), acc, pre, rec, f1
def explain(self, gid, pred_label):
explainer = ExplainModelNodeMulti(
graph=self.G_dataset.graphs[gid],
base_model=self.base_model,
target_node=self.G_dataset.targets[gid],
args=self.args
)
if self.args.gpu:
explainer = explainer.cuda()
optimizer = torch.optim.Adam(explainer.parameters(), lr=self.args.lr, weight_decay=0)
explainer.train()
for epoch in range(self.args.num_epochs):
explainer.zero_grad()
pred1, pred2 = explainer()
bpr1, bpr2, l1, loss = explainer.loss(
pred1[0], pred2[0], pred_label, self.args.gam, self.args.lam, self.args.alp)
# if epoch % 201 == 0:
# print('bpr1: ', self.args.lam * self.args.alp * bpr1,
# 'bpr2:', self.args.lam * (1 - self.args.alp) * bpr2,
# 'l1', l1,
# 'loss', loss)
loss.backward()
optimizer.step()
masked_adj = explainer.get_masked_adj()
new_edge_num = len(masked_adj[masked_adj > self.args.mask_thresh])
exp_num = new_edge_num / 2
return masked_adj, exp_num
def compute_pn(self, exp_dict, pred_label_dict):
pn_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
if self.fix_exp > (len(masked_adj.flatten()) - 1):
thresh = masked_adj.flatten().sort(descending=True)[0][len(masked_adj.flatten()) - 1]
else:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp + 1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
pn_adj = graph.edata['weight'] - ps_adj
new_pre = self.base_model(graph, graph.ndata['feat'].float(), pn_adj, target)[0]
new_label = torch.argmax(new_pre)
if new_label != ori_pred_label:
pn_count += 1
pn = pn_count / len(exp_dict.keys())
return pn
def compute_ps(self, exp_dict, pred_label_dict):
ps_count = 0
for gid, masked_adj in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
if self.fix_exp > (len(masked_adj.flatten()) - 1):
thresh = masked_adj.flatten().sort(descending=True)[0][len(masked_adj.flatten()) - 1]
else:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp + 1]
else:
thresh = self.args.mask_thresh
ps_adj = (masked_adj > thresh).float()
new_pre = self.base_model(graph, graph.ndata['feat'].float(), ps_adj, target)[0]
new_label = torch.argmax(new_pre)
if new_label == ori_pred_label:
ps_count += 1
ps = ps_count / len(exp_dict.keys())
return ps
def compute_precision_recall(self, exp_dict):
pres = []
recalls = []
f1s = []
accs = []
for gid, masked_adj in exp_dict.items():
if self.fix_exp:
if self.fix_exp > (len(masked_adj.flatten()) - 1):
thresh = masked_adj.flatten().sort(descending=True)[0][len(masked_adj.flatten()) - 1]
else:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp + 1]
else:
thresh = self.args.mask_thresh
e_labels = self.G_dataset[gid][0].edata['gt']
new_edges = [masked_adj > thresh][0].numpy()
old_edges = [self.G_dataset[gid][0].edata['weight'] > thresh][0].numpy()
int_map = map(int, new_edges)
new_edges = list(int_map)
int_map = map(int, old_edges)
old_edges = list(int_map)
exp_list = np.array(new_edges)
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(e_labels)):
if exp_list[i] == 1:
if e_labels[i] == 1:
TP += 1
else:
FP += 1
else:
if e_labels[i] == 1:
FN += 1
else:
TN += 1
# print('TP', TP, 'FP', FP, 'TN', TN, 'FN', FN)
if TP != 0:
pre = TP / (TP + FP)
rec = TP / (TP + FN)
acc = (TP + TN) / (TP + FP + TN + FN)
f1 = 2 * pre * rec / (pre + rec)
else:
pre = 0
rec = 0
f1 = 0
acc = (TP + TN) / (TP + FP + TN + FN)
pres.append(pre)
recalls.append(rec)
f1s.append(f1)
accs.append(acc)
return np.mean(accs), np.mean(pres), np.mean(recalls), np.mean(f1s)
class ExplainModelNodeMulti(torch.nn.Module):
"""
explain BA-shapes and CiteSeer
"""
def __init__(self, graph, base_model, target_node, args):
super(ExplainModelNodeMulti, self).__init__()
self.graph = graph
self.num_nodes = len(self.graph.nodes())
self.base_model = base_model
self.target_node = target_node
self.args = args
self.adj_mask = self.construct_adj_mask()
# For masking diagonal entries
self.diag_mask = torch.ones(self.num_nodes, self.num_nodes) - torch.eye(self.num_nodes)
if self.args.gpu:
self.diag_mask = self.diag_mask.cuda()
def forward(self):
masked_adj = self.get_masked_adj()
pred1 = self.base_model(self.graph, self.graph.ndata['feat'].float(),
masked_adj, self.target_node)
pred2 = self.base_model(self.graph, self.graph.ndata['feat'].float(),
self.graph.edata['weight'] - masked_adj,
self.target_node)
return pred1, pred2
def loss(self, pred1, pred2, pred_label, gam, lam, alp):
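        # Multi-class version of the hinge losses: f_next / cf_next are the strongest competing
        # class scores. bpr1 is zero once the factual (masked) graph still predicts pred_label
        # with margin gam; bpr2 is zero once the counterfactual (complement) graph prefers some
        # other class by the same margin.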
relu = torch.nn.ReLU()
f_next = torch.max(torch.cat((pred1[:pred_label],
pred1[pred_label+1:])))
cf_next = torch.max(torch.cat((pred2[:pred_label],
pred2[pred_label+1:])))
bpr1 = relu(gam + f_next - pred1[pred_label])
bpr2 = relu(gam + pred2[pred_label] - cf_next)
masked_adj = self.get_masked_adj()
L1 = torch.linalg.norm(masked_adj, ord=1)
loss = L1 + lam * (alp * bpr1 + (1 - alp) * bpr2)
return bpr1, bpr2, L1, loss
def construct_adj_mask(self):
mask = torch.nn.Parameter(torch.FloatTensor(self.num_nodes, self.num_nodes))
std = torch.nn.init.calculate_gain("relu") * math.sqrt(
2.0 / (self.num_nodes + self.num_nodes)
)
with torch.no_grad():
mask.normal_(1.0, std)
return mask
def get_masked_adj(self):
sym_mask = torch.sigmoid(self.adj_mask)
sym_mask = (sym_mask + sym_mask.t()) / 2
adj = self.graph.edata['weight']
flatten_sym_mask = torch.reshape(sym_mask, (-1, ))
masked_adj = adj * flatten_sym_mask
return masked_adj
class NodeExplainerFeatureMulti(torch.nn.Module):
def __init__(self, base_model, G_dataset, test_indices, args, fix_exp=None):
super(NodeExplainerFeatureMulti, self).__init__()
self.base_model = base_model
self.base_model.eval()
self.G_dataset = G_dataset
self.test_indices = test_indices
self.args = args
if fix_exp:
self.fix_exp = fix_exp * 2
else:
self.fix_exp = None
def explain_nodes_gnn_stats(self):
exp_dict = {} # {'gid': masked_adj, 'gid': mask_adj}
num_dict = {} # {'gid': exp_num, 'gid': exp_num}
pred_label_dict = {}
for gid in tqdm.tqdm(self.test_indices[:51]):
ori_pred = self.base_model(self.G_dataset.graphs[gid],
self.G_dataset.graphs[gid].ndata['feat'].float(),
self.G_dataset.graphs[gid].edata['weight'], self.G_dataset.targets[gid])[0]
ori_pred_label = torch.argmax(ori_pred)
if self.args.dataset == 'citeseer':
ori_label = self.G_dataset.labels[gid]
else:
ori_label = torch.argmax(self.G_dataset.labels[gid])
            if self.args.dataset == 'citeseer' or (ori_pred_label != 0 and ori_label != 0):  # only explain nodes whose label is not the base class, i.e. nodes on a motif
print('explain gid: ', gid)
                print('num of edges: ', torch.sum(self.G_dataset[gid][0].edata['weight']))
masked_feat, exp_num = self.explain(gid, ori_pred_label)
exp_dict[gid] = masked_feat
num_dict[gid] = exp_num
pred_label_dict[gid] = ori_pred_label
print('average number of exps:', sum(num_dict.values()) / len(num_dict.keys()))
PN = self.compute_pn(exp_dict, pred_label_dict)
PS = self.compute_ps(exp_dict, pred_label_dict)
if self.args.dataset == 'citeseer':
acc = -1
pre = -1
rec = -1
f1 = -1
else:
acc, pre, rec, f1 = self.compute_precision_recall(exp_dict)
print('PN', PN)
print('PS', PS)
print('PNS', 2 * PN * PS / (PN + PS))
print('ave exp', sum(num_dict.values()) / len(num_dict.keys()))
print('acc: ', acc, ' pre: ', pre, ' rec: ', rec, ' f1: ', f1)
return PN, PS, 2 * PN * PS / (PN + PS), sum(num_dict.values()) / len(num_dict.keys()), acc, pre, rec, f1
def explain(self, gid, pred_label):
# only generate exps for the correct predictions for now (to be consistent with GNN Explainer).
explainer = ExplainModelNodeMultiFeature(
graph=self.G_dataset.graphs[gid],
base_model=self.base_model,
target_node=self.G_dataset.targets[gid],
args=self.args
)
print('ori label', self.G_dataset.labels[gid])
print('ori feat num', torch.sum(self.G_dataset.graphs[gid].ndata['feat']))
if self.args.gpu:
explainer = explainer.cuda()
# train explainer
optimizer = torch.optim.Adam(explainer.parameters(), lr=self.args.lr, weight_decay=0)
explainer.train()
for epoch in range(self.args.num_epochs):
explainer.zero_grad()
pred1, pred2 = explainer()
bpr1, bpr2, l1, loss = explainer.loss(
pred1[0], pred2[0], pred_label, self.args.gam, self.args.lam, self.args.alp)
if epoch % 200 == 0:
print('bpr1: ', self.args.lam * self.args.alp * bpr1,
'bpr2:', self.args.lam * (1 - self.args.alp) * bpr2,
'l1', l1,
'loss', loss)
loss.backward()
optimizer.step()
masked_feat = explainer.get_masked_feat()
new_feat_num = len(masked_feat[masked_feat > self.args.mask_thresh])
exp_num = new_feat_num
print('exp num', exp_num)
return masked_feat, exp_num
def compute_pn(self, exp_dict, pred_label_dict):
pn_count = 0
for gid, masked_feat in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
thresh = masked_feat.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_feat = (masked_feat > thresh).float()
pn_feat = graph.ndata['feat'] - ps_feat
new_pre = self.base_model(graph, pn_feat.float(), graph.edata['weight'], target)[0]
new_label = torch.argmax(new_pre)
if new_label != ori_pred_label:
pn_count += 1
pn = pn_count / len(exp_dict.keys())
return pn
def compute_ps(self, exp_dict, pred_label_dict):
ps_count = 0
for gid, masked_feat in exp_dict.items():
graph = self.G_dataset.graphs[gid]
target = self.G_dataset.targets[gid]
ori_pred_label = pred_label_dict[gid]
if self.fix_exp:
thresh = masked_feat.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
ps_feat = (masked_feat > thresh).float()
new_pre = self.base_model(graph, ps_feat.float(), graph.edata['weight'], target)[0]
new_label = torch.argmax(new_pre)
if new_label == ori_pred_label:
ps_count += 1
ps = ps_count / len(exp_dict.keys())
return ps
def compute_precision_recall(self, exp_dict):
pres = []
recalls = []
f1s = []
accs = []
for gid, masked_adj in exp_dict.items():
if self.fix_exp:
thresh = masked_adj.flatten().sort(descending=True)[0][self.fix_exp+1]
else:
thresh = self.args.mask_thresh
e_labels = self.G_dataset[gid][0].edata['gt']
new_edges = [masked_adj > thresh][0].numpy()
old_edges = [self.G_dataset[gid][0].edata['weight'] > thresh][0].numpy()
int_map = map(int, new_edges)
new_edges = list(int_map)
int_map = map(int, old_edges)
old_edges = list(int_map)
exp_list = np.array(new_edges)
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(e_labels)):
if exp_list[i] == 1:
if e_labels[i] == 1:
TP += 1
else:
FP += 1
else:
if e_labels[i] == 1:
FN += 1
else:
TN += 1
if TP != 0:
pre = TP / (TP + FP)
rec = TP / (TP + FN)
acc = (TP + TN) / (TP + FP + TN + FN)
f1 = 2 * pre * rec / (pre + rec)
else:
pre = 0
rec = 0
f1 = 0
acc = (TP + TN) / (TP + FP + TN + FN)
pres.append(pre)
recalls.append(rec)
f1s.append(f1)
accs.append(acc)
return np.mean(accs), np.mean(pres), np.mean(recalls), np.mean(f1s)
class ExplainModelNodeMultiFeature(torch.nn.Module):
"""
explain BA-shapes and CiteSeer
"""
def __init__(self, graph, base_model, target_node, args):
super(ExplainModelNodeMultiFeature, self).__init__()
self.graph = graph
self.num_nodes = len(self.graph.nodes())
self.feat = self.graph.ndata['feat']
self.feat_dim = self.feat.shape[1]
self.base_model = base_model
self.target_node = target_node
self.args = args
self.feat_mask = self.construct_feat_mask()
def forward(self):
masked_feat = self.get_masked_feat() # masked adj is always the exp sub graph
pred1 = self.base_model(self.graph, masked_feat.float(),
self.graph.edata['weight'], self.target_node)
pred2 = self.base_model(self.graph, (self.feat - masked_feat).float(),
self.graph.edata['weight'],
self.target_node)
return pred1, pred2
def loss(self, pred1, pred2, pred_label, gam, lam, alp):
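        # Same factual / counterfactual hinges as ExplainModelNodeMulti, but the mask (and hence
        # the sparsity penalty) is over node features. Note that torch.linalg.norm with no `ord`
        # returns the Frobenius norm here, despite the variable being named L1.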
relu = torch.nn.ReLU()
f_next = torch.max(torch.cat((pred1[:pred_label],
pred1[pred_label+1:])))
cf_next = torch.max(torch.cat((pred2[:pred_label],
pred2[pred_label+1:])))
bpr1 = relu(gam + f_next - pred1[pred_label])
bpr2 = relu(gam + pred2[pred_label] - cf_next)
masked_feat = self.get_masked_feat()
L1 = torch.linalg.norm(masked_feat)
loss = L1 + lam * (alp * bpr1 + (1 - alp) * bpr2)
return bpr1, bpr2, L1, loss
def construct_feat_mask(self):
"""
construct mask for feature vector
:return:
"""
mask = torch.nn.Parameter(torch.FloatTensor(self.num_nodes, self.feat_dim))
std = torch.nn.init.calculate_gain("relu") * math.sqrt(
2.0 / (self.num_nodes + self.feat_dim)
)
with torch.no_grad():
mask.normal_(1.0, std)
return mask
def get_masked_feat(self):
feat_mask = torch.sigmoid(self.feat_mask)
masked_feat = self.feat * feat_mask
return masked_feat | 27,933 | 40.079412 | 142 | py |
gnn_cff | gnn_cff-main/scripts/exp_node_tree_cycles.py | import os
import numpy as np
import torch
from utils.argument import arg_parse_exp_node_tree_cycles
from models.explainer_models import NodeExplainerEdgeMulti
from models.gcn import GCNNodeTreeCycles
from utils.preprocessing.tree_cycles_preprocessing import TreeCyclesDataset
import sys
if __name__ == "__main__":
torch.manual_seed(1000)
np.random.seed(0)
np.set_printoptions(threshold=sys.maxsize)
exp_args = arg_parse_exp_node_tree_cycles()
print("argument:\n", exp_args)
model_path = exp_args.model_path
train_indices = np.load(os.path.join(model_path, 'train_indices.pickle'), allow_pickle=True)
test_indices = np.load(os.path.join(model_path, 'test_indices.pickle'), allow_pickle=True)
G_dataset = TreeCyclesDataset(load_path=os.path.join(model_path))
# targets = np.load(os.path.join(model_path, 'targets.pickle'), allow_pickle=True) # the target node to explain
graphs = G_dataset.graphs
labels = G_dataset.labels
targets = G_dataset.targets
if exp_args.gpu:
device = torch.device('cuda:%s' % exp_args.cuda)
else:
device = 'cpu'
base_model = GCNNodeTreeCycles(G_dataset.feat_dim, 32, num_classes=2, if_exp=True).to(device)
base_model.load_state_dict(torch.load(os.path.join(model_path, 'model.model')))
# fix the base model
for param in base_model.parameters():
param.requires_grad = False
# Create explainer
explainer = NodeExplainerEdgeMulti(
base_model=base_model,
G_dataset=G_dataset,
args=exp_args,
test_indices=test_indices,
# fix_exp=6
)
explainer.explain_nodes_gnn_stats()
| 1,650 | 35.688889 | 116 | py |
gnn_cff | gnn_cff-main/scripts/exp_node_ba_shapes.py | import os
import numpy as np
import torch
from utils.argument import arg_parse_exp_node_ba_shapes
from models.explainer_models import NodeExplainerEdgeMulti
from models.gcn import GCNNodeBAShapes
from utils.preprocessing.ba_shapes_preprocessing import BAShapesDataset
import sys
if __name__ == "__main__":
torch.manual_seed(0)
np.random.seed(0)
np.set_printoptions(threshold=sys.maxsize)
exp_args = arg_parse_exp_node_ba_shapes()
print("argument:\n", exp_args)
model_path = exp_args.model_path
train_indices = np.load(os.path.join(model_path, 'train_indices.pickle'), allow_pickle=True)
test_indices = np.load(os.path.join(model_path, 'test_indices.pickle'), allow_pickle=True)
G_dataset = BAShapesDataset(load_path=os.path.join(model_path))
# targets = np.load(os.path.join(model_path, 'targets.pickle'), allow_pickle=True) # the target node to explain
graphs = G_dataset.graphs
labels = G_dataset.labels
targets = G_dataset.targets
if exp_args.gpu:
device = torch.device('cuda:%s' % exp_args.cuda)
else:
device = 'cpu'
base_model = GCNNodeBAShapes(G_dataset.feat_dim, 16, num_classes=4, device=device, if_exp=True).to(device)
base_model.load_state_dict(torch.load(os.path.join(model_path, 'model.model')))
# fix the base model
for param in base_model.parameters():
param.requires_grad = False
# Create explainer
explainer = NodeExplainerEdgeMulti(
base_model=base_model,
G_dataset=G_dataset,
args=exp_args,
test_indices=test_indices,
# fix_exp=6
)
explainer.explain_nodes_gnn_stats() | 1,646 | 37.302326 | 116 | py |
gnn_cff | gnn_cff-main/scripts/train_graph_classification.py | import numpy as np
import torch
import os
import time
from pathlib import Path
from models.gcn import GCNGraph
from utils.argument import arg_parse_train_graph_mutag_0
from utils.graph_init import graph_init_real
from torch.utils.data.sampler import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader
def train_graph_classification(args):
if args.gpu:
device = torch.device('cuda:%s' % args.cuda)
else:
device = 'cpu'
# device = 'cpu'
out_path = os.path.join(args.save_dir, args.dataset + "_logs")
G_dataset = graph_init_real(args.dataset)
Path(out_path).mkdir(parents=True, exist_ok=True)
num_examples = len(G_dataset)
num_train = int(num_examples * args.train_ratio)
train_indices = np.unique(np.random.choice(np.arange(num_examples), num_train, replace=False))
test_indices = np.unique(np.array([i for i in np.arange(num_examples) if i not in train_indices]))
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_dataloader = GraphDataLoader(
G_dataset, sampler=train_sampler, batch_size=128, drop_last=False)
test_dataloader = GraphDataLoader(
G_dataset, sampler=test_sampler, batch_size=128, drop_last=False)
model = GCNGraph(G_dataset.feat_dim, 128).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
loss_fn = torch.nn.BCELoss()
for epoch in range(args.num_epochs):
begin = time.time()
losses = []
num_correct = 0
num_train = 0
for batched_graph, labels in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
optimizer.zero_grad()
pred = model(batched_graph, batched_graph.ndata['feat'].float(), batched_graph.edata['weight']).squeeze()
num_correct += ((pred >= 0.5).int() == labels).sum().item()
num_train += len(labels)
loss = loss_fn(pred, labels.float())
losses.append(loss.to('cpu').detach().numpy())
loss.backward()
optimizer.step()
print('epoch:%d' % epoch, 'loss:', np.mean(losses), 'Train accuracy:', num_correct / num_train)
print('time', time.time() - begin)
# evaluate
num_correct = 0
num_tests = 0
for batched_graph, labels in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(), batched_graph.edata['weight']).squeeze()
num_correct += ((pred >= 0.5).int() == labels).sum().item()
num_tests += len(labels)
print('Final train accuracy:', num_correct / num_tests)
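    # evaluate on the held-out test split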
num_correct = 0
num_tests = 0
for batched_graph, labels in test_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(), batched_graph.edata['weight']).squeeze()
num_correct += ((pred >= 0.5).int() == labels).sum().item()
num_tests += len(labels)
print('Test accuracy:', num_correct / num_tests)
train_indices.dump(os.path.join(out_path, 'train_indices.pickle'))
test_indices.dump(os.path.join(out_path, 'test_indices.pickle'))
G_dataset.save_(os.path.join(out_path, 'dgl_graph.bin'))
torch.save(model.state_dict(), os.path.join(out_path, 'model.model'))
return True
if __name__ == "__main__":
torch.manual_seed(0)
np.random.seed(0)
train_args = arg_parse_train_graph_mutag_0()
if train_args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = train_args.cuda
print("Using CUDA", train_args.cuda)
else:
print("Using CPU")
train_graph_classification(train_args)
| 3,833 | 39.787234 | 117 | py |
gnn_cff | gnn_cff-main/scripts/exp_graph.py | import os
import numpy as np
import torch
from utils.argument import arg_parse_exp_graph_mutag_0
from models.explainer_models import GraphExplainerEdge
from models.gcn import GCNGraph
from utils.preprocessing.mutag_preprocessing_0 import MutagDataset0
import sys
if __name__ == "__main__":
np.set_printoptions(threshold=sys.maxsize)
torch.manual_seed(0)
np.random.seed(0)
exp_args = arg_parse_exp_graph_mutag_0()
print("argument:\n", exp_args)
model_path = exp_args.model_path
train_indices = np.load(os.path.join(model_path, 'train_indices.pickle'), allow_pickle=True)
test_indices = np.load(os.path.join(model_path, 'test_indices.pickle'), allow_pickle=True)
G_dataset = MutagDataset0(load_path=os.path.join(model_path))
graphs = G_dataset.graphs
labels = G_dataset.labels
if exp_args.gpu:
device = torch.device('cuda:%s' % exp_args.cuda)
else:
device = 'cpu'
base_model = GCNGraph(G_dataset.feat_dim, 128).to(device)
base_model.load_state_dict(torch.load(os.path.join(model_path, 'model.model')))
    # freeze the base model so its parameters are not updated during explanation
for param in base_model.parameters():
param.requires_grad = False
# Create explainer
explainer = GraphExplainerEdge(
base_model=base_model,
G_dataset=G_dataset,
args=exp_args,
test_indices=test_indices,
# fix_exp=15
)
explainer.explain_nodes_gnn_stats()
| 1,428 | 32.232558 | 96 | py |
gnn_cff | gnn_cff-main/scripts/train_node_classification.py | import numpy as np
import torch
import os
import time
from pathlib import Path
from models.gcn import GCNNodeBAShapes
from utils.argument import arg_parse_train_node_ba_shapes
from utils.graph_init import graph_init_real
from torch.utils.data.sampler import SubsetRandomSampler
from dgl.dataloading import GraphDataLoader
def train_node_classification(args):
if args.gpu:
device = torch.device('cuda:%s' % args.cuda)
else:
device = 'cpu'
# device = 'cpu'
out_path = os.path.join(args.save_dir, args.dataset + "_logs")
G_dataset = graph_init_real(args.dataset)
Path(out_path).mkdir(parents=True, exist_ok=True)
num_examples = len(G_dataset)
num_train = int(num_examples * args.train_ratio)
train_indices = np.unique(np.random.choice(np.arange(num_examples), num_train, replace=False))
test_indices = np.unique(np.array([i for i in np.arange(num_examples) if i not in train_indices]))
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_dataloader = GraphDataLoader(
G_dataset, sampler=train_sampler, batch_size=32, drop_last=False)
test_dataloader = GraphDataLoader(
G_dataset, sampler=test_sampler, batch_size=32, drop_last=False)
model = GCNNodeBAShapes(G_dataset.feat_dim, 16, num_classes=4, device=device).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
loss_fn = torch.nn.CrossEntropyLoss()
begin = time.time()
for epoch in range(args.num_epochs):
losses = []
num_correct = 0
num_train = 0
# begin = time.time()
for batched_graph, labels, target_nodes in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
target_nodes = target_nodes.to(device)
optimizer.zero_grad()
pred = model(batched_graph, batched_graph.ndata['feat'].float(),
batched_graph.edata['weight'], target_nodes).squeeze()
# print(pred)
ori_int_labels = torch.argmax(labels, dim=1)
pre_int_labels = torch.argmax(pred, dim=1)
num_correct += (ori_int_labels == pre_int_labels).sum().item()
num_train += len(labels)
loss = loss_fn(pred, ori_int_labels)
losses.append(loss.to('cpu').detach().numpy())
loss.backward()
optimizer.step()
print('epoch:%d' % epoch, 'loss:', np.mean(losses), 'Train accuracy:', num_correct / num_train)
# evaluate
num_correct = 0
num_train = 0
for batched_graph, labels, target_nodes in train_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
target_nodes = target_nodes.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(),
batched_graph.edata['weight'], target_nodes).squeeze()
ori_int_labels = torch.argmax(labels, dim=1)
pre_int_labels = torch.argmax(pred, dim=1)
num_correct += (ori_int_labels == pre_int_labels).sum().item()
num_train += len(labels)
print('Final train accuracy:', num_correct / num_train)
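    # evaluate on the held-out test split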
num_correct = 0
num_tests = 0
for batched_graph, labels, target_nodes in test_dataloader:
batched_graph = batched_graph.to(device)
labels = labels.to(device)
target_nodes = target_nodes.to(device)
pred = model(batched_graph, batched_graph.ndata['feat'].float(),
batched_graph.edata['weight'], target_nodes).squeeze()
ori_int_labels = torch.argmax(labels, dim=1)
pre_int_labels = torch.argmax(pred, dim=1)
num_correct += (ori_int_labels == pre_int_labels).sum().item()
num_tests += len(labels)
print('Test accuracy:', num_correct / num_tests)
print('time: ', time.time() - begin)
train_indices.dump(os.path.join(out_path, 'train_indices.pickle'))
test_indices.dump(os.path.join(out_path, 'test_indices.pickle'))
G_dataset.save_(out_path)
torch.save(model.state_dict(), os.path.join(out_path, 'model.model'))
return True
if __name__ == "__main__":
torch.manual_seed(0)
np.random.seed(0)
train_args = arg_parse_train_node_ba_shapes()
if train_args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = train_args.cuda
print("Using CUDA", train_args.cuda)
else:
print("Using CPU")
print(train_args)
train_node_classification(train_args)
| 4,554 | 38.95614 | 104 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/ba_shapes_preprocessing.py | """Read the BA-Shapes dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
import networkx as nx
import matplotlib.pyplot as plt
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
from utils.common_utils import ba_shapes_dgl_to_networkx
class BAShapesDataset(DGLDataset):
def __init__(self, adj=None, node_labels=None, edge_labels=None, hop_num=3, feat_dim=10, load_path=None):
super().__init__(name='ba_shapes')
if load_path:
self.load_path = load_path
self.load_()
else:
self.adj = adj
self.edge_labels = edge_labels
self.node_labels = node_labels
self.hop_num = hop_num
self.feat_dim = feat_dim
self.graphs = []
self.labels = []
self.targets = []
for n_i, node in enumerate(np.arange(len(self.adj))):
n_l = self.node_labels[node]
g, new_idx = self.sub_graph_generator(node)
self.graphs.append(g)
self.labels.append(n_l)
self.targets.append(new_idx)
self.labels = torch.from_numpy(np.array(self.labels))
self.targets = torch.from_numpy(np.array(self.targets))
def sub_graph_generator(self, node):
"""
a simple bfs to find the k-hop sub-graph
:param node:
:param node_labels:
:return:
"""
sub_nodes = set() # the sub nodes in the sub graph (within k hop)
sub_nodes.add(node)
que = [node]
close_set = set()
for i in range(self.hop_num):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(self.adj[tar] == 1)[0]
hop_nodes.extend(neighbors)
sub_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_nodes = np.sort(np.array(list(sub_nodes)))
node_new = np.where(sub_nodes == node)[0][0]
sub_edge_labels = self.edge_labels[sub_nodes][:, sub_nodes]
filtered_sub_edge_labels = np.zeros((sub_edge_labels.shape[0], sub_edge_labels.shape[1]))
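        # second bfs restricted to ground-truth edges: keep only the ground-truth edges reachable from the target node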
sgt_nodes = set() # the sub nodes in the gt graph (within k hop)
sgt_nodes.add(node_new)
que = [node_new]
close_set = set()
for i in range(self.hop_num + 1):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(sub_edge_labels[tar] == 1)[0]
hop_nodes.extend(neighbors)
for n in neighbors:
filtered_sub_edge_labels[tar, n] = 1
filtered_sub_edge_labels[n, tar] = 1
sgt_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_edge_labels = filtered_sub_edge_labels
sub_adj = self.adj[sub_nodes][:, sub_nodes]
sub_nodes = np.arange(len(sub_nodes))
# create dgl graph
comb = np.array(np.meshgrid(sub_nodes, sub_nodes)).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=len(sub_nodes))
g_feats = np.ones((len(sub_nodes), self.feat_dim))
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = sub_adj.reshape(1, -1)[0]
edge_gts = sub_edge_labels.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['gt'] = torch.from_numpy(edge_gts)
return g, node_new
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i], self.targets[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(os.path.join(save_path, 'dgl_graph.bin'), self.graphs, {'labels': self.labels})
np.array(self.targets).dump(os.path.join(save_path, 'targets.pickle'))
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
self.targets = np.load(os.path.join(self.load_path, 'targets.pickle'), allow_pickle=True)
def ba_shapes_preprocessing(dataset_dir):
name = "BA_Shapes"
data = np.load(os.path.join(dataset_dir, 'syn_data.pkl'), allow_pickle=True)
adj = np.array(data[0], dtype='float32')
feats = data[1]
y_train = data[2]
y_val = data[3]
y_test = data[4]
e_labels = data[8]
e_labels = np.array(np.maximum(e_labels, e_labels.T), dtype="float32") # make symmetric
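    # merge the train/val/test label splits into a single array of (one-hot) node labels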
node_labels = np.array(np.logical_or(y_train, np.logical_or(y_val, y_test)), dtype=int)
G_dataset = BAShapesDataset(adj, node_labels, e_labels, hop_num=3, feat_dim=10)
return G_dataset
| 5,366 | 37.335714 | 109 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/mutag_preprocessing_0.py | """Read the Mutag dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
class MutagDataset0(DGLDataset):
def __init__(self, edges=None, graph_indicator=None, node_labels=None, edge_labels=None, graph_labels=None, load_path=None):
super().__init__(name='mutag0')
if load_path:
self.load_path = load_path
self.load_()
else:
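            # swap the two graph labels (original label 1 becomes 0 and any other label becomes 1)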
for i in range(len(graph_labels)):
if graph_labels[i] == 1:
graph_labels[i] = 0
else:
graph_labels[i] = 1
self.edges = edges
self.graph_indicator = graph_indicator
self.node_labels = node_labels
self.edge_labels = edge_labels
self.graph_labels = graph_labels
self.graphs = []
self.labels = []
self.feat_dim = len(np.unique(self.node_labels))
self.component_dict = {0: 'C', 1: 'O', 2: 'Cl', 3: 'H', 4: 'N', 5: 'F', 6: 'Br', 7: 'S',
8: 'P', 9: 'I', 10: 'Na', 11: 'K', 12: 'Li', 13: 'Ca'}
# group edges
edges_group = {}
e_labels_group = {}
for e_id, edge in enumerate(self.edges):
e_label = self.edge_labels[e_id]
g_id = self.graph_indicator[edge[0]]
if g_id != self.graph_indicator[edge[1]]:
print('graph indicator error!')
exit(1)
if g_id not in edges_group.keys():
edges_group[g_id] = [edge]
e_labels_group[g_id] = [e_label]
else:
edges_group[g_id].append(edge)
e_labels_group[g_id].append(e_label)
for g_id, g_edges in edges_group.items():
g_label = self.graph_labels[g_id]
g_edges = np.array(g_edges)
g_e_labels = e_labels_group[g_id]
src = g_edges[:, 0]
dst = g_edges[:, 1]
unique_nodes = np.unique(np.concatenate((src, dst), axis=0))
g_feats = np.zeros((len(unique_nodes), self.feat_dim))
int_feats = self.node_labels[unique_nodes]
g_feats[np.arange(len(unique_nodes)), int_feats] = 1 # convert feature to one-hot vec
n_id_dict = {}
n_id_dict_reverse = {}
for i in range(len(unique_nodes)):
n_id_dict[unique_nodes[i]] = i
n_id_dict_reverse[i] = unique_nodes[i]
for i in range(len(src)):
src[i] = n_id_dict[src[i]]
dst[i] = n_id_dict[dst[i]]
num_nodes = len(np.unique(np.concatenate((src, dst), axis=0)))
adj = np.zeros((num_nodes, num_nodes), dtype='float32')
adj_e_label = np.zeros((num_nodes, num_nodes), dtype='float32')
for i in range(len(src)):
n0 = src[i]
n1 = dst[i]
adj[n0, n1] = 1.0
adj_e_label[n0, n1] = g_e_labels[i]
comb = np.array(np.meshgrid(np.arange(num_nodes), np.arange(num_nodes))).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=num_nodes)
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = adj.reshape(1, -1)[0]
edge_labels = adj_e_label.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['label'] = torch.from_numpy(edge_labels)
self.graphs.append(g)
self.labels.append(g_label)
self.labels = torch.from_numpy(np.array(self.labels))
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(save_path, self.graphs, {'labels': self.labels})
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
def mutag_preprocessing_0(dataset_dir):
name = "Mutagenicity"
edge_path = os.path.join(dataset_dir, name + "_A.txt")
edge_label_path = os.path.join(dataset_dir, name + "_edge_gt.txt")
graph_indicator_path = os.path.join(dataset_dir, name + "_graph_indicator.txt")
node_label_path = os.path.join(dataset_dir, name + "_node_labels.txt")
graph_label_path = os.path.join(dataset_dir, name + "_graph_labels.txt")
edge_data = read_file(edge_path)
edge_data = np.array(edge_data)
edge_data = edge_data - 1
edge_labels = read_file(edge_label_path)
edge_labels = np.array(edge_labels)
graph_indicator = read_file(graph_indicator_path) - 1
node_labels = read_file(node_label_path)
graph_labels = read_file((graph_label_path))
G_dataset = MutagDataset0(edge_data, graph_indicator, node_labels, edge_labels, graph_labels)
return G_dataset
| 5,400 | 41.865079 | 128 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/tree_cycles_preprocessing.py | """Read the Tree-Cycles dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
import networkx as nx
import matplotlib.pyplot as plt
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
from utils.common_utils import ba_shapes_dgl_to_networkx
class TreeCyclesDataset(DGLDataset):
def __init__(self, adj=None, node_labels=None, edge_labels=None, hop_num=3, feat_dim=10, load_path=None):
super().__init__(name='tree_cycles')
if load_path:
self.load_path = load_path
self.load_()
else:
self.adj = adj
self.edge_labels = edge_labels
self.node_labels = node_labels
self.hop_num = hop_num
self.feat_dim = feat_dim
self.graphs = []
self.labels = []
self.targets = []
for n_i, node in enumerate(np.arange(len(self.adj))):
n_l = self.node_labels[node]
g, new_idx = self.sub_graph_generator(node)
self.graphs.append(g)
self.labels.append(n_l)
self.targets.append(new_idx)
self.labels = torch.from_numpy(np.array(self.labels))
self.targets = torch.from_numpy(np.array(self.targets))
def sub_graph_generator(self, node):
"""
a simple bfs to find the k-hop sub-graph
:param node:
:param node_labels:
:return:
"""
sub_nodes = set()
sub_nodes.add(node)
que = [node]
close_set = set()
for i in range(self.hop_num):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(self.adj[tar] == 1)[0]
hop_nodes.extend(neighbors)
sub_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_nodes = np.sort(np.array(list(sub_nodes)))
node_new = np.where(sub_nodes == node)[0][0]
sub_edge_labels = self.edge_labels[sub_nodes][:, sub_nodes]
filtered_sub_edge_labels = np.zeros((sub_edge_labels.shape[0], sub_edge_labels.shape[1]))
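        # second bfs restricted to ground-truth edges: keep only the ground-truth edges reachable from the target node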
sgt_nodes = set()
sgt_nodes.add(node_new)
que = [node_new]
close_set = set()
for i in range(self.hop_num + 1):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(sub_edge_labels[tar] == 1)[0]
hop_nodes.extend(neighbors)
for n in neighbors:
filtered_sub_edge_labels[tar, n] = 1
filtered_sub_edge_labels[n, tar] = 1
sgt_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_edge_labels = filtered_sub_edge_labels
sub_adj = self.adj[sub_nodes][:, sub_nodes]
sub_nodes = np.arange(len(sub_nodes))
# create dgl graph
comb = np.array(np.meshgrid(sub_nodes, sub_nodes)).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=len(sub_nodes))
g_feats = np.ones((len(sub_nodes), self.feat_dim))
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = sub_adj.reshape(1, -1)[0]
edge_gts = sub_edge_labels.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['gt'] = torch.from_numpy(edge_gts)
# # test plot
# nx_G = ba_shapes_dgl_to_networkx(g)
# edge_color_map = []
# for edge in nx_G.edges(data=True):
# if edge[2]['gt'] == 1:
# edge_color_map.append('red')
# else:
# edge_color_map.append('black')
# pos = nx.kamada_kawai_layout(nx_G)
# nx.draw_networkx(nx_G, pos, with_labels=True, edge_color=edge_color_map)
# ax = plt.gca()
# ax.margins(0.08)
# plt.axis("off")
# plt.tight_layout()
# # plt.savefig('mutag_%d.png' % graph_i)
# # plt.clf()
# plt.show()
return g, node_new
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i], self.targets[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(os.path.join(save_path, 'dgl_graph.bin'), self.graphs, {'labels': self.labels})
np.array(self.targets).dump(os.path.join(save_path, 'targets.pickle'))
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
self.targets = np.load(os.path.join(self.load_path, 'targets.pickle'), allow_pickle=True)
def tree_cycles_preprocessing(dataset_dir):
name = "Tree_Cycles"
# assign path
data = np.load(os.path.join(dataset_dir, 'syn_data.pkl'), allow_pickle=True)
adj = np.array(data[0], dtype='float32')
feats = data[1]
y_train = data[2]
y_val = data[3]
y_test = data[4]
e_labels = data[8]
e_labels = np.array(np.maximum(e_labels, e_labels.T), dtype="float32")
node_labels = np.array(np.logical_or(y_train, np.logical_or(y_val, y_test)), dtype=int)
G_dataset = TreeCyclesDataset(adj, node_labels, e_labels, hop_num=3, feat_dim=10)
return G_dataset
| 5,903 | 36.367089 | 109 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/citeseer_preprocessing.py | """Read the CiteSeer dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
import networkx as nx
import matplotlib.pyplot as plt
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file_citeseer
from utils.common_utils import ba_shapes_dgl_to_networkx
class CiteSeerDataset(DGLDataset):
def __init__(self, adj=None, node_labels=None, node_feats=None, hop_num=3, load_path=None):
super().__init__(name='citeseer')
if load_path:
self.load_path = load_path
self.load_()
else:
self.adj = adj
self.node_feats = node_feats
self.node_labels = node_labels
self.hop_num = hop_num
self.feat_dim = len(node_feats[0])
self.graphs = []
self.labels = []
self.targets = []
for n_i, node in enumerate(np.arange(len(self.adj))):
n_l = self.node_labels[node]
g, new_idx = self.sub_graph_generator(node)
self.graphs.append(g)
self.labels.append(n_l)
self.targets.append(new_idx)
self.labels = torch.from_numpy(np.array(self.labels))
self.targets = torch.from_numpy(np.array(self.targets))
def sub_graph_generator(self, node):
"""
a simple bfs to find the k-hop sub-graph
:param node:
:param node_labels:
:return:
"""
# print(node)
sub_nodes = set() # the sub nodes in the sub graph (within k hop)
sub_nodes.add(node)
que = [node]
close_set = set()
for i in range(self.hop_num):
hop_nodes = []
while que:
tar = que.pop(0)
neighbors = np.where(self.adj[tar] == 1)[0]
hop_nodes.extend(neighbors)
sub_nodes.update(neighbors)
if tar not in close_set:
close_set.add(tar)
if len(hop_nodes) == 0:
break
for n in hop_nodes:
if n not in close_set:
que.append(n)
sub_nodes = np.sort(np.array(list(sub_nodes)))
node_new = np.where(sub_nodes == node)[0][0]
sub_adj = self.adj[sub_nodes][:, sub_nodes]
g_feats = self.node_feats[sub_nodes]
sub_nodes = np.arange(len(sub_nodes))
# create dgl graph
comb = np.array(np.meshgrid(sub_nodes, sub_nodes)).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=len(sub_nodes))
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = sub_adj.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
return g, node_new
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i], self.targets[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(os.path.join(save_path, 'dgl_graph.bin'), self.graphs, {'labels': self.labels})
np.array(self.targets).dump(os.path.join(save_path, 'targets.pickle'))
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
self.targets = np.load(os.path.join(self.load_path, 'targets.pickle'), allow_pickle=True)
def citeseer_preprocessing(dataset_dir):
name = "citeseer"
paper_type_dict = {"Agents": 0, "AI": 1, "DB": 2, "IR": 3, "ML": 4, "HCI": 5}
edge_data_path = os.path.join(dataset_dir, 'citeseer.cites')
node_info_data_path = os.path.join(dataset_dir, 'citeseer.content')
node_info_data = np.array(read_file_citeseer(node_info_data_path))
edge_data = np.array(read_file_citeseer(edge_data_path))
# filter out papers without info
valid_paper_set = set()
for info in node_info_data:
valid_paper_set.add(info[0])
valid_edge_data = []
for edge in edge_data:
if edge[0] in valid_paper_set and edge[1] in valid_paper_set:
valid_edge_data.append(edge)
edge_data = np.array(valid_edge_data) # only the edges with info
name_int_dict = {} # {'name': index}
idx = 0
for edge in edge_data:
if edge[0] not in name_int_dict:
name_int_dict[edge[0]] = idx
idx += 1
if edge[1] not in name_int_dict:
name_int_dict[edge[1]] = idx
idx += 1
for i in range(len(edge_data)):
edge_data[i][0] = name_int_dict[edge_data[i][0]]
edge_data[i][1] = name_int_dict[edge_data[i][1]]
edge_data = np.array(edge_data, dtype=int)
node_num = len(name_int_dict.keys())
feat_dim = len(node_info_data[0][1:-1])
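    # placeholder labels (-1) and features; filled below from the citeseer.content records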
node_labels = np.ones(node_num, dtype=int) * -1
node_feats = np.ones((node_num, feat_dim)) * -1
idx_set = set()
for i in range(len(node_info_data)):
paper_id = node_info_data[i][0]
paper_label = paper_type_dict[node_info_data[i][-1]]
paper_feat = node_info_data[i][1:-1]
paper_idx = name_int_dict[paper_id]
idx_set.add(paper_idx)
node_labels[paper_idx] = paper_label
node_feats[paper_idx] = paper_feat
# create adj matrix
adj = np.zeros((node_num, node_num), dtype='float32')
for edge in edge_data:
n0 = edge[0]
n1 = edge[1]
adj[n0, n1] = 1
adj[n1, n0] = 1
G_dataset = CiteSeerDataset(adj, node_labels, node_feats, hop_num=3)
return G_dataset
| 5,767 | 34.170732 | 109 | py |
gnn_cff | gnn_cff-main/utils/preprocessing/nci1_preprocessing.py | """Read the NCI1 dataset and create the graphs"""
import numpy as np
import os
import dgl
from dgl.data import DGLDataset
import torch
from dgl import save_graphs, load_graphs
from utils.common_utils import read_file
class NCI1Dataset(DGLDataset):
def __init__(self, edges=None, graph_indicator=None, node_labels=None, graph_labels=None, load_path=None):
        super().__init__(name='nci1')
if load_path:
self.load_path = load_path
self.load_()
else:
self.edges = edges
self.graph_indicator = graph_indicator
self.node_labels = node_labels
self.graph_labels = graph_labels
self.graphs = []
self.labels = []
self.feat_dim = len(np.unique(self.node_labels))
self.component_dict = {0: 'C', 1: 'O', 2: 'Cl', 3: 'H', 4: 'N', 5: 'F', 6: 'Br', 7: 'S',
8: 'P', 9: 'I', 10: 'Na', 11: 'K', 12: 'Li', 13: 'Ca'}
# group edges
edges_group = {}
for e_id, edge in enumerate(self.edges):
g_id = self.graph_indicator[edge[0]]
if g_id != self.graph_indicator[edge[1]]:
print('graph indicator error!')
exit(1)
if g_id not in edges_group.keys():
edges_group[g_id] = [edge]
else:
edges_group[g_id].append(edge)
for g_id, g_edges in edges_group.items():
g_label = self.graph_labels[g_id]
g_edges = np.array(g_edges)
src = g_edges[:, 0]
dst = g_edges[:, 1]
unique_nodes = np.unique(np.concatenate((src, dst), axis=0))
g_feats = np.zeros((len(unique_nodes), self.feat_dim))
int_feats = self.node_labels[unique_nodes]
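            # convert the integer node labels into one-hot feature vectors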
g_feats[np.arange(len(unique_nodes)), int_feats] = 1
n_id_dict = {}
n_id_dict_reverse = {}
for i in range(len(unique_nodes)):
n_id_dict[unique_nodes[i]] = i
n_id_dict_reverse[i] = unique_nodes[i]
for i in range(len(src)):
src[i] = n_id_dict[src[i]]
dst[i] = n_id_dict[dst[i]]
num_nodes = len(np.unique(np.concatenate((src, dst), axis=0)))
adj = np.zeros((num_nodes, num_nodes), dtype='float32')
adj_e_label = np.zeros((num_nodes, num_nodes), dtype='float32')
for i in range(len(src)):
n0 = src[i]
n1 = dst[i]
adj[n0, n1] = 1.0
comb = np.array(np.meshgrid(np.arange(num_nodes), np.arange(num_nodes))).T.reshape(-1, 2)
g = dgl.graph((torch.from_numpy(comb[:, 0]), torch.from_numpy(comb[:, 1])), num_nodes=num_nodes)
g.ndata['feat'] = torch.from_numpy(g_feats)
edge_weights = adj.reshape(1, -1)[0]
edge_labels = adj_e_label.reshape(1, -1)[0]
g.edata['weight'] = torch.from_numpy(edge_weights)
g.edata['label'] = torch.from_numpy(edge_labels)
self.graphs.append(g)
self.labels.append(g_label)
self.labels = torch.from_numpy(np.array(self.labels))
def process(self):
print('processing')
def __getitem__(self, i):
return self.graphs[i], self.labels[i]
def __len__(self):
return len(self.graphs)
def save_(self, save_path):
save_graphs(save_path, self.graphs, {'labels': self.labels})
def load_(self):
# load processed data from directory `self.save_path`
self.graphs, label_dict = load_graphs(os.path.join(self.load_path, 'dgl_graph.bin'))
self.labels = label_dict['labels']
self.feat_dim = self.graphs[0].ndata['feat'].shape[1]
def nci1_preprocessing(dataset_dir):
name = "NCI1"
# assign path
edge_path = os.path.join(dataset_dir, name + "_A.txt")
graph_indicator_path = os.path.join(dataset_dir, name + "_graph_indicator.txt")
node_label_path = os.path.join(dataset_dir, name + "_node_labels.txt")
graph_label_path = os.path.join(dataset_dir, name + "_graph_labels.txt")
edge_data = read_file(edge_path)
edge_data = np.array(edge_data)
edge_data = edge_data - 1
graph_indicator = read_file(graph_indicator_path) - 1
node_labels = np.array(read_file(node_label_path)) - 1
graph_labels = read_file((graph_label_path))
G_dataset = NCI1Dataset(edge_data, graph_indicator, node_labels, graph_labels)
return G_dataset
| 4,663 | 41.4 | 112 | py |
hifi-gan | hifi-gan-master/inference.py | from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav
from models import Generator
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname))
wav = wav / MAX_WAV_VALUE
wav = torch.FloatTensor(wav).to(device)
x = get_mel(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
global device
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda')
else:
device = torch.device('cpu')
inference(a)
if __name__ == '__main__':
main()
| 2,652 | 26.635417 | 107 | py |
hifi-gan | hifi-gan-master/inference_e2e.py | from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import numpy as np
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from models import Generator
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_mels_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
x = np.load(os.path.join(a.input_mels_dir, filname))
x = torch.FloatTensor(x).to(device)
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated_e2e.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_mels_dir', default='test_mel_files')
parser.add_argument('--output_dir', default='generated_files_from_mel')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
global device
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda')
else:
device = torch.device('cpu')
inference(a)
if __name__ == '__main__':
main()
| 2,444 | 25.868132 | 105 | py |
hifi-gan | hifi-gan-master/meldataset.py | import math
import os
import random
import torch
import torch.utils.data
import numpy as np
from librosa.util import normalize
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn
MAX_WAV_VALUE = 32768.0
def load_wav(full_path):
sampling_rate, data = read(full_path)
return data, sampling_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
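    # cache the mel filterbank and hann window so they are only constructed once per device/fmax setting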
if fmax not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
def get_dataset_filelist(a):
with open(a.input_training_file, 'r', encoding='utf-8') as fi:
training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
with open(a.input_validation_file, 'r', encoding='utf-8') as fi:
validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
return training_files, validation_files
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,
device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.cached_wav = None
self.n_cache_reuse = n_cache_reuse
self._cache_ref_count = 0
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
def __getitem__(self, index):
filename = self.audio_files[index]
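        # reload audio from disk only when the cached waveform has been reused n_cache_reuse times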
if self._cache_ref_count == 0:
audio, sampling_rate = load_wav(filename)
audio = audio / MAX_WAV_VALUE
if not self.fine_tuning:
audio = normalize(audio) * 0.95
self.cached_wav = audio
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
self._cache_ref_count = self.n_cache_reuse
else:
audio = self.cached_wav
self._cache_ref_count -= 1
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = random.randint(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]
else:
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files)
| 6,314 | 36.366864 | 115 | py |
hifi-gan | hifi-gan-master/utils.py | import glob
import os
import matplotlib
import torch
from torch.nn.utils import weight_norm
matplotlib.use("Agg")
import matplotlib.pylab as plt
def plot_spectrogram(spectrogram):
fig, ax = plt.subplots(figsize=(10, 2))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
fig.canvas.draw()
plt.close()
return fig
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def apply_weight_norm(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_norm(m)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def save_checkpoint(filepath, obj):
print("Saving checkpoint to {}".format(filepath))
torch.save(obj, filepath)
print("Complete.")
def scan_checkpoint(cp_dir, prefix):
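    # checkpoint files end with an 8-digit zero-padded step count, so lexicographic sort finds the latest one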
pattern = os.path.join(cp_dir, prefix + '????????')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return None
return sorted(cp_list)[-1]
| 1,377 | 22.355932 | 63 | py |
hifi-gan | hifi-gan-master/models.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from utils import init_weights, get_padding
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
def __init__(self, h):
super(Generator, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
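            # sum the outputs of this stage's resblocks and average them (multi-receptive field fusion)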
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=2),
AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
| 9,905 | 33.880282 | 107 | py |
hifi-gan | hifi-gan-master/train.py | import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
import torch.multiprocessing as mp
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
from models import Generator, MultiPeriodDiscriminator, MultiScaleDiscriminator, feature_loss, generator_loss,\
discriminator_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
generator = Generator(h).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiScaleDiscriminator().to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
msd.load_state_dict(state_dict_do['msd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
msd = DistributedDataParallel(msd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
training_filelist, validation_filelist = get_dataset_filelist(a)
trainset = MelDataset(training_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,
fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir)
train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,
sampler=train_sampler,
batch_size=h.batch_size,
pin_memory=True,
drop_last=True)
if rank == 0:
validset = MelDataset(validation_filelist, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
base_mels_path=a.input_mels_dir)
validation_loader = DataLoader(validset, num_workers=1, shuffle=False,
sampler=None,
batch_size=1,
pin_memory=True,
drop_last=True)
sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
generator.train()
mpd.train()
msd.train()
for epoch in range(max(0, last_epoch), a.training_epochs):
if rank == 0:
start = time.time()
print("Epoch: {}".format(epoch+1))
if h.num_gpus > 1:
train_sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
if rank == 0:
start_b = time.time()
x, y, _, y_mel = batch
x = torch.autograd.Variable(x.to(device, non_blocking=True))
y = torch.autograd.Variable(y.to(device, non_blocking=True))
y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
y = y.unsqueeze(1)
y_g_hat = generator(x)
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
optim_d.zero_grad()
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
loss_disc_all.backward()
optim_d.step()
# Generator
optim_g.zero_grad()
# L1 Mel-Spectrogram Loss
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
loss_gen_all.backward()
optim_g.step()
if rank == 0:
# STDOUT logging
if steps % a.stdout_interval == 0:
with torch.no_grad():
mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
print('Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'.
format(steps, loss_gen_all, mel_error, time.time() - start_b))
# checkpointing
if steps % a.checkpoint_interval == 0 and steps != 0:
checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps)
save_checkpoint(checkpoint_path,
{'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, steps)
save_checkpoint(checkpoint_path,
{'mpd': (mpd.module if h.num_gpus > 1
else mpd).state_dict(),
'msd': (msd.module if h.num_gpus > 1
else msd).state_dict(),
'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(), 'steps': steps,
'epoch': epoch})
# Tensorboard summary logging
if steps % a.summary_interval == 0:
sw.add_scalar("training/gen_loss_total", loss_gen_all, steps)
sw.add_scalar("training/mel_spec_error", mel_error, steps)
# Validation
if steps % a.validation_interval == 0: # and steps != 0:
generator.eval()
torch.cuda.empty_cache()
val_err_tot = 0
with torch.no_grad():
for j, batch in enumerate(validation_loader):
x, y, _, y_mel = batch
y_g_hat = generator(x.to(device))
y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True))
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate,
h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item()
if j <= 4:
if steps == 0:
sw.add_audio('gt/y_{}'.format(j), y[0], steps, h.sampling_rate)
sw.add_figure('gt/y_spec_{}'.format(j), plot_spectrogram(x[0]), steps)
sw.add_audio('generated/y_hat_{}'.format(j), y_g_hat[0], steps, h.sampling_rate)
y_hat_spec = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels,
h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax)
sw.add_figure('generated/y_hat_spec_{}'.format(j),
plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), steps)
val_err = val_err_tot / (j+1)
sw.add_scalar("validation/mel_spec_error", val_err, steps)
generator.train()
steps += 1
scheduler_g.step()
scheduler_d.step()
if rank == 0:
print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
def main():
print('Initializing Training Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--group_name', default=None)
parser.add_argument('--input_wavs_dir', default='LJSpeech-1.1/wavs')
parser.add_argument('--input_mels_dir', default='ft_dataset')
parser.add_argument('--input_training_file', default='LJSpeech-1.1/training.txt')
parser.add_argument('--input_validation_file', default='LJSpeech-1.1/validation.txt')
parser.add_argument('--checkpoint_path', default='cp_hifigan')
parser.add_argument('--config', default='')
parser.add_argument('--training_epochs', default=3100, type=int)
parser.add_argument('--stdout_interval', default=5, type=int)
parser.add_argument('--checkpoint_interval', default=5000, type=int)
parser.add_argument('--summary_interval', default=100, type=int)
parser.add_argument('--validation_interval', default=1000, type=int)
parser.add_argument('--fine_tuning', default=False, type=bool)
a = parser.parse_args()
with open(a.config) as f:
data = f.read()
json_config = json.loads(data)
h = AttrDict(json_config)
build_env(a.config, 'config.json', a.checkpoint_path)
torch.manual_seed(h.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
h.num_gpus = torch.cuda.device_count()
h.batch_size = int(h.batch_size / h.num_gpus)
print('Batch size per GPU :', h.batch_size)
    else:
        h.num_gpus = 1  # no CUDA available: fall back to a single process so h.num_gpus is always defined
if h.num_gpus > 1:
mp.spawn(train, nprocs=h.num_gpus, args=(a, h,))
else:
train(0, a, h)
if __name__ == '__main__':
main()
| 12,153 | 43.683824 | 123 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/docs/conf.py | # -*- coding: utf-8 -*-
#
# Clang documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 9 20:01:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang'
copyright = u'2007-%d, The Clang Team' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4'
# The full version, including alpha/beta/rc tags.
release = '4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'analyzer']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clangdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Clang.tex', u'Clang Documentation',
u'The Clang Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory. This was copied from llvm/docs/conf.py.
basedir = os.path.dirname(__file__)
man_page_authors = u'Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)'
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
for name in os.listdir(command_guide_path):
# Ignore non-ReST files and the index page.
if not name.endswith('.rst') or name in ('index.rst',):
continue
# Otherwise, automatically extract the description.
file_subpath = os.path.join(command_guide_subpath, name)
with open(os.path.join(command_guide_path, name)) as f:
title = f.readline().rstrip('\n')
header = f.readline().rstrip('\n')
if len(header) != len(title):
        print("error: invalid header in %r (does not match title)" % (
            file_subpath,), file=sys.stderr)
if ' - ' not in title:
        print(("error: invalid title in %r "
               "(expected '<name> - <description>')") % (
                   file_subpath,), file=sys.stderr)
# Split the name out of the title.
name,description = title.split(' - ', 1)
man_pages.append((file_subpath.replace('.rst',''), name,
description, man_page_authors, 1))
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Clang', u'Clang Documentation',
u'The Clang Team', 'Clang', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 9,128 | 32.317518 | 83 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/docs/analyzer/conf.py | # -*- coding: utf-8 -*-
#
# Clang Static Analyzer documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 2 15:54:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang Static Analyzer'
copyright = u'2013-%d, Analyzer Team' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ClangStaticAnalyzerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ClangStaticAnalyzer.tex', u'Clang Static Analyzer Documentation',
u'Analyzer Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clangstaticanalyzer', u'Clang Static Analyzer Documentation',
[u'Analyzer Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ClangStaticAnalyzer', u'Clang Static Analyzer Documentation',
u'Analyzer Team', 'ClangStaticAnalyzer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 8,070 | 31.544355 | 80 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/training/ner_metrics.py | from overrides import overrides
from typing import Optional
import torch
from allennlp.training.metrics.metric import Metric
from span_model.training.f1 import compute_f1
# TODO: Need to use the decoded predictions so that we catch the gold examples longer than
# the span boundary.
class NERMetrics(Metric):
"""
Computes precision, recall, and micro-averaged F1 from a list of predicted and gold labels.
"""
def __init__(self, number_of_classes: int, none_label: int = 0):
self.number_of_classes = number_of_classes
self.none_label = none_label
self.reset()
@overrides
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
        predictions = predictions.cpu()
        gold_labels = gold_labels.cpu()
        if mask is None:
            mask = torch.ones_like(gold_labels)
        mask = mask.cpu()
for i in range(self.number_of_classes):
if i == self.none_label:
continue
self._true_positives += (
((predictions == i) * (gold_labels == i) * mask.bool()).sum().item()
)
self._false_positives += (
((predictions == i) * (gold_labels != i) * mask.bool()).sum().item()
)
self._true_negatives += (
((predictions != i) * (gold_labels != i) * mask.bool()).sum().item()
)
self._false_negatives += (
((predictions != i) * (gold_labels == i) * mask.bool()).sum().item()
)
@overrides
def get_metric(self, reset=False):
"""
Returns
-------
A tuple of the following metrics based on the accumulated count statistics:
precision : float
recall : float
f1-measure : float
"""
predicted = self._true_positives + self._false_positives
gold = self._true_positives + self._false_negatives
matched = self._true_positives
precision, recall, f1_measure = compute_f1(predicted, gold, matched)
# Reset counts if at end of epoch.
if reset:
self.reset()
return precision, recall, f1_measure
@overrides
def reset(self):
self._true_positives = 0
self._false_positives = 0
self._true_negatives = 0
self._false_negatives = 0
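if __name__ == "__main__":
    # Hedged, self-contained sketch (not part of the original training code): shows how the
    # metric accumulates per-class counts over a batch of span labels and reports
    # micro-averaged precision/recall/F1. The label ids below are arbitrary toy values;
    # class 0 is the null label and is excluded from the scores.
    metric = NERMetrics(number_of_classes=3, none_label=0)
    predictions = torch.tensor([[1, 2, 0, 1]])
    gold_labels = torch.tensor([[1, 0, 0, 2]])
    mask = torch.ones_like(gold_labels)
    metric(predictions, gold_labels, mask)
    precision, recall, f1 = metric.get_metric(reset=True)
    print(dict(precision=precision, recall=recall, f1=f1))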
| 2,358 | 29.636364 | 95 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/ner.py | import logging
from typing import Any, Dict, List, Optional, Callable
import torch
from torch.nn import functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TimeDistributed
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from span_model.models.shared import FocalLoss, BiAffineSingleInput
from span_model.training.ner_metrics import NERMetrics
from span_model.data.dataset_readers import document
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class NERTagger(Model):
"""
Named entity recognition module
Parameters
----------
    make_feedforward : ``Callable``
        Factory that builds the feedforward network applied to the span representations,
        which are then scored by a linear layer.
    span_emb_dim: ``int``
        Dimension of the span embeddings that are fed to the scorer.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
make_feedforward: Callable,
span_emb_dim: int,
regularizer: Optional[RegularizerApplicator] = None,
use_bi_affine: bool = False,
neg_class_weight: float = -1,
use_focal_loss: bool = False,
focal_loss_gamma: int = 2,
use_double_scorer: bool = False,
use_gold_for_train_prune_scores: bool = False,
use_single_pool: bool = False,
name: str = "ner_labels"
) -> None:
super(NERTagger, self).__init__(vocab, regularizer)
self.use_single_pool = use_single_pool
self.use_gold_for_train_prune_scores = use_gold_for_train_prune_scores
self.use_double_scorer = use_double_scorer
self.use_bi_affine = use_bi_affine
self._name = name
self._namespaces = [
entry for entry in vocab.get_namespaces() if self._name in entry
]
# Number of classes determine the output dimension of the final layer
self._n_labels = {name: vocab.get_vocab_size(name) for name in self._namespaces}
if self.use_single_pool:
for n in self._namespaces:
self._n_labels[n] -= 1
        # The null label index is needed so the metrics know which class to ignore.
for namespace in self._namespaces:
null_label = vocab.get_token_index("", namespace)
assert (
null_label == 0
) # If not, the dummy class won't correspond to the null label.
        # The scorer produces one score per label, including the null label. Spans that are
        # masked out are forced to the null label in `forward` by giving the null column a
        # large positive score.
# Create a separate scorer and metric for each dataset we're dealing with.
self._ner_scorers = torch.nn.ModuleDict()
self._ner_metrics = {}
for namespace in self._namespaces:
self._ner_scorers[namespace] = self.make_scorer(
make_feedforward, span_emb_dim, self._n_labels[namespace])
if self.use_double_scorer:
self._ner_scorers[namespace] = None # noqa
self._ner_scorers["opinion"] = self.make_scorer(make_feedforward, span_emb_dim, 2)
self._ner_scorers["target"] = self.make_scorer(make_feedforward, span_emb_dim, 2)
self._ner_metrics[namespace] = NERMetrics(
self._n_labels[namespace], null_label
)
self.i_opinion = vocab.get_token_index("OPINION", namespace)
self.i_target = vocab.get_token_index("TARGET", namespace)
if self.use_single_pool:
self.i_opinion = self.i_target = 1
self._active_namespace = None
self._loss = torch.nn.CrossEntropyLoss(reduction="sum")
if neg_class_weight != -1:
assert len(self._namespaces) == 1
num_pos_classes = self._n_labels[self._namespaces[0]] - 1
pos_weight = (1 - neg_class_weight) / num_pos_classes
weight = [neg_class_weight] + [pos_weight] * num_pos_classes
print(dict(ner_class_weight=weight))
self._loss = torch.nn.CrossEntropyLoss(reduction="sum", weight=torch.tensor(weight))
if use_focal_loss:
assert neg_class_weight != -1
self._loss = FocalLoss(
reduction="sum", weight=torch.tensor(weight), gamma=focal_loss_gamma)
print(dict(ner_loss_fn=self._loss))
def make_scorer(self, make_feedforward, span_emb_dim, n_labels):
mention_feedforward = make_feedforward(input_dim=span_emb_dim)
scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(
torch.nn.Linear(
mention_feedforward.get_output_dim(),
n_labels
)
),
)
if self.use_bi_affine:
scorer = BiAffineSingleInput(
input_size=span_emb_dim // 2,
project_size=200,
output_size=n_labels,
)
return scorer
@overrides
def forward(
self, # type: ignore
spans: torch.IntTensor,
span_mask: torch.IntTensor,
span_embeddings: torch.IntTensor,
sentence_lengths: torch.Tensor,
ner_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
TODO: Write documentation.
"""
# Shape: (Batch size, Number of Spans, Span Embedding Size)
# span_embeddings
self._active_namespace = f"{metadata.dataset}__{self._name}"
if self.use_double_scorer:
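            # Two binary scorers (opinion vs. null, target vs. null) are pooled into one
            # score tensor: the null column is the mean of the two null scores and each
            # positive column comes from its dedicated scorer.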
opinion_scores = self._ner_scorers["opinion"](span_embeddings)
target_scores = self._ner_scorers["target"](span_embeddings)
null_scores = torch.stack([opinion_scores[..., 0], target_scores[..., 0]], dim=-1).mean(dim=-1, keepdim=True)
pool = [null_scores, None, None]
pool[self.i_opinion] = opinion_scores[..., [1]]
pool[self.i_target] = target_scores[..., [1]]
ner_scores = torch.cat(pool, dim=-1)
else:
scorer = self._ner_scorers[self._active_namespace]
ner_scores = scorer(span_embeddings)
# Give large positive scores to "null" class in masked-out elements
ner_scores[..., 0] = util.replace_masked_values(ner_scores[..., 0], span_mask.bool(), 1e20)
_, predicted_ner = ner_scores.max(2)
predictions = self.predict(
ner_scores.detach().cpu(),
spans.detach().cpu(),
span_mask.detach().cpu(),
metadata,
)
output_dict = {"predictions": predictions}
# New
output_dict.update(ner_scores=ner_scores)
output_dict.update(opinion_scores=ner_scores.softmax(dim=-1)[..., [self.i_opinion]])
output_dict.update(target_scores=ner_scores.softmax(dim=-1)[..., [self.i_target]])
if ner_labels is not None:
if self.use_single_pool:
ner_labels = torch.ne(ner_labels, 0.0).long()
if self.use_gold_for_train_prune_scores:
for name, i in dict(opinion_scores=self.i_opinion, target_scores=self.i_target).items():
mask = ner_labels.eq(i).unsqueeze(dim=-1)
assert mask.shape == output_dict[name].shape
output_dict[name] = output_dict[name].masked_fill(mask, 1e20)
metrics = self._ner_metrics[self._active_namespace]
metrics(predicted_ner, ner_labels, span_mask)
ner_scores_flat = ner_scores.view(
-1, self._n_labels[self._active_namespace]
)
ner_labels_flat = ner_labels.view(-1)
mask_flat = span_mask.view(-1).bool()
loss = self._loss(ner_scores_flat[mask_flat], ner_labels_flat[mask_flat])
output_dict["loss"] = loss
return output_dict
def predict(self, ner_scores, spans, span_mask, metadata):
# TODO: Make sure the iteration works in documents with a single sentence.
# Zipping up and iterating iterates over the zeroth dimension of each tensor; this
# corresponds to iterating over sentences.
predictions = []
zipped = zip(ner_scores, spans, span_mask, metadata)
for ner_scores_sent, spans_sent, span_mask_sent, sentence in zipped:
predicted_scores_raw, predicted_labels = ner_scores_sent.max(dim=1)
softmax_scores = F.softmax(ner_scores_sent, dim=1)
predicted_scores_softmax, _ = softmax_scores.max(dim=1)
ix = (predicted_labels != 0) & span_mask_sent.bool()
predictions_sent = []
zip_pred = zip(
predicted_labels[ix],
predicted_scores_raw[ix],
predicted_scores_softmax[ix],
spans_sent[ix],
)
for label, label_score_raw, label_score_softmax, label_span in zip_pred:
label_str = self.vocab.get_token_from_index(
label.item(), self._active_namespace
)
span_start, span_end = label_span.tolist()
ner = [
span_start,
span_end,
label_str,
label_score_raw.item(),
label_score_softmax.item(),
]
prediction = document.PredictedNER(ner, sentence, sentence_offsets=True)
predictions_sent.append(prediction)
predictions.append(predictions_sent)
return predictions
# TODO: This code is repeated elsewhere. Refactor.
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"Loop over the metrics for all namespaces, and return as dict."
res = {}
for namespace, metrics in self._ner_metrics.items():
precision, recall, f1 = metrics.get_metric(reset)
prefix = namespace.replace("_labels", "")
to_update = {
f"{prefix}_precision": precision,
f"{prefix}_recall": recall,
f"{prefix}_f1": f1,
}
res.update(to_update)
res_avg = {}
for name in ["precision", "recall", "f1"]:
values = [res[key] for key in res if name in key]
res_avg[f"MEAN__{self._name.replace('_labels', '')}_{name}"] = sum(values) / len(values) if values else 0
res.update(res_avg)
return res
| 10,868 | 39.707865 | 121 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/embedder.py | from typing import Optional, Tuple
from overrides import overrides
import torch
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder, TokenEmbedder
from allennlp.nn import util
from allennlp.modules.scalar_mix import ScalarMix
@TokenEmbedder.register("double_mix_ptm")
class DoubleMixPTMEmbedder(TokenEmbedder):
# Refer: PretrainedTransformerMismatchedEmbedder
"""
Use this embedder to embed wordpieces given by `PretrainedTransformerMismatchedIndexer`
and to pool the resulting vectors to get word-level representations.
    Registered as a `TokenEmbedder` with name "double_mix_ptm".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerMismatchedIndexer`.
max_length : `int`, optional (default = `None`)
If positive, folds input token IDs into multiple segments of this length, pass them
through the transformer model independently, and concatenate the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerMismatchedIndexer`.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
last_layer_only: `bool`, optional (default = `True`)
When `True` (the default), only the final layer of the pretrained transformer is taken
for the embeddings. But if set to `False`, a scalar mix of all of the layers
is used.
gradient_checkpointing: `bool`, optional (default = `None`)
Enable or disable gradient checkpointing.
"""
def __init__(
self,
model_name: str,
max_length: int = None,
train_parameters: bool = True,
last_layer_only: bool = True,
gradient_checkpointing: Optional[bool] = None,
) -> None:
super().__init__()
# The matched version v.s. mismatched
self._matched_embedder = PretrainedTransformerEmbedder(
model_name,
max_length=max_length,
train_parameters=train_parameters,
last_layer_only=last_layer_only,
gradient_checkpointing=gradient_checkpointing,
)
self._matched_embedder.config.output_hidden_states = True
num_layers = self._matched_embedder.config.num_hidden_layers
mix_init = [float(i) for i in range(num_layers)] # Try to give useful prior, after softmax will be [..., 0.08, 0.23, 0.63]
self._mixer_a = ScalarMix(num_layers, initial_scalar_parameters=mix_init)
self._mixer_b = ScalarMix(num_layers, initial_scalar_parameters=mix_init)
self._matched_embedder.transformer_model.forward = self.make_fn_transformer(
self._matched_embedder.transformer_model.forward
)
# This method doesn't work, gradient doesn't propagate properly
# self.embeds_b = None # Bonus output because TokenEmbedder should produce single Tensor output
@classmethod
def make_fn_transformer(cls, fn):
def new_fn(*args, **kwargs):
transformer_output: tuple = fn(*args, **kwargs)
# As far as I can tell, the hidden states will always be the last element
# in the output tuple as long as the model is not also configured to return
# attention scores.
# See, for example, the return value description for BERT:
# https://huggingface.co/transformers/model_doc/bert.html#transformers.BertModel.forward
# These hidden states will also include the embedding layer, which we don't
# include in the scalar mix. Hence the `[1:]` slicing.
hidden_states = transformer_output[-1][1:]
# By default, PTM will return transformer_output[0] so we force the one we want in front
return (hidden_states,) + transformer_output
return new_fn
@overrides
def get_output_dim(self):
return self._matched_embedder.get_output_dim()
@staticmethod
def run_match(embeddings, offsets):
# span_embeddings: (batch_size, num_orig_tokens, max_span_length, embedding_size)
# span_mask: (batch_size, num_orig_tokens, max_span_length)
span_embeddings, span_mask = util.batched_span_select(embeddings.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_embeddings *= span_mask # zero out paddings
span_embeddings_sum = span_embeddings.sum(2)
span_embeddings_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
orig_embeddings = span_embeddings_sum / torch.clamp_min(span_embeddings_len, 1)
# All the places where the span length is zero, write in zeros.
orig_embeddings[(span_embeddings_len == 0).expand(orig_embeddings.shape)] = 0
return orig_embeddings
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
offsets: torch.LongTensor,
wordpiece_mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: [batch_size, num_wordpieces] (for exception see `PretrainedTransformerEmbedder`).
mask: `torch.BoolTensor`
Shape: [batch_size, num_orig_tokens].
offsets: `torch.LongTensor`
Shape: [batch_size, num_orig_tokens, 2].
Maps indices for the original tokens, i.e. those given as input to the indexer,
to a span in token_ids. `token_ids[i][offsets[i][j][0]:offsets[i][j][1] + 1]`
corresponds to the original j-th token from the i-th batch.
wordpiece_mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_wordpieces].
segment_concat_mask: `Optional[torch.BoolTensor]`
See `PretrainedTransformerEmbedder`.
# Returns
`torch.Tensor`
Shape: [batch_size, num_orig_tokens, embedding_size].
"""
hidden_states = self._matched_embedder( # noqa
token_ids, wordpiece_mask, type_ids=type_ids, segment_concat_mask=segment_concat_mask
)
assert type(hidden_states) in {tuple, list}
embeds_a = self.run_match(self._mixer_a(hidden_states), offsets)
embeds_b = self.run_match(self._mixer_b(hidden_states), offsets)
x = torch.cat([embeds_a, embeds_b], dim=-1)
return x
# self.embeds_b = embeds_b
# return embeds_a
def split_outputs(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# Output has to be single tensor to suit forward signature but we need to split
output_dim = self.get_output_dim()
bs, seq_len, hidden_size = x.shape
assert hidden_size == output_dim * 2
return x[:, :, :output_dim], x[:, :, output_dim:]
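if __name__ == "__main__":
    # Hedged, self-contained sketch (not part of the original model code): demonstrates how
    # `run_match` pools wordpiece vectors back into word-level vectors using inclusive
    # (start, end) offset spans. All tensor values below are toy numbers.
    wordpiece_embeds = torch.arange(2 * 5 * 4, dtype=torch.float).view(2, 5, 4)  # [bs, num_wordpieces, size]
    offsets = torch.tensor([[[0, 0], [1, 2], [3, 4]],
                            [[0, 1], [2, 2], [3, 3]]])  # [bs, num_orig_tokens, 2]
    pooled = DoubleMixPTMEmbedder.run_match(wordpiece_embeds, offsets)
    print(pooled.shape)  # expected: torch.Size([2, 3, 4])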
| 7,218 | 44.689873 | 131 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/relation_proper.py | import logging
from typing import Any, Dict, List, Optional, Callable
import torch
import torch.nn.functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.nn import util, RegularizerApplicator
from allennlp.modules import TimeDistributed
from span_model.models.shared import BiAffine, SpanLengthCrossEntropy, BagPairScorer, BiAffineV2
from span_model.training.relation_metrics import RelationMetrics
from span_model.models.entity_beam_pruner import Pruner, TwoScorePruner
from span_model.data.dataset_readers import document
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
import json
from pydantic import BaseModel
class PruneOutput(BaseModel):
class Config:
arbitrary_types_allowed = True
span_embeddings: torch.Tensor
span_mention_scores: torch.Tensor
num_spans_to_keep: torch.Tensor
span_mask: torch.Tensor
span_indices: torch.Tensor
spans: torch.Tensor
def analyze_info(info: dict):
for k, v in info.items():
if isinstance(v, torch.Size):
v = tuple(v)
info[k] = str(v)
logging.info(json.dumps(info, indent=2))
class DistanceEmbedder(torch.nn.Module):
def __init__(self, dim=128, vocab_size=10):
super().__init__()
self.vocab_size = vocab_size
self.dim = dim
self.embedder = torch.nn.Embedding(self.vocab_size, self.dim)
def to_distance_buckets(self, spans_a: torch.Tensor, spans_b: torch.Tensor) -> torch.Tensor:
bs, num_a, dim = spans_a.shape
bs, num_b, dim = spans_b.shape
assert dim == 2
spans_a = spans_a.view(bs, num_a, 1, dim)
spans_b = spans_b.view(bs, 1, num_b, dim)
d_ab = torch.abs(spans_b[..., 0] - spans_a[..., 1])
d_ba = torch.abs(spans_a[..., 0] - spans_b[..., 1])
distances = torch.minimum(d_ab, d_ba)
# pos_a = spans_a.float().mean(dim=-1).unsqueeze(dim=-1) # bs, num_spans, 1
# pos_b = spans_b.float().mean(dim=-1).unsqueeze(dim=-2) # bs, 1, num_spans
# distances = torch.abs(pos_a - pos_b)
x = util.bucket_values(distances, num_total_buckets=self.vocab_size)
# [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+]
x = x.long()
assert x.shape == (bs, num_a, num_b)
return x
def forward(self, spans_a: torch.Tensor, spans_b: torch.Tensor) -> torch.Tensor:
buckets = self.to_distance_buckets(spans_a, spans_b)
x = self.embedder(buckets) # bs, num_spans, num_spans, dim
return x
def global_max_pool1d(x: torch.Tensor) -> torch.Tensor:
bs, seq_len, features = x.shape
x = x.transpose(-1, -2)
x = torch.nn.functional.adaptive_max_pool1d(x, output_size=1, return_indices=False)
x = x.transpose(-1, -2)
x = x.squeeze(dim=1)
assert tuple(x.shape) == (bs, features)
return x
def test_pool():
x = torch.zeros(3, 100, 32)
y = global_max_pool1d(x)
print(dict(x=x.shape, y=y.shape))
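def test_distance_embedder():
    # Hedged illustration (not part of the original training code): shows how span pairs are
    # mapped to distance buckets ([0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+]) before being
    # embedded. The span boundaries below are arbitrary toy values.
    embedder = DistanceEmbedder(dim=8, vocab_size=10)
    spans_a = torch.tensor([[[0, 1], [10, 12]]])  # (bs=1, num_a=2, 2)
    spans_b = torch.tensor([[[3, 4], [50, 55]]])  # (bs=1, num_b=2, 2)
    buckets = embedder.to_distance_buckets(spans_a, spans_b)
    embeds = embedder(spans_a, spans_b)
    print(dict(buckets=buckets.tolist(), embed_shape=embeds.shape))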
class ProperRelationExtractor(Model):
def __init__(
self,
vocab: Vocabulary,
make_feedforward: Callable,
span_emb_dim: int,
feature_size: int,
spans_per_word: float,
positive_label_weight: float = 1.0,
regularizer: Optional[RegularizerApplicator] = None,
use_distance_embeds: bool = False,
use_pair_feature_maxpool: bool = False,
use_pair_feature_cls: bool = False,
use_bi_affine_classifier: bool = False,
neg_class_weight: float = -1,
span_length_loss_weight_gamma: float = 0.0,
use_bag_pair_scorer: bool = False,
use_bi_affine_v2: bool = False,
use_pruning: bool = True,
use_single_pool: bool = False,
**kwargs, # noqa
) -> None:
super().__init__(vocab, regularizer)
print(dict(unused_keys=kwargs.keys()))
print(dict(locals=locals()))
self.use_single_pool = use_single_pool
self.use_pruning = use_pruning
self.use_bi_affine_v2 = use_bi_affine_v2
self.use_bag_pair_scorer = use_bag_pair_scorer
self.span_length_loss_weight_gamma = span_length_loss_weight_gamma
self.use_bi_affine_classifier = use_bi_affine_classifier
self.use_distance_embeds = use_distance_embeds
self.use_pair_feature_maxpool = use_pair_feature_maxpool
self.use_pair_feature_cls = use_pair_feature_cls
self._text_embeds: Optional[torch.Tensor] = None
self._text_mask: Optional[torch.Tensor] = None
self._spans_a: Optional[torch.Tensor] = None
self._spans_b: Optional[torch.Tensor] = None
token_emb_dim = 768
relation_scorer_dim = 2 * span_emb_dim
if self.use_distance_embeds:
self.d_embedder = DistanceEmbedder()
relation_scorer_dim += self.d_embedder.dim
if self.use_pair_feature_maxpool:
relation_scorer_dim += token_emb_dim
if self.use_pair_feature_cls:
relation_scorer_dim += token_emb_dim
print(dict(token_emb_dim=token_emb_dim, span_emb_dim=span_emb_dim, relation_scorer_dim=relation_scorer_dim))
self._namespaces = [
entry for entry in vocab.get_namespaces() if "relation_labels" in entry
]
self._n_labels = {name: vocab.get_vocab_size(name) for name in self._namespaces}
assert len(self._n_labels) == 1
n_labels = list(self._n_labels.values())[0] + 1
if self.use_bi_affine_classifier:
self._bi_affine_classifier = BiAffine(span_emb_dim, project_size=200, output_size=n_labels)
if self.use_bi_affine_v2:
self._bi_affine_v2 = BiAffineV2(span_emb_dim, project_size=200, output_size=n_labels)
self._mention_pruners = torch.nn.ModuleDict()
self._relation_feedforwards = torch.nn.ModuleDict()
self._relation_scorers = torch.nn.ModuleDict()
self._relation_metrics = {}
self._pruner_o = self._make_pruner(span_emb_dim, make_feedforward)
self._pruner_t = self._make_pruner(span_emb_dim, make_feedforward)
if not self.use_pruning:
self._pruner_o, self._pruner_t = None, None
if self.use_single_pool:
assert self.use_pruning
self._pruner_o = self._pruner_t
for namespace in self._namespaces:
relation_feedforward = make_feedforward(input_dim=relation_scorer_dim)
if self.use_bag_pair_scorer:
relation_feedforward = BagPairScorer(make_feedforward, span_emb_dim)
self._relation_feedforwards[namespace] = relation_feedforward
relation_scorer = torch.nn.Linear(
relation_feedforward.get_output_dim(), self._n_labels[namespace] + 1
)
self._relation_scorers[namespace] = relation_scorer
self._relation_metrics[namespace] = RelationMetrics()
self._spans_per_word = spans_per_word
self._active_namespace = None
self._loss = torch.nn.CrossEntropyLoss(reduction="sum", ignore_index=-1)
if self.span_length_loss_weight_gamma != 0:
assert neg_class_weight == -1
self._loss = SpanLengthCrossEntropy(
gamma=self.span_length_loss_weight_gamma, reduction="sum", ignore_index=-1)
if neg_class_weight != -1:
assert len(self._namespaces) == 1
num_pos_classes = self._n_labels[self._namespaces[0]]
weight = torch.tensor([neg_class_weight] + [1.0] * num_pos_classes)
print(dict(relation_neg_class_weight=weight))
self._loss = torch.nn.CrossEntropyLoss(reduction="sum", ignore_index=-1, weight=weight)
print(dict(relation_loss_fn=self._loss))
def _make_pruner(self, span_emb_dim:int, make_feedforward:Callable):
mention_feedforward = make_feedforward(input_dim=span_emb_dim)
feedforward_scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(
torch.nn.Linear(mention_feedforward.get_output_dim(), 1)
),
)
return Pruner(feedforward_scorer, use_external_score=True)
@overrides
def forward(
self, # type: ignore
spans: torch.IntTensor,
span_mask,
span_embeddings, # TODO: add type.
sentence_lengths,
relation_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
self._active_namespace = f"{metadata.dataset}__relation_labels"
pruned_o: PruneOutput = self._prune_spans(spans, span_mask, span_embeddings, sentence_lengths, "opinion")
pruned_t: PruneOutput = self._prune_spans(spans, span_mask, span_embeddings, sentence_lengths, "target")
relation_scores = self._compute_relation_scores(pruned_o, pruned_t)
prediction_dict, predictions = self.predict(
spans_a=pruned_o.spans.detach().cpu(),
spans_b=pruned_t.spans.detach().cpu(),
relation_scores=relation_scores.detach().cpu(),
num_keep_a=pruned_o.num_spans_to_keep.detach().cpu(),
num_keep_b=pruned_t.num_spans_to_keep.detach().cpu(),
metadata=metadata,
)
output_dict = {"predictions": predictions}
# Evaluate loss and F1 if labels were provided.
if relation_labels is not None:
# Compute cross-entropy loss.
gold_relations = self._get_pruned_gold_relations(
relation_labels, pruned_o, pruned_t
)
self._relation_scores, self._gold_relations = relation_scores, gold_relations
cross_entropy = self._get_cross_entropy_loss(
relation_scores, gold_relations
)
# Compute F1.
assert len(prediction_dict) == len(
metadata
) # Make sure length of predictions is right.
relation_metrics = self._relation_metrics[self._active_namespace]
relation_metrics(prediction_dict, metadata)
output_dict["loss"] = cross_entropy
return output_dict
def _prune_spans(self, spans, span_mask, span_embeddings, sentence_lengths, name: str) -> PruneOutput:
if not self.use_pruning:
bs, num_spans, dim = span_embeddings.shape
device = span_embeddings.device
return PruneOutput(
spans=spans,
span_mask=span_mask.unsqueeze(dim=-1),
span_embeddings=span_embeddings,
num_spans_to_keep=torch.full((bs,), fill_value=num_spans, device=device, dtype=torch.long),
span_indices=torch.arange(num_spans, device=device, dtype=torch.long).view(1, num_spans).expand(bs, -1),
span_mention_scores=torch.zeros(bs, num_spans, 1, device=device),
)
pruner = dict(opinion=self._pruner_o, target=self._pruner_t)[name]
if self.use_single_pool:
self._opinion_scores = torch.maximum(self._opinion_scores, self._target_scores)
self._target_scores = self._opinion_scores
mention_scores = dict(opinion=self._opinion_scores, target=self._target_scores)[name]
pruner.set_external_score(mention_scores.detach())
# Prune
num_spans = spans.size(1) # Max number of spans for the minibatch.
# Keep different number of spans for each minibatch entry.
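        # e.g. with spans_per_word = 0.5, a 20-token sentence keeps ceil(20 * 0.5) = 10 candidate spans.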
num_spans_to_keep = torch.ceil(
sentence_lengths.float() * self._spans_per_word
).long()
outputs = pruner(span_embeddings, span_mask, num_spans_to_keep)
(
top_span_embeddings,
top_span_mask,
top_span_indices,
top_span_mention_scores,
num_spans_kept,
) = outputs
top_span_mask = top_span_mask.unsqueeze(-1)
flat_top_span_indices = util.flatten_and_batch_shift_indices(
top_span_indices, num_spans
)
top_spans = util.batched_index_select(
spans, top_span_indices, flat_top_span_indices
)
return PruneOutput(
span_embeddings=top_span_embeddings,
span_mention_scores=top_span_mention_scores,
num_spans_to_keep=num_spans_to_keep,
span_mask=top_span_mask,
span_indices=top_span_indices,
spans=top_spans,
)
def predict(self, spans_a, spans_b, relation_scores, num_keep_a, num_keep_b, metadata):
preds_dict = []
predictions = []
for i in range(relation_scores.shape[0]):
# Each entry/sentence in batch
pred_dict_sent, predictions_sent = self._predict_sentence(
spans_a[i], spans_b[i], relation_scores[i],
num_keep_a[i], num_keep_b[i], metadata[i]
)
preds_dict.append(pred_dict_sent)
predictions.append(predictions_sent)
return preds_dict, predictions
def _predict_sentence(
self, top_spans_a, top_spans_b, relation_scores, num_keep_a, num_keep_b, sentence
):
num_a = num_keep_a.item() # noqa
num_b = num_keep_b.item() # noqa
spans_a = [tuple(x) for x in top_spans_a.tolist()]
spans_b = [tuple(x) for x in top_spans_b.tolist()]
# Iterate over all span pairs and labels. Record the span if the label isn't null.
predicted_scores_raw, predicted_labels = relation_scores.max(dim=-1)
softmax_scores = F.softmax(relation_scores, dim=-1)
predicted_scores_softmax, _ = softmax_scores.max(dim=-1)
predicted_labels -= 1 # Subtract 1 so that null labels get -1.
ix = (predicted_labels >= 0) # TODO: Figure out their keep_mask (relation.py:202)
res_dict = {}
predictions = []
for i, j in ix.nonzero(as_tuple=False):
span_1 = spans_a[i]
span_2 = spans_b[j]
label = predicted_labels[i, j].item()
raw_score = predicted_scores_raw[i, j].item()
softmax_score = predicted_scores_softmax[i, j].item()
label_name = self.vocab.get_token_from_index(
label, namespace=self._active_namespace
)
res_dict[(span_1, span_2)] = label_name
list_entry = (
span_1[0],
span_1[1],
span_2[0],
span_2[1],
label_name,
raw_score,
softmax_score,
)
predictions.append(
document.PredictedRelation(list_entry, sentence, sentence_offsets=True)
)
return res_dict, predictions
# TODO: This code is repeated elsewhere. Refactor.
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"Loop over the metrics for all namespaces, and return as dict."
res = {}
for namespace, metrics in self._relation_metrics.items():
precision, recall, f1 = metrics.get_metric(reset)
prefix = namespace.replace("_labels", "")
to_update = {
f"{prefix}_precision": precision,
f"{prefix}_recall": recall,
f"{prefix}_f1": f1,
}
res.update(to_update)
res_avg = {}
for name in ["precision", "recall", "f1"]:
values = [res[key] for key in res if name in key]
res_avg[f"MEAN__relation_{name}"] = (
sum(values) / len(values) if values else 0
)
res.update(res_avg)
return res
def _make_pair_features(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
assert a.shape == b.shape
bs, num_a, num_b, size = a.shape
features = [a, b]
if self.use_pair_feature_maxpool:
x = self._text_embeds
c = global_max_pool1d(x) # [bs, size]
bs, size = c.shape
c = c.view(bs, 1, 1, size).expand(-1, num_a, num_b, -1)
features.append(c)
if self.use_pair_feature_cls:
c = self._text_embeds[:, 0, :]
bs, size = c.shape
c = c.view(bs, 1, 1, size).expand(-1, num_a, num_b, -1)
features.append(c)
if self.use_distance_embeds:
features.append(self.d_embedder(self._spans_a, self._spans_b))
x = torch.cat(features, dim=-1)
return x
def _compute_span_pair_embeddings(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
c = self._make_pair_features(a, b)
if self.use_bi_affine_classifier:
c = self._bi_affine_classifier(a, b)
return c
def _compute_relation_scores(self, pruned_a: PruneOutput, pruned_b: PruneOutput):
if self.span_length_loss_weight_gamma != 0:
bs, num_a, _ = pruned_a.spans.shape
bs, num_b, _ = pruned_b.spans.shape
widths_a = pruned_a.spans[..., [1]] - pruned_a.spans[..., [0]] + 1
            widths_b = pruned_b.spans[..., [1]] - pruned_b.spans[..., [0]] + 1
widths_a = widths_a.view(bs, num_a, 1, 1)
widths_b = widths_b.view(bs, 1, num_b, 1)
widths = (widths_a + widths_b) / 2
self._loss.lengths = widths.view(bs * num_a * num_b)
a_orig, b_orig = pruned_a.span_embeddings, pruned_b.span_embeddings
bs, num_a, size = a_orig.shape
bs, num_b, size = b_orig.shape
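        # Score the span pairs in chunks along the first span axis so that the expanded
        # (bs, chunk, num_b, size) pair tensor stays small enough to fit in memory.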
chunk_size = max(1000 // num_a, 1)
# logging.info(dict(a=num_a, b=num_b, chunk_size=chunk_size))
pool = []
for i in range(0, num_a, chunk_size):
a = a_orig[:, i:i + chunk_size, :]
num_chunk = a.shape[1]
a = a.view(bs, num_chunk, 1, size).expand(-1, -1, num_b, -1)
b = b_orig.view(bs, 1, num_b, size).expand(-1, num_chunk, -1, -1)
assert a.shape == b.shape
self._spans_a = pruned_a.spans[:, i:i + chunk_size, :]
self._spans_b = pruned_b.spans
embeds = self._compute_span_pair_embeddings(a, b)
self._relation_embeds = embeds
if self.use_bi_affine_classifier:
scores = embeds
else:
relation_feedforward = self._relation_feedforwards[self._active_namespace]
relation_scorer = self._relation_scorers[self._active_namespace]
embeds = torch.flatten(embeds, end_dim=-2)
projected = relation_feedforward(embeds)
scores = relation_scorer(projected)
scores = scores.view(bs, num_chunk, num_b, -1)
if self.use_bi_affine_v2:
scores += self._bi_affine_v2(a, b)
pool.append(scores)
scores = torch.cat(pool, dim=1)
return scores
@staticmethod
def _get_pruned_gold_relations(relation_labels: torch.Tensor, pruned_a: PruneOutput, pruned_b: PruneOutput) -> torch.Tensor:
"""
Loop over each slice and get the labels for the spans from that slice.
All labels are offset by 1 so that the "null" label gets class zero. This is the desired
behavior for the softmax. Labels corresponding to masked relations keep the label -1, which
the softmax loss ignores.
"""
# TODO: Test and possibly optimize.
relations = []
indices_a, masks_a = pruned_a.span_indices, pruned_a.span_mask.bool()
indices_b, masks_b = pruned_b.span_indices, pruned_b.span_mask.bool()
for i in range(relation_labels.shape[0]):
# Each entry in batch
entry = relation_labels[i]
entry = entry[indices_a[i], :][:, indices_b[i]]
mask_entry = masks_a[i] & masks_b[i].transpose(0, 1)
assert entry.shape == mask_entry.shape
entry[mask_entry] += 1
entry[~mask_entry] = -1
relations.append(entry)
# return torch.cat(relations, dim=0)
# This should be a mistake, don't want to concat items within a batch together
# Likely undiscovered because current bs=1 and _get_loss flattens everything
return torch.stack(relations, dim=0)
def _get_cross_entropy_loss(self, relation_scores, relation_labels):
"""
Compute cross-entropy loss on relation labels. Ignore diagonal entries and entries giving
relations between masked out spans.
"""
# Need to add one for the null class.
n_labels = self._n_labels[self._active_namespace] + 1
scores_flat = relation_scores.view(-1, n_labels)
# Need to add 1 so that the null label is 0, to line up with indices into prediction matrix.
labels_flat = relation_labels.view(-1)
# Compute cross-entropy loss.
loss = self._loss(scores_flat, labels_flat)
return loss
| 20,888 | 39.561165 | 128 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/shared.py | """
Short utility functions.
"""
from typing import Optional, Callable
import torch
import torch.nn.functional as F
from allennlp.modules import FeedForward
from allennlp.modules.span_extractors import EndpointSpanExtractor, SpanExtractor
from allennlp.nn.util import batched_span_select
from overrides import overrides
from torch import Tensor
def cumsum_shifted(xs):
"""
Assumes `xs` is a 1-d array.
The usual cumsum has elements [x[1], x[1] + x[2], ...]. This one has elements
[0, x[1], x[1] + x[2], ...]. Useful for calculating sentence offsets.
"""
cs = xs.cumsum(dim=0)
shift = torch.zeros(1, dtype=torch.long, device=cs.device) # Put on correct device.
return torch.cat([shift, cs[:-1]], dim=0)
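# Hedged example (not in the original code): cumsum_shifted(torch.tensor([2, 3, 4]))
# returns tensor([0, 2, 5]), the running total *before* each element, i.e. the token
# offset of each sentence when `xs` holds per-sentence lengths.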
def batch_identity(batch_size, matrix_size, *args, **kwargs):
"""
Tile the identity matrix along axis 0, `batch_size` times.
"""
ident = torch.eye(matrix_size, *args, **kwargs).unsqueeze(0)
res = ident.repeat(batch_size, 1, 1)
return res
def fields_to_batches(d, keys_to_ignore=[]):
"""
The input is a dict whose items are batched tensors. The output is a list of dictionaries - one
per entry in the batch - with the slices of the tensors for that entry. Here's an example.
Input:
d = {"a": [[1, 2], [3,4]], "b": [1, 2]}
Output:
res = [{"a": [1, 2], "b": 1}, {"a": [3, 4], "b": 2}].
"""
keys = [key for key in d.keys() if key not in keys_to_ignore]
# Make sure all input dicts have same length. If they don't, there's a problem.
lengths = {k: len(d[k]) for k in keys}
if len(set(lengths.values())) != 1:
msg = f"fields have different lengths: {lengths}."
# If there's a doc key, add it to specify where the error is.
if "doc_key" in d:
msg = f"For document {d['doc_key']}, " + msg
raise ValueError(msg)
length = list(lengths.values())[0]
res = [{k: d[k][i] for k in keys} for i in range(length)]
return res
def batches_to_fields(batches):
"""
The inverse of `fields_to_batches`.
"""
# Make sure all the keys match.
first_keys = batches[0].keys()
for entry in batches[1:]:
if set(entry.keys()) != set(first_keys):
raise ValueError("Keys to not match on all entries.")
res = {k: [] for k in first_keys}
for batch in batches:
for k, v in batch.items():
res[k].append(v)
return res
class FocalLoss(torch.nn.Module):
# Reference: https://github.com/AdeelH/pytorch-multi-class-focal-loss/blob/master/focal_loss.py
def __init__(
self,
weight: Optional[Tensor] = None,
gamma: float = 0.,
reduction: str = 'mean',
ignore_index: int = -100
):
super().__init__()
assert reduction in {"mean", "sum", "none"}
self.gamma = gamma
self.reduction = reduction
self.nll_loss = torch.nn.NLLLoss(
weight=weight, reduction="none", ignore_index=ignore_index)
def forward(self, x, y):
assert x.ndim == 2
# compute weighted cross entropy term: -alpha * log(pt)
# (alpha is already part of self.nll_loss)
log_p = F.log_softmax(x, dim=-1)
ce = self.nll_loss(log_p, y)
# get true class column from each row
all_rows = torch.arange(len(x))
log_pt = log_p[all_rows, y]
# compute focal term: (1 - pt)^gamma
pt = log_pt.exp()
focal_term = (1 - pt)**self.gamma
# the full loss: -alpha * ((1 - pt)^gamma) * log(pt)
loss = focal_term * ce
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
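def demo_focal_loss():
    # Hedged illustration (not part of the original model code): compares focal loss with
    # plain cross-entropy on a toy batch. With gamma > 0, the confidently-correct first
    # example is down-weighted relative to the harder second example.
    logits = torch.tensor([[4.0, 0.0, 0.0], [0.5, 0.3, 0.2]])
    targets = torch.tensor([0, 2])
    focal = FocalLoss(gamma=2.0, reduction="none")
    plain = torch.nn.CrossEntropyLoss(reduction="none")
    print(dict(focal=focal(logits, targets), cross_entropy=plain(logits, targets)))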
class BiAffine(torch.nn.Module):
def __init__(self, input_size: int, project_size: int, output_size: int):
super().__init__()
self.project_a = torch.nn.Linear(input_size, project_size)
self.project_b = torch.nn.Linear(input_size, project_size)
self.bi_affine = torch.nn.Bilinear(project_size, project_size, output_size)
self.linear = torch.nn.Linear(project_size * 2, output_size)
self.act = torch.nn.Tanh()
self.input_size, self.output_size = input_size, output_size
def forward(self, a: Tensor, b: Tensor) -> Tensor:
a = self.act(self.project_a(a))
b = self.act(self.project_b(b))
c = self.bi_affine(a, b)
d = self.linear(torch.cat([a, b], dim=-1))
return c + d
class BiAffineSingleInput(torch.nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.net = BiAffine(**kwargs)
def forward(self, x: Tensor) -> Tensor:
size = x.shape[-1]
a, b = torch.split(x, split_size_or_sections=size // 2, dim=-1)
return self.net(a, b)
class BiAffineV2(BiAffine):
def forward(self, a: Tensor, b: Tensor) -> Tensor:
a = self.act(self.project_a(a))
b = self.act(self.project_b(b))
c = self.bi_affine(a, b)
return c
class BiAffineSpanExtractor(SpanExtractor):
def __init__(self, endpoint_extractor: EndpointSpanExtractor, **kwargs):
super().__init__()
self.endpoint_extractor = endpoint_extractor
self.net = BiAffineSingleInput(**kwargs)
def get_input_dim(self) -> int:
return self.endpoint_extractor.get_input_dim()
def get_output_dim(self) -> int:
return self.net.net.output_size
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> Tensor:
x = self.endpoint_extractor(sequence_tensor, span_indices, span_indices_mask)
x = self.net(x)
return x
class LSTMWithMarkers(SpanExtractor):
def __init__(self, input_size: int, hidden_size: int):
super().__init__()
self.lstm = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
batch_first=True,
bidirectional=True,
)
self.start = torch.nn.Parameter(torch.randn(input_size))
self.end = torch.nn.Parameter(torch.randn(input_size))
self.input_size = input_size
self.hidden_size = hidden_size
def get_input_dim(self) -> int:
return self.input_size
def get_output_dim(self) -> int:
return self.hidden_size * 2
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> Tensor:
x, mask = batched_span_select(sequence_tensor, span_indices)
assert mask[:, :, 0].float().sum().item() == torch.numel(mask[:, :, 0])
bs, num_spans, max_width, size = x.shape
_mask = mask.view(bs, num_spans, max_width, 1).expand_as(x)
start = self.start.view(1, 1, 1, size).expand(bs, num_spans, 1, size)
end = self.end.view(1, 1, 1, size).expand(bs, num_spans, 1, size)
x = torch.where(_mask, x, end.expand_as(x))
x = torch.cat([start, x, end], dim=-2)
num_special = 2 # Start & end markers
# num_special = 0
x = x.view(bs * num_spans, max_width + num_special, size)
# lengths = mask.view(bs * num_spans, max_width).sum(dim=-1) + num_special
# x = pack_padded_sequence(x, lengths.cpu(), batch_first=True, enforce_sorted=False)
output, (last_hidden, last_cell) = self.lstm(x)
x = last_hidden.view(bs, num_spans, self.get_output_dim())
return x
class LearntWeightCrossEntropy(torch.nn.Module):
def __init__(self, num_classes: int, **kwargs):
super().__init__()
self.w = torch.nn.Parameter(torch.ones(num_classes))
self.kwargs = kwargs
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.cross_entropy(
input, target, weight=self.w, **self.kwargs)
class SpanLengthCrossEntropy(torch.nn.Module):
def __init__(self, gamma: float, reduction: str, ignore_index: int):
super().__init__()
self.gamma = gamma
self.reduction = reduction
self.loss_fn = torch.nn.CrossEntropyLoss(
reduction="none", ignore_index=ignore_index)
self.lengths: Optional[Tensor] = None
def make_instance_weights(self) -> Tensor:
assert self.lengths is not None
w = self.lengths ** self.gamma
self.lengths = None
return w
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
n, c = input.shape
w = self.make_instance_weights()
assert tuple(w.shape) == (n,)
x = self.loss_fn(input, target)
x *= w
if self.reduction == "sum":
x = x.sum()
elif self.reduction == "mean":
x = x.mean()
else:
assert self.reduction == "none", f"Unknown {dict(reduction=self.reduction)}"
return x
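# Usage sketch for SpanLengthCrossEntropy (all values are illustrative assumptions): the caller
# must set `.lengths` (one span width per instance) before each forward pass, and each
# instance's cross-entropy is re-weighted by length ** gamma.
def _demo_span_length_cross_entropy() -> Tensor:
    loss_fn = SpanLengthCrossEntropy(gamma=0.5, reduction="mean", ignore_index=-100)
    logits = torch.randn(4, 3)
    labels = torch.tensor([0, 2, 1, 1])
    loss_fn.lengths = torch.tensor([1.0, 3.0, 2.0, 1.0])  # span widths of the 4 instances
    return loss_fn(logits, labels)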
class BagPairScorer(torch.nn.Module):
def __init__(self, make_feedforward: Callable[[int], FeedForward], span_emb_dim: int):
super().__init__()
self.feed = make_feedforward(span_emb_dim)
self.input_dim = span_emb_dim * 2
def get_output_dim(self) -> int:
return self.feed.get_output_dim()
def forward(self, x: Tensor) -> Tensor:
*_, size = x.shape
a, b, c, d = torch.split(x, split_size_or_sections=size // 4, dim=-1)
bags = []
for pair in [(a, c), (a, d), (b, c), (b, d)]:
bags.append(self.feed(torch.cat(pair, dim=-1)))
x = torch.stack(bags, dim=0).mean(dim=0)
return x
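# Sketch of BagPairScorer usage (the feedforward factory and sizes are assumptions): the input is
# the concatenation of two pair embeddings, split into four parts (a, b, c, d); the four cross
# pairings are scored by a shared feedforward and averaged.
def _demo_bag_pair_scorer() -> Tensor:
    def make_ff(input_dim: int) -> FeedForward:
        return FeedForward(input_dim=input_dim, num_layers=1, hidden_dims=32, activations=torch.nn.ReLU())
    scorer = BagPairScorer(make_ff, span_emb_dim=16)
    x = torch.randn(3, 16 * 2)  # (num_candidates, 2 * span_emb_dim)
    return scorer(x)            # (3, 32)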
class DualScorer(torch.nn.Module):
def __init__(self, make_feedforward: Callable[[int], FeedForward], input_size: int, num_classes: int):
super().__init__()
self.make_feedforward = make_feedforward
self.input_size = input_size
self.detector = self.make_scorer(2)
self.classifier = self.make_scorer(num_classes)
def make_scorer(self, num_classes: int):
feedforward = self.make_feedforward(self.input_size)
scorer = torch.nn.Linear(feedforward.get_output_dim(), num_classes)
return torch.nn.Sequential(feedforward, scorer)
def forward(self, x: Tensor, mention_scores: Tensor) -> Tensor:
x_detect = self.detector(x)
x_detect[..., :1] += mention_scores
scores_detect = x_detect.softmax(dim=-1)
scores_class = self.classifier(x).softmax(dim=-1)
scores = torch.cat([scores_detect[..., [0]], scores_class * scores_detect[..., [1]]], dim=-1)
return scores | 10,701 | 33.634304 | 106 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/span_model.py | import logging
from typing import Dict, List, Optional, Union
import copy
import torch
import torch.nn.functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.common.params import Params
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, FeedForward, TimeDistributed
from allennlp.modules.span_extractors import EndpointSpanExtractor, SelfAttentiveSpanExtractor, SpanExtractor
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from span_model.models.ner import NERTagger
from span_model.models.relation_proper import ProperRelationExtractor
from span_model.models.shared import BiAffineSpanExtractor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# New
from torch import Tensor
class MaxPoolSpanExtractor(SpanExtractor):
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._input_dim
@staticmethod
def extract_pooled(x, mask) -> Tensor:
return util.masked_max(x, mask, dim=-2)
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> Tensor:
span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices)
bs, num_spans, span_width, size = span_embeddings.shape
span_mask = span_mask.view(bs, num_spans, span_width, 1)
x = self.extract_pooled(span_embeddings, span_mask)
if span_indices_mask is not None:
# Above we were masking the widths of spans with respect to the max
# span width in the batch. Here we are masking the spans which were
# originally passed in as padding.
x *= span_indices_mask.view(bs, num_spans, 1)
assert tuple(x.shape) == (bs, num_spans, size)
return x
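# Minimal sketch of calling the pooling span extractor (shapes and indices are assumptions):
# span_indices holds inclusive (start, end) token positions and each span is reduced to one
# vector by max-pooling over its tokens.
def _demo_max_pool_span_extractor() -> Tensor:
    extractor = MaxPoolSpanExtractor(input_dim=8)
    sequence = torch.randn(1, 5, 8)           # (batch_size, seq_len, input_dim)
    spans = torch.tensor([[[0, 1], [2, 4]]])  # (batch_size, num_spans, 2)
    return extractor(sequence, spans)         # (1, 2, 8)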
class MeanPoolSpanExtractor(MaxPoolSpanExtractor):
@staticmethod
def extract_pooled(x, mask) -> Tensor:
return util.masked_mean(x, mask, dim=-2)
class TextEmbedderWithBiLSTM(TextFieldEmbedder):
def __init__(self, embedder: TextFieldEmbedder, hidden_size: int):
super().__init__()
self.embedder = embedder
self.lstm = torch.nn.LSTM(
input_size=self.embedder.get_output_dim(),
hidden_size=hidden_size,
bidirectional=True,
batch_first=True,
num_layers=1, # Increasing num_layers can help but we want fair comparison
)
self.dropout = torch.nn.Dropout(p=0.5)
self.output_size = hidden_size * 2
def get_output_dim(self) -> int:
return self.output_size
def forward(self, *args, **kwargs) -> torch.Tensor:
x = self.embedder(*args, **kwargs)
x = x.squeeze(dim=0) # For some reason x.shape is (1, 1, seq_len, size)
x = self.dropout(x) # Seems to work best if dropout both before and after lstm
x, state = self.lstm(x)
x = self.dropout(x)
x = x.unsqueeze(dim=0)
return x
@Model.register("span_model")
class SpanModel(Model):
def __init__(
self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
modules, # TODO: Add type.
feature_size: int,
max_span_width: int,
target_task: str,
feedforward_params: Dict[str, Union[int, float]],
loss_weights: Dict[str, float],
initializer: InitializerApplicator = InitializerApplicator(),
module_initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
display_metrics: List[str] = None,
# New
use_ner_embeds: bool = None,
span_extractor_type: str = None,
use_double_mix_embedder: bool = None,
relation_head_type: str = "base",
use_span_width_embeds: bool = None,
use_bilstm_after_embedder: bool = False,
) -> None:
super(SpanModel, self).__init__(vocab, regularizer)
# New
info = dict(
use_ner_embeds=use_ner_embeds,
span_extractor_type=span_extractor_type,
use_double_mix_embedder=use_double_mix_embedder,
relation_head_type=relation_head_type,
use_span_width_embeds=use_span_width_embeds,
)
for k, v in info.items():
print(dict(locals=(k, v)))
assert v is not None, k
self.use_double_mix_embedder = use_double_mix_embedder
self.relation_head_type = relation_head_type
if use_bilstm_after_embedder:
embedder = TextEmbedderWithBiLSTM(embedder, hidden_size=300)
####################
assert span_extractor_type in {"endpoint", "attn", "max_pool", "mean_pool", "bi_affine"}
# Create span extractor.
if use_span_width_embeds:
self._endpoint_span_extractor = EndpointSpanExtractor(
embedder.get_output_dim(),
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size,
bucket_widths=False,
)
# New
else:
self._endpoint_span_extractor = EndpointSpanExtractor(
embedder.get_output_dim(),
combination="x,y",
)
if span_extractor_type == "attn":
self._endpoint_span_extractor = SelfAttentiveSpanExtractor(
embedder.get_output_dim()
)
if span_extractor_type == "max_pool":
self._endpoint_span_extractor = MaxPoolSpanExtractor(
embedder.get_output_dim()
)
if span_extractor_type == "mean_pool":
self._endpoint_span_extractor = MeanPoolSpanExtractor(
embedder.get_output_dim()
)
if span_extractor_type == "bi_affine":
token_emb_dim = embedder.get_output_dim()
assert self._endpoint_span_extractor.get_output_dim() == token_emb_dim * 2
self._endpoint_span_extractor = BiAffineSpanExtractor(
endpoint_extractor=self._endpoint_span_extractor,
input_size=token_emb_dim,
project_size=200,
output_size=200,
)
self._visualize_outputs = []
####################
# Set parameters.
self._embedder = embedder
self._loss_weights = loss_weights
self._max_span_width = max_span_width
self._display_metrics = self._get_display_metrics(target_task)
token_emb_dim = self._embedder.get_output_dim()
span_emb_dim = self._endpoint_span_extractor.get_output_dim()
# New
self._feature_size = feature_size
####################
# Create submodules.
modules = Params(modules)
# Helper function to create feedforward networks.
def make_feedforward(input_dim):
return FeedForward(
input_dim=input_dim,
num_layers=feedforward_params["num_layers"],
hidden_dims=feedforward_params["hidden_dims"],
activations=torch.nn.ReLU(),
dropout=feedforward_params["dropout"],
)
# Submodules
self._ner = NERTagger.from_params(
vocab=vocab,
make_feedforward=make_feedforward,
span_emb_dim=span_emb_dim,
feature_size=feature_size,
params=modules.pop("ner"),
)
# New
self.use_ner_embeds = use_ner_embeds
if self.use_ner_embeds:
num_ner_labels = sorted(self._ner._n_labels.values())[0]
self._ner_embedder = torch.nn.Linear(num_ner_labels, feature_size)
span_emb_dim += feature_size
params = dict(
vocab=vocab,
make_feedforward=make_feedforward,
span_emb_dim=span_emb_dim,
feature_size=feature_size,
params=modules.pop("relation"),
)
if self.relation_head_type == "proper":
self._relation = ProperRelationExtractor.from_params(**params)
else:
raise ValueError(f"Unknown: {dict(relation_head_type=relation_head_type)}")
####################
# Initialize text embedder and all submodules
for module in [self._ner, self._relation]:
module_initializer(module)
initializer(self)
@staticmethod
def _get_display_metrics(target_task):
"""
The `target` is the name of the task used to make early stopping decisions. Show metrics
related to this task.
"""
lookup = {
"ner": [
f"MEAN__{name}" for name in ["ner_precision", "ner_recall", "ner_f1"]
],
"relation": [
f"MEAN__{name}"
for name in ["relation_precision", "relation_recall", "relation_f1"]
],
}
if target_task not in lookup:
raise ValueError(
f"Invalied value {target_task} has been given as the target task."
)
return lookup[target_task]
@staticmethod
def _debatch(x):
# TODO: Get rid of this when I find a better way to do it.
return x if x is None else x.squeeze(0)
def text_to_span_embeds(self, text_embeddings: torch.Tensor, spans):
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
return span_embeddings
@overrides
def forward(
self,
text,
spans,
metadata,
ner_labels=None,
relation_labels=None,
dep_graph_labels=None, # New
tag_labels=None, # New
grid_labels=None, # New
):
"""
TODO: change this.
"""
# In AllenNLP, AdjacencyFields are passed in as floats. This fixes it.
if relation_labels is not None:
relation_labels = relation_labels.long()
# TODO: Multi-document minibatching isn't supported yet. For now, get rid of the
# extra dimension in the input tensors. Will return to this once the model runs.
if len(metadata) > 1:
raise NotImplementedError("Multi-document minibatching not yet supported.")
metadata = metadata[0]
spans = self._debatch(spans) # (n_sents, max_n_spans, 2)
ner_labels = self._debatch(ner_labels) # (n_sents, max_n_spans)
relation_labels = self._debatch(
relation_labels
) # (n_sents, max_n_spans, max_n_spans)
# Encode using BERT, then debatch.
# Since the data are batched, we use `num_wrapping_dims=1` to unwrap the document dimension.
# (1, n_sents, max_sententence_length, embedding_dim)
# TODO: Deal with the case where the input is longer than 512.
text_embeddings = self._embedder(text, num_wrapping_dims=1)
# (n_sents, max_n_wordpieces, embedding_dim)
text_embeddings = self._debatch(text_embeddings)
# (n_sents, max_sentence_length)
text_mask = self._debatch(
util.get_text_field_mask(text, num_wrapping_dims=1).float()
)
sentence_lengths = text_mask.sum(dim=1).long() # (n_sents)
span_mask = (spans[:, :, 0] >= 0).float() # (n_sents, max_n_spans)
# SpanFields return -1 when they are used as padding. As we do some comparisons based on
# span widths when we attend over the span representations that we generate from these
# indices, we need them to be <= 0. This is only relevant in edge cases where the number of
# spans we consider after the pruning stage is >= the total number of spans, because in this
# case, it is possible we might consider a masked span.
spans = F.relu(spans.float()).long() # (n_sents, max_n_spans, 2)
# New
text_embeds_b = text_embeddings
if self.use_double_mix_embedder:
# DoubleMixPTMEmbedder has to output single concatenated tensor so we need to split
embed_dim = self._embedder.get_output_dim()
assert text_embeddings.shape[-1] == embed_dim * 2
text_embeddings, text_embeds_b = text_embeddings[..., :embed_dim], text_embeddings[..., embed_dim:]
kwargs = dict(spans=spans)
span_embeddings = self.text_to_span_embeds(text_embeddings, **kwargs)
span_embeds_b = self.text_to_span_embeds(text_embeds_b, **kwargs)
# Make calls out to the modules to get results.
output_ner = {"loss": 0}
output_relation = {"loss": 0}
# Make predictions and compute losses for each module
if self._loss_weights["ner"] > 0:
output_ner = self._ner(
spans,
span_mask,
span_embeddings,
sentence_lengths,
ner_labels,
metadata,
)
ner_scores = output_ner.pop("ner_scores")
# New
if self._loss_weights["relation"] > 0:
if getattr(self._relation, "use_ner_scores_for_prune", False):
self._relation._ner_scores = ner_scores
self._relation._opinion_scores = output_ner["opinion_scores"]
self._relation._target_scores = output_ner["target_scores"]
self._relation._text_mask = text_mask
self._relation._text_embeds = text_embeddings
if getattr(self._relation, "use_span_loss_for_pruners", False):
self._relation._ner_labels = ner_labels
output_relation = self._relation(
spans,
span_mask,
# span_embeddings,
span_embeds_b,
sentence_lengths,
relation_labels,
metadata,
)
# Use `get` since there are some cases where the output dict won't have a loss - for
# instance, when doing prediction.
loss = (
+ self._loss_weights["ner"] * output_ner.get("loss", 0)
+ self._loss_weights["relation"] * output_relation.get("loss", 0)
)
# Multiply the loss by the weight multiplier for this document.
weight = metadata.weight if metadata.weight is not None else 1.0
loss *= torch.tensor(weight)
output_dict = dict(
relation=output_relation,
ner=output_ner,
)
output_dict["loss"] = loss
output_dict["metadata"] = metadata
return output_dict
def update_span_embeddings(
self,
span_embeddings,
span_mask,
top_span_embeddings,
top_span_mask,
top_span_indices,
):
# TODO(Ulme) Speed this up by tensorizing
new_span_embeddings = span_embeddings.clone()
for sample_nr in range(len(top_span_mask)):
for top_span_nr, span_nr in enumerate(top_span_indices[sample_nr]):
if (
top_span_mask[sample_nr, top_span_nr] == 0
or span_mask[sample_nr, span_nr] == 0
):
break
new_span_embeddings[sample_nr, span_nr] = top_span_embeddings[
sample_nr, top_span_nr
]
return new_span_embeddings
@overrides
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
"""
doc = copy.deepcopy(output_dict["metadata"])
if self._loss_weights["ner"] > 0:
for predictions, sentence in zip(output_dict["ner"]["predictions"], doc):
sentence.predicted_ner = predictions
if self._loss_weights["relation"] > 0:
for predictions, sentence in zip(
output_dict["relation"]["predictions"], doc
):
sentence.predicted_relations = predictions
return doc
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
Get all metrics from all modules. For the ones that shouldn't be displayed, prefix their
keys with an underscore.
"""
metrics_ner = self._ner.get_metrics(reset=reset)
metrics_relation = self._relation.get_metrics(reset=reset)
# Make sure that there aren't any conflicting names.
metric_names = (
list(metrics_ner.keys())
+ list(metrics_relation.keys())
)
assert len(set(metric_names)) == len(metric_names)
all_metrics = dict(
list(metrics_ner.items())
+ list(metrics_relation.items())
)
# If no list of desired metrics given, display them all.
if self._display_metrics is None:
return all_metrics
# Otherwise only display the selected ones.
res = {}
for k, v in all_metrics.items():
if k in self._display_metrics:
res[k] = v
else:
new_k = "_" + k
res[new_k] = v
return res
| 17,657 | 35.941423 | 111 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/entity_beam_pruner.py | """
This is basically a copy of AllenNLP's Pruner module, but with support for entity beams.
"""
from typing import Tuple, Union
from overrides import overrides
import torch
from allennlp.nn import util
from allennlp.modules import TimeDistributed
def make_pruner(scorer, entity_beam=False, gold_beam=False):
"""
Create a pruner that either takes outputs of other scorers (i.e. entity beam), or uses its own
scorer (the `default_scorer`).
"""
item_scorer = torch.nn.Sequential(
TimeDistributed(scorer),
TimeDistributed(torch.nn.Linear(scorer.get_output_dim(), 1)),
)
min_score_to_keep = 1e-10 if entity_beam else None
return Pruner(item_scorer, entity_beam, gold_beam, min_score_to_keep)
class Pruner(torch.nn.Module):
"""
This module scores and prunes items in a list using a parameterised scoring function and a
threshold.
Parameters
----------
scorer : ``torch.nn.Module``, required.
A module which, given a tensor of shape (batch_size, num_items, embedding_size),
produces a tensor of shape (batch_size, num_items, 1), representing a scalar score
per item in the tensor.
entity_beam: bool, optional.
If True, use class scores output from another module instead of using its own scorer.
gold_beam: bool, optional.
If True, use gold arguments.
min_score_to_keep : float, optional.
If given, only keep items that score at least this high.
"""
def __init__(
self,
scorer: torch.nn.Module,
entity_beam: bool = False,
gold_beam: bool = False,
min_score_to_keep: float = None,
use_external_score: bool = False,
) -> None:
super().__init__()
# If gold beam is on, then entity beam must be off and min_score_to_keep must be None.
assert not (gold_beam and ((min_score_to_keep is not None) or entity_beam))
self._scorer = scorer
self._entity_beam = entity_beam
self._gold_beam = gold_beam
self._min_score_to_keep = min_score_to_keep
self._use_external_score = use_external_score
if self._use_external_score:
self._scorer = None
self._scores = None
def set_external_score(self, x: torch.Tensor):
self._scores = x
@overrides
def forward(
self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor],
class_scores: torch.FloatTensor = None,
gold_labels: torch.long = None,
extra_scores: torch.FloatTensor = None, # Scores to add to scorer output
) -> Tuple[
torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.FloatTensor
]:
"""
Extracts the top-k scoring items with respect to the scorer. We additionally return
the indices of the top-k in their original order, not ordered by score, so that downstream
components can rely on the original ordering (e.g., for knowing what spans are valid
antecedents in a coreference resolution model). May use the same k for all sentences in
minibatch, or different k for each.
Parameters
----------
embeddings : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, num_items, embedding_size), containing an embedding for
each item in the list that we want to prune.
mask : ``torch.LongTensor``, required.
A tensor of shape (batch_size, num_items), denoting unpadded elements of
``embeddings``.
num_items_to_keep : ``Union[int, torch.LongTensor]``, required.
If a tensor of shape (batch_size), specifies the number of items to keep for each
individual sentence in minibatch.
If an int, keep the same number of items for all sentences.
class_scores:
Class scores to be used with entity beam.
candidate_labels: If in debugging mode, use gold labels to get beam.
Returns
-------
top_embeddings : ``torch.FloatTensor``
The representations of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, embedding_size).
top_mask : ``torch.LongTensor``
The corresponding mask for ``top_embeddings``.
Has shape (batch_size, max_num_items_to_keep).
top_indices : ``torch.IntTensor``
The indices of the top-k scoring items into the original ``embeddings``
tensor. This is returned because it can be useful to retain pointers to
the original items, if each item is being scored by multiple distinct
scorers, for instance. Has shape (batch_size, max_num_items_to_keep).
top_item_scores : ``torch.FloatTensor``
The values of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, 1).
num_items_kept
"""
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(num_items_to_keep, int):
batch_size = mask.size(0)
# Put the tensor on same device as the mask.
num_items_to_keep = num_items_to_keep * torch.ones(
[batch_size], dtype=torch.long, device=mask.device
)
mask = mask.unsqueeze(-1)
num_items = embeddings.size(1)
num_items_to_keep = torch.clamp(num_items_to_keep, max=num_items)
# Shape: (batch_size, num_items, 1)
# If entity beam is on, use the class scores. Else ignore them and use the scorer.
if self._entity_beam:
scores, _ = class_scores.max(dim=-1)
scores = scores.unsqueeze(-1)
# If gold beam is on, give a score of 0 wherever the gold label is non-zero (indicating a
# non-null label), otherwise give a large negative number.
elif self._gold_beam:
scores = torch.where(
gold_labels > 0,
torch.zeros_like(gold_labels, dtype=torch.float),
-1e20 * torch.ones_like(gold_labels, dtype=torch.float),
)
scores = scores.unsqueeze(-1)
else:
if self._use_external_score:
scores = self._scores
else:
scores = self._scorer(embeddings)
if extra_scores is not None:
# Assumes extra_scores is already in [0, 1] range
scores = scores.sigmoid() + extra_scores
# If we're only keeping items that score above a given threshold, change the number of kept
# items here.
if self._min_score_to_keep is not None:
num_good_items = torch.sum(
scores > self._min_score_to_keep, dim=1
).squeeze()
num_items_to_keep = torch.min(num_items_to_keep, num_good_items)
# If gold beam is on, keep the gold items.
if self._gold_beam:
num_items_to_keep = torch.sum(gold_labels > 0, dim=1)
# Always keep at least one item to avoid edge case with empty matrix.
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
if scores.size(-1) != 1 or scores.dim() != 3:
raise ValueError(
f"The scorer passed to Pruner must produce a tensor of shape"
f"(batch_size, num_items, 1), but found shape {scores.size()}"
)
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
# NOTE(`mask` needs to be a byte tensor now.)
scores = util.replace_masked_values(scores, mask.bool(), -1e20)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(
num_items_to_keep, max_items_to_keep
)
top_indices_mask = top_indices_mask.bool()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(
embeddings, top_indices, flat_top_indices
)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).bool()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores, num_items_to_keep
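# Minimal usage sketch for Pruner (the linear scorer and shapes are assumptions for illustration):
# keep the top-3 of 6 candidate spans under a scalar score, returned in their original order.
def _demo_pruner():
    emb_size, num_items = 16, 6
    pruner = Pruner(scorer=torch.nn.Linear(emb_size, 1))
    embeddings = torch.randn(1, num_items, emb_size)
    mask = torch.ones(1, num_items, dtype=torch.long)
    top_emb, top_mask, top_idx, top_scores, num_kept = pruner(embeddings, mask, num_items_to_keep=3)
    return top_idx  # (1, 3) indices into the original candidate list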
class TwoScorePruner(torch.nn.Module):
"""
    The output has 2 columns instead of 1, so that we have separate invalid/valid scores for each span.
    This way we can add the "invalid" span score to the "invalid" relation score
    and the "valid" span score to the pos/neg/neu relation scores.
    Internally we normalize both columns and use column 1 for top-k sorting,
    but the output span scores are the un-normalized logits.
"""
def __init__(self, scorer: torch.nn.Module) -> None:
super().__init__()
self._scorer = scorer
self.output_size = 2
@overrides
def forward(
self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor],
) -> Tuple[
torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.FloatTensor
]:
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(num_items_to_keep, int):
batch_size = mask.size(0)
# Put the tensor on same device as the mask.
num_items_to_keep = num_items_to_keep * torch.ones(
[batch_size], dtype=torch.long, device=mask.device
)
mask = mask.unsqueeze(-1)
num_items = embeddings.size(1)
output_scores = self._scorer(embeddings)
assert output_scores.shape[-1] == self.output_size
scores = output_scores.softmax(dim=-1)[..., [1]] # Normalize for sorting
# Always keep at least one item to avoid edge case with empty matrix.
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
if scores.size(-1) != 1 or scores.dim() != 3:
raise ValueError(
f"The scorer passed to Pruner must produce a tensor of shape"
f"(batch_size, num_items, 1), but found shape {scores.size()}"
)
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
# NOTE(`mask` needs to be a byte tensor now.)
scores = util.replace_masked_values(scores, mask.bool(), -1e20)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(
num_items_to_keep, max_items_to_keep
)
top_indices_mask = top_indices_mask.bool()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(
embeddings, top_indices, flat_top_indices
)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).bool()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(output_scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores, num_items_to_keep
class ClassifyMaskPruner(Pruner):
def __init__(self, scorer: torch.nn.Module, threshold=0.5, **kwargs):
super().__init__(scorer, **kwargs)
self._threshold = threshold
@overrides
def forward(
self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor],
class_scores: torch.FloatTensor = None,
gold_labels: torch.long = None,
extra_scores: torch.FloatTensor = None, # Scores to add to scorer output
) -> Tuple[
torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.FloatTensor
]:
mask = mask.unsqueeze(-1)
scores = self._scorer(embeddings)
bs, num_items, size = scores.shape
assert size == 1
if extra_scores is not None:
# Assumes extra_scores is already in [0, 1] range
scores = scores.sigmoid() + extra_scores
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
# NOTE(`mask` needs to be a byte tensor now.)
scores = util.replace_masked_values(scores, mask.bool(), -1e20)
keep = torch.gt(scores.sigmoid(), self._threshold).long()
num_items_to_keep = keep.sum(dim=1).view(bs)
num_items_to_keep = torch.clamp(num_items_to_keep, min=1)
# import logging
# logging.info(dict(num_items_to_keep=num_items_to_keep))
# Always keep at least one item to avoid edge case with empty matrix.
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(
num_items_to_keep, max_items_to_keep
)
top_indices_mask = top_indices_mask.bool()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(
embeddings, top_indices, flat_top_indices
)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).bool()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores, num_items_to_keep
| 18,631 | 43.46778 | 99 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/main.py | import json
import shutil
import time
from os import remove
from pathlib import Path
from typing import List, Tuple, Optional
import _jsonnet # noqa
import pandas as pd
from fire import Fire
from pydantic import BaseModel
from data_utils import (
LabelEnum,
SplitEnum,
Sentence,
SentimentTriple,
Data,
ResultAnalyzer,
)
from evaluation import nereval, LinearInstance, FScore
from utils import Shell, hash_text, update_nested_dict
class SpanModelDocument(BaseModel):
sentences: List[List[str]]
ner: List[List[Tuple[int, int, str]]]
relations: List[List[Tuple[int, int, int, int, str]]]
doc_key: str
@property
def is_valid(self) -> bool:
return len(set(map(len, [self.sentences, self.ner, self.relations]))) == 1
@classmethod
def from_sentence(cls, x: Sentence):
ner: List[Tuple[int, int, str]] = []
for t in x.triples:
ner.append((t.o_start, t.o_end, LabelEnum.opinion))
ner.append((t.t_start, t.t_end, LabelEnum.target))
ner = sorted(set(ner), key=lambda n: n[0])
relations = [
(t.o_start, t.o_end, t.t_start, t.t_end, t.label) for t in x.triples
]
return cls(
sentences=[x.tokens],
ner=[ner],
relations=[relations],
doc_key=str(x.id),
)
class SpanModelPrediction(SpanModelDocument):
predicted_ner: List[List[Tuple[int, int, LabelEnum, float, float]]] = [
[]
] # If loss_weights["ner"] == 0.0
predicted_relations: List[List[Tuple[int, int, int, int, LabelEnum, float, float]]]
def to_sentence(self) -> Sentence:
for lst in [self.sentences, self.predicted_ner, self.predicted_relations]:
assert len(lst) == 1
triples = [
SentimentTriple(o_start=os, o_end=oe, t_start=ts, t_end=te, label=label)
for os, oe, ts, te, label, value, prob in self.predicted_relations[0]
]
return Sentence(
id=int(self.doc_key),
tokens=self.sentences[0],
pos=[],
weight=1,
is_labeled=False,
triples=triples,
spans=[lst[:3] for lst in self.predicted_ner[0]],
)
def update_instance(self, x: LinearInstance) -> LinearInstance:
x.set_prediction(self.to_sentence().to_instance().output)
return x
class SpanModelData(BaseModel):
root: Path
data_split: SplitEnum
documents: Optional[List[SpanModelDocument]]
@classmethod
def read(cls, path: Path) -> List[SpanModelDocument]:
docs = []
with open(path) as f:
for line in f:
line = line.strip()
raw: dict = json.loads(line)
docs.append(SpanModelDocument(**raw))
return docs
def load(self):
if self.documents is None:
path = self.root / f"{self.data_split}.json"
self.documents = self.read(path)
def dump(self, path: Path, sep="\n"):
for d in self.documents:
assert d.is_valid
with open(path, "w") as f:
f.write(sep.join([d.json() for d in self.documents]))
assert all(
[a.dict() == b.dict() for a, b in zip(self.documents, self.read(path))]
)
@classmethod
def from_data(cls, x: Data):
data = cls(root=x.root, data_split=x.data_split)
data.documents = [SpanModelDocument.from_sentence(s) for s in x.sentences]
return data
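# Format sketch of a single serialized document (tokens, indices and label strings below are
# made-up placeholders for illustration; real label values come from LabelEnum and the data files).
def _demo_span_model_document() -> SpanModelDocument:
    return SpanModelDocument(
        sentences=[["the", "battery", "life", "is", "great"]],
        ner=[[(1, 2, "target"), (4, 4, "opinion")]],
        relations=[[(4, 4, 1, 2, "POS")]],  # (opinion_start, opinion_end, target_start, target_end, polarity)
        doc_key="0",
    )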
class SpanModelConfigMaker(BaseModel):
root: Path = Path("/tmp/config_maker")
def run(self, path_in: Path, **kwargs) -> Path:
self.root.mkdir(exist_ok=True)
path_out = self.root / path_in.name
config = json.loads(_jsonnet.evaluate_file(str(path_in)))
assert isinstance(config, dict)
for key, value in kwargs.items():
config = update_nested_dict(config, key, value)
with open(path_out, "w") as f:
f.write(json.dumps(config, indent=2))
return path_out
class SpanModelTrainer(BaseModel):
root: Path
train_kwargs: dict
path_config: Path = Path("training_config/aste.jsonnet").resolve()
repo_span_model: Path = Path(".").resolve()
output_dir: Optional[Path]
model_path: Optional[Path]
data_name: Optional[str]
task_name: Optional[str]
@property
def name(self) -> str:
hash_id = hash_text(str(self.train_kwargs))
return "_".join([self.task_name, self.data_name, hash_id])
def load(self, overwrite: bool):
if self.data_name is None:
self.data_name = self.root.stem
if self.task_name is None:
self.task_name = self.path_config.stem
if self.model_path is None:
self.model_path = Path(f"models/{self.name}/model.tar.gz")
if self.output_dir is None:
self.output_dir = Path(f"model_outputs/{self.name}")
if self.model_path.parent.exists() and overwrite:
print(dict(rmtree=self.model_path.parent))
shutil.rmtree(self.model_path.parent)
if self.output_dir.exists() and overwrite:
print(dict(rmtree=self.output_dir))
shutil.rmtree(self.output_dir)
self.output_dir.mkdir(exist_ok=True, parents=True)
print(self.json(indent=2))
def get_processed_data_path(self, data_split: SplitEnum) -> Path:
# Should match the path in .jsonnet config file
return self.output_dir / f"{data_split}.json"
def get_predict_path(self, data_split: SplitEnum) -> Path:
return self.output_dir / f"predict_{data_split}.jsonl"
def setup_data(self):
for data_split in [SplitEnum.train, SplitEnum.dev, SplitEnum.test]:
data = Data(root=self.root, data_split=data_split)
data.load()
new = SpanModelData.from_data(data)
new.dump(self.get_processed_data_path(data_split))
def train(self, overwrite=True):
self.load(overwrite=overwrite)
        if not overwrite and self.model_path.exists():
return
self.setup_data()
kwargs = dict(self.train_kwargs)
data_map = dict(
train_data_path=SplitEnum.train,
validation_data_path=SplitEnum.dev,
test_data_path=SplitEnum.test,
)
for k, v in data_map.items():
kwargs[k] = str(self.get_processed_data_path(v).resolve())
kwargs.setdefault("seed", 0) # A bit sneaky to put "seed" in **kwargs but this is surgical
seed = kwargs.pop("seed")
for key in ["random_seed", "numpy_seed", "pytorch_seed"]:
kwargs[key] = seed
config_maker = SpanModelConfigMaker(root=self.output_dir)
path_config = config_maker.run(self.path_config, **kwargs).resolve()
shell = Shell()
shell.run(
f"cd {self.repo_span_model} && allennlp train {path_config}",
serialization_dir=self.model_path.parent,
include_package="span_model",
)
assert self.model_path.exists()
def predict(self, data_split: SplitEnum) -> Path:
self.load(overwrite=False)
path = self.get_predict_path(data_split)
if path.exists():
remove(path)
shell = Shell()
shell.run(
f"cd {self.repo_span_model} && allennlp predict {self.model_path}",
self.get_processed_data_path(data_split),
predictor="span_model",
include_package="span_model",
use_dataset_reader="",
output_file=path,
cuda_device=self.train_kwargs["trainer__cuda_device"],
silent="",
)
return path
def eval(self, data_split: SplitEnum) -> FScore:
data = Data(root=self.root, data_split=data_split)
data.load()
instances = [s.to_instance() for s in data.sentences]
path = self.predict(data_split)
with open(path) as f:
preds = [SpanModelPrediction(**json.loads(line.strip())) for line in f]
for i, p in zip(instances, preds):
p.update_instance(i)
pred_sents = [p.to_sentence() for p in preds]
for name, sents in dict(pred=pred_sents, gold=data.sentences).items():
path_out = self.output_dir / f"sentences_{data_split}_{name}.json"
print(dict(path_out=path_out))
with open(path_out, "w") as f:
f.write("\n".join([s.json() for s in sents]))
scorer = nereval()
analyzer = ResultAnalyzer()
analyzer.run(pred=pred_sents, gold=data.sentences)
return scorer.eval(instances) # noqa
def main_single(path: Path, overwrite=False, **kwargs):
trainer = SpanModelTrainer(root=path.resolve(), train_kwargs=kwargs)
trainer.train(overwrite=overwrite)
scores = {}
for data_split in [SplitEnum.dev, SplitEnum.test]:
scores[data_split] = trainer.eval(data_split=data_split)
return scores
def main(
root="aste/data/triplet_data",
names=("14lap",),
seeds=(0,),
sep=",",
name_out="results",
**kwargs,
):
print(json.dumps(locals(), indent=2))
records = {}
names = names if type(names) in {tuple, list} else names.split(sep)
paths = [Path(root) / n for n in names]
assert all([p.exists() for p in paths])
assert len(seeds) == len(paths)
for i, p in enumerate(paths):
start = time.time()
scores = main_single(p, overwrite=True, seed=seeds[i], **kwargs)
duration = time.time() - start
for k, v in scores.items():
row = dict(name=p.stem, k=k, score=str(v), duration=duration)
records.setdefault(k, []).append(row)
df = pd.DataFrame(records[k])
print(df)
path = Path(f"{name_out}_{k}.csv")
path.parent.mkdir(exist_ok=True)
df.to_csv(path, index=False)
print(dict(path_results=path))
if __name__ == "__main__":
Fire(main)
| 9,999 | 33.129693 | 99 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/wrapper.py | import json
import os
from pathlib import Path
from typing import List
import _jsonnet
from fire import Fire
from pydantic import BaseModel
from tqdm import tqdm
from data_utils import Data, SentimentTriple, SplitEnum
from main import SpanModelData, SpanModelPrediction
from utils import Shell, safe_divide
class SpanModel(BaseModel):
save_dir: str
random_seed: int
path_config_base: str = "training_config/config.jsonnet"
def save_temp_data(self, path_in: str, name: str, is_test: bool = False) -> Path:
path_temp = Path(self.save_dir) / "temp_data" / f"{name}.json"
path_temp = path_temp.resolve()
path_temp.parent.mkdir(exist_ok=True, parents=True)
data = Data.load_from_full_path(path_in)
if is_test:
# SpanModel error if s.triples is empty list
assert data.sentences is not None
for s in data.sentences:
s.triples = [SentimentTriple.make_dummy()]
span_data = SpanModelData.from_data(data)
span_data.dump(path_temp)
return path_temp
def fit(self, path_train: str, path_dev: str, random_seed):
self.random_seed = random_seed
weights_dir = Path(self.save_dir) / "weights"
weights_dir.mkdir(exist_ok=True, parents=True)
print(dict(weights_dir=weights_dir))
path_config = Path(self.save_dir) / "config.jsonnet"
config = json.loads(_jsonnet.evaluate_file(self.path_config_base))
for key in ["random_seed", "pytorch_seed", "numpy_seed"]:
assert key in config.keys()
config[key] = self.random_seed
print({key: self.random_seed})
for name, path in dict(
train=path_train, validation=path_dev, test=path_dev
).items():
key = f"{name}_data_path"
assert key in config.keys()
path_temp = self.save_temp_data(path, name)
config[key] = str(path_temp)
print({key: path_temp})
with open(path_config, "w") as f:
f.write(json.dumps(config, indent=2))
print(dict(path_config=path_config))
shell = Shell()
work_dir = Path(".").resolve()
shell.run(
f"cd {work_dir} && allennlp train {path_config}",
serialization_dir=str(weights_dir),
include_package="span_model",
)
def predict(self, path_in: str, path_out: str, device=0):
work_dir = Path(".").resolve()
path_model = Path(self.save_dir) / "weights" / "model.tar.gz"
path_temp_in = self.save_temp_data(path_in, "pred_in", is_test=True)
path_temp_out = Path(self.save_dir) / "temp_data" / "pred_out.json"
if path_temp_out.exists():
os.remove(path_temp_out)
shell = Shell()
shell.run(
f"cd {work_dir} && allennlp predict {path_model}",
str(path_temp_in),
predictor="span_model",
include_package="span_model",
use_dataset_reader="",
output_file=str(path_temp_out),
            cuda_device=device,
silent="",
)
with open(path_temp_out) as f:
preds = [SpanModelPrediction(**json.loads(line.strip())) for line in f]
data = Data(
root=Path(),
data_split=SplitEnum.test,
sentences=[p.to_sentence() for p in preds],
)
data.save_to_path(path_out)
def score(self, path_pred: str, path_gold: str) -> dict:
pred = Data.load_from_full_path(path_pred)
gold = Data.load_from_full_path(path_gold)
assert pred.sentences is not None
assert gold.sentences is not None
assert len(pred.sentences) == len(gold.sentences)
num_pred = 0
num_gold = 0
num_correct = 0
for i in range(len(gold.sentences)):
num_pred += len(pred.sentences[i].triples)
num_gold += len(gold.sentences[i].triples)
for p in pred.sentences[i].triples:
for g in gold.sentences[i].triples:
if p.dict() == g.dict():
num_correct += 1
precision = safe_divide(num_correct, num_pred)
recall = safe_divide(num_correct, num_gold)
info = dict(
path_pred=path_pred,
path_gold=path_gold,
precision=precision,
recall=recall,
score=safe_divide(2 * precision * recall, precision + recall),
)
return info
def run_train(path_train: str, path_dev: str, save_dir: str, random_seed: int):
print(dict(run_train=locals()))
if Path(save_dir).exists():
return
model = SpanModel(save_dir=save_dir, random_seed=random_seed)
    model.fit(path_train, path_dev, random_seed)
def run_train_many(save_dir_template: str, random_seeds: List[int], **kwargs):
for seed in tqdm(random_seeds):
save_dir = save_dir_template.format(seed)
run_train(save_dir=save_dir, random_seed=seed, **kwargs)
def run_eval(path_test: str, save_dir: str):
print(dict(run_eval=locals()))
model = SpanModel(save_dir=save_dir, random_seed=0)
path_pred = str(Path(save_dir) / "pred.txt")
model.predict(path_test, path_pred)
results = model.score(path_pred, path_test)
print(results)
return results
def run_eval_many(save_dir_template: str, random_seeds: List[int], **kwargs):
results = []
for seed in tqdm(random_seeds):
save_dir = save_dir_template.format(seed)
results.append(run_eval(save_dir=save_dir, **kwargs))
precision = sum(r["precision"] for r in results) / len(random_seeds)
recall = sum(r["recall"] for r in results) / len(random_seeds)
score = safe_divide(2 * precision * recall, precision + recall)
print(dict(precision=precision, recall=recall, score=score))
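# Example command-line usage via Fire (paths, seed and working directory are placeholders/assumptions):
# python aste/wrapper.py run_train --path_train=data/train.txt --path_dev=data/dev.txt \
#     --save_dir=outputs/seed_0 --random_seed=0
# python aste/wrapper.py run_eval --path_test=data/test.txt --save_dir=outputs/seed_0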
if __name__ == "__main__":
Fire()
| 5,884 | 33.617647 | 85 | py |
DMASTE | DMASTE-main/BMRC/main.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import argparse
import Data
import Model
import utils
import torch
from torch.nn import functional as F
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
import os
from torch.utils.data import Dataset
import random
import numpy as np
# os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(2)
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query']
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask']
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask']
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg']
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg']
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query']
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data[
'_backward_asp_query_mask']
self._backward_opi_query_mask = pre_data[
'_backward_opi_query_mask']
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg']
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg']
self._sentiment_query = pre_data['_sentiment_query']
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask']
self._sentiment_query_seg = pre_data['_sentiment_query_seg']
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
def test(model, t, batch_generator, standard, beta, logger):
model.eval()
all_target = []
all_pred = []
triplet_target_num = 0
asp_target_num = 0
opi_target_num = 0
asp_opi_target_num = 0
asp_pol_target_num = 0
triplet_predict_num = 0
asp_predict_num = 0
opi_predict_num = 0
asp_opi_predict_num = 0
asp_pol_predict_num = 0
triplet_match_num = 0
asp_match_num = 0
opi_match_num = 0
asp_opi_match_num = 0
asp_pol_match_num = 0
for batch_index, batch_dict in enumerate(batch_generator):
triplets_target = standard[batch_index]['triplet']
asp_target = standard[batch_index]['asp_target']
opi_target = standard[batch_index]['opi_target']
asp_opi_target = standard[batch_index]['asp_opi_target']
asp_pol_target = standard[batch_index]['asp_pol_target']
        # predicted triplets for this sentence
triplets_predict = []
asp_predict = []
opi_predict = []
asp_opi_predict = []
asp_pol_predict = []
forward_pair_list = []
forward_pair_prob = []
forward_pair_ind_list = []
backward_pair_list = []
backward_pair_prob = []
backward_pair_ind_list = []
final_asp_list = []
final_opi_list = []
final_asp_ind_list = []
final_opi_ind_list = []
# forward q_1
passenge_index = batch_dict['forward_asp_answer_start'][0].gt(-1).float().nonzero()
passenge = batch_dict['forward_asp_query'][0][passenge_index].squeeze(1)
f_asp_start_scores, f_asp_end_scores = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0)
f_asp_start_scores = F.softmax(f_asp_start_scores[0], dim=1)
f_asp_end_scores = F.softmax(f_asp_end_scores[0], dim=1)
f_asp_start_prob, f_asp_start_ind = torch.max(f_asp_start_scores, dim=1)
f_asp_end_prob, f_asp_end_ind = torch.max(f_asp_end_scores, dim=1)
f_asp_start_prob_temp = []
f_asp_end_prob_temp = []
f_asp_start_index_temp = []
f_asp_end_index_temp = []
for i in range(f_asp_start_ind.size(0)):
if batch_dict['forward_asp_answer_start'][0, i] != -1:
if f_asp_start_ind[i].item() == 1:
f_asp_start_index_temp.append(i)
f_asp_start_prob_temp.append(f_asp_start_prob[i].item())
if f_asp_end_ind[i].item() == 1:
f_asp_end_index_temp.append(i)
f_asp_end_prob_temp.append(f_asp_end_prob[i].item())
f_asp_start_index, f_asp_end_index, f_asp_prob = utils.filter_unpaired(
f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_index_temp, f_asp_end_index_temp)
for i in range(len(f_asp_start_index)):
opinion_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What opinion given the aspect'.split(' ')])
for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1):
opinion_query.append(batch_dict['forward_asp_query'][0][j].item())
opinion_query.append(t.convert_tokens_to_ids('?'))
opinion_query.append(t.convert_tokens_to_ids('[SEP]'))
opinion_query_seg = [0] * len(opinion_query)
f_opi_length = len(opinion_query)
opinion_query = torch.tensor(opinion_query).long().cuda()
opinion_query = torch.cat([opinion_query, passenge], -1).unsqueeze(0)
opinion_query_seg += [1] * passenge.size(0)
opinion_query_mask = torch.ones(opinion_query.size(1)).float().cuda().unsqueeze(0)
opinion_query_seg = torch.tensor(opinion_query_seg).long().cuda().unsqueeze(0)
f_opi_start_scores, f_opi_end_scores = model(opinion_query, opinion_query_mask, opinion_query_seg, 0)
f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
f_opi_start_prob, f_opi_start_ind = torch.max(f_opi_start_scores, dim=1)
f_opi_end_prob, f_opi_end_ind = torch.max(f_opi_end_scores, dim=1)
f_opi_start_prob_temp = []
f_opi_end_prob_temp = []
f_opi_start_index_temp = []
f_opi_end_index_temp = []
for k in range(f_opi_start_ind.size(0)):
if opinion_query_seg[0, k] == 1:
if f_opi_start_ind[k].item() == 1:
f_opi_start_index_temp.append(k)
f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
if f_opi_end_ind[k].item() == 1:
f_opi_end_index_temp.append(k)
f_opi_end_prob_temp.append(f_opi_end_prob[k].item())
f_opi_start_index, f_opi_end_index, f_opi_prob = utils.filter_unpaired(
f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_index_temp, f_opi_end_index_temp)
for idx in range(len(f_opi_start_index)):
asp = [batch_dict['forward_asp_query'][0][j].item() for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1)]
opi = [opinion_query[0][j].item() for j in range(f_opi_start_index[idx], f_opi_end_index[idx] + 1)]
asp_ind = [f_asp_start_index[i]-5, f_asp_end_index[i]-5]
opi_ind = [f_opi_start_index[idx]-f_opi_length, f_opi_end_index[idx]-f_opi_length]
temp_prob = f_asp_prob[i] * f_opi_prob[idx]
if asp_ind + opi_ind not in forward_pair_ind_list:
forward_pair_list.append([asp] + [opi])
forward_pair_prob.append(temp_prob)
forward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# backward q_1
b_opi_start_scores, b_opi_end_scores = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0)
b_opi_start_scores = F.softmax(b_opi_start_scores[0], dim=1)
b_opi_end_scores = F.softmax(b_opi_end_scores[0], dim=1)
b_opi_start_prob, b_opi_start_ind = torch.max(b_opi_start_scores, dim=1)
b_opi_end_prob, b_opi_end_ind = torch.max(b_opi_end_scores, dim=1)
b_opi_start_prob_temp = []
b_opi_end_prob_temp = []
b_opi_start_index_temp = []
b_opi_end_index_temp = []
for i in range(b_opi_start_ind.size(0)):
if batch_dict['backward_opi_answer_start'][0, i] != -1:
if b_opi_start_ind[i].item() == 1:
b_opi_start_index_temp.append(i)
b_opi_start_prob_temp.append(b_opi_start_prob[i].item())
if b_opi_end_ind[i].item() == 1:
b_opi_end_index_temp.append(i)
b_opi_end_prob_temp.append(b_opi_end_prob[i].item())
b_opi_start_index, b_opi_end_index, b_opi_prob = utils.filter_unpaired(
b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_index_temp, b_opi_end_index_temp)
# backward q_2
for i in range(len(b_opi_start_index)):
aspect_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What aspect does the opinion'.split(' ')])
for j in range(b_opi_start_index[i], b_opi_end_index[i] + 1):
aspect_query.append(batch_dict['backward_opi_query'][0][j].item())
aspect_query.append(t.convert_tokens_to_ids('describe'))
aspect_query.append(t.convert_tokens_to_ids('?'))
aspect_query.append(t.convert_tokens_to_ids('[SEP]'))
aspect_query_seg = [0] * len(aspect_query)
b_asp_length = len(aspect_query)
aspect_query = torch.tensor(aspect_query).long().cuda()
aspect_query = torch.cat([aspect_query, passenge], -1).unsqueeze(0)
aspect_query_seg += [1] * passenge.size(0)
aspect_query_mask = torch.ones(aspect_query.size(1)).float().cuda().unsqueeze(0)
aspect_query_seg = torch.tensor(aspect_query_seg).long().cuda().unsqueeze(0)
b_asp_start_scores, b_asp_end_scores = model(aspect_query, aspect_query_mask, aspect_query_seg, 0)
b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
b_asp_start_prob, b_asp_start_ind = torch.max(b_asp_start_scores, dim=1)
b_asp_end_prob, b_asp_end_ind = torch.max(b_asp_end_scores, dim=1)
b_asp_start_prob_temp = []
b_asp_end_prob_temp = []
b_asp_start_index_temp = []
b_asp_end_index_temp = []
for k in range(b_asp_start_ind.size(0)):
if aspect_query_seg[0, k] == 1:
if b_asp_start_ind[k].item() == 1:
b_asp_start_index_temp.append(k)
b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
if b_asp_end_ind[k].item() == 1:
b_asp_end_index_temp.append(k)
b_asp_end_prob_temp.append(b_asp_end_prob[k].item())
b_asp_start_index, b_asp_end_index, b_asp_prob = utils.filter_unpaired(
b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_index_temp, b_asp_end_index_temp)
for idx in range(len(b_asp_start_index)):
opi = [batch_dict['backward_opi_query'][0][j].item() for j in
range(b_opi_start_index[i], b_opi_end_index[i] + 1)]
asp = [aspect_query[0][j].item() for j in range(b_asp_start_index[idx], b_asp_end_index[idx] + 1)]
asp_ind = [b_asp_start_index[idx]-b_asp_length, b_asp_end_index[idx]-b_asp_length]
opi_ind = [b_opi_start_index[i]-5, b_opi_end_index[i]-5]
temp_prob = b_asp_prob[idx] * b_opi_prob[i]
if asp_ind + opi_ind not in backward_pair_ind_list:
backward_pair_list.append([asp] + [opi])
backward_pair_prob.append(temp_prob)
backward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# filter triplet
# forward
for idx in range(len(forward_pair_list)):
if forward_pair_list[idx] in backward_pair_list:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
else:
if forward_pair_prob[idx] >= beta:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
# backward
for idx in range(len(backward_pair_list)):
if backward_pair_list[idx] not in forward_pair_list:
if backward_pair_prob[idx] >= beta:
if backward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(backward_pair_list[idx][0])
final_opi_list.append([backward_pair_list[idx][1]])
final_asp_ind_list.append(backward_pair_ind_list[idx][:2])
final_opi_ind_list.append([backward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(backward_pair_list[idx][0])
if backward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(backward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(backward_pair_ind_list[idx][2:])
# sentiment
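        # build one sentiment query per predicted aspect, listing all of its matched opinions,
        # and classify the sentence-level representation into one of three polarities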
for idx in range(len(final_asp_list)):
predict_opinion_num = len(final_opi_list[idx])
sentiment_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What sentiment given the aspect'.split(' ')])
sentiment_query+=final_asp_list[idx]
sentiment_query += t.convert_tokens_to_ids([word.lower() for word in 'and the opinion'.split(' ')])
            # concatenate all predicted opinions for this aspect into the sentiment query
for idy in range(predict_opinion_num):
sentiment_query+=final_opi_list[idx][idy]
if idy < predict_opinion_num - 1:
sentiment_query.append(t.convert_tokens_to_ids('/'))
sentiment_query.append(t.convert_tokens_to_ids('?'))
sentiment_query.append(t.convert_tokens_to_ids('[SEP]'))
sentiment_query_seg = [0] * len(sentiment_query)
sentiment_query = torch.tensor(sentiment_query).long().cuda()
sentiment_query = torch.cat([sentiment_query, passenge], -1).unsqueeze(0)
sentiment_query_seg += [1] * passenge.size(0)
sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().cuda().unsqueeze(0)
sentiment_query_seg = torch.tensor(sentiment_query_seg).long().cuda().unsqueeze(0)
sentiment_scores = model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 1)
sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()
            # each predicted opinion forms one triplet with this aspect and the predicted sentiment
for idy in range(predict_opinion_num):
asp_f = []
opi_f = []
asp_f.append(final_asp_ind_list[idx][0])
asp_f.append(final_asp_ind_list[idx][1])
opi_f.append(final_opi_ind_list[idx][idy][0])
opi_f.append(final_opi_ind_list[idx][idy][1])
triplet_predict = asp_f + opi_f + [sentiment_predicted]
triplets_predict.append(triplet_predict)
if opi_f not in opi_predict:
opi_predict.append(opi_f)
if asp_f + opi_f not in asp_opi_predict:
asp_opi_predict.append(asp_f + opi_f)
if asp_f + [sentiment_predicted] not in asp_pol_predict:
asp_pol_predict.append(asp_f + [sentiment_predicted])
if asp_f not in asp_predict:
asp_predict.append(asp_f)
all_target.append(triplets_target)
all_pred.append(triplets_predict)
triplet_target_num += len(triplets_target)
asp_target_num += len(asp_target)
opi_target_num += len(opi_target)
asp_opi_target_num += len(asp_opi_target)
asp_pol_target_num += len(asp_pol_target)
triplet_predict_num += len(triplets_predict)
asp_predict_num += len(asp_predict)
opi_predict_num += len(opi_predict)
asp_opi_predict_num += len(asp_opi_predict)
asp_pol_predict_num += len(asp_pol_predict)
for trip in triplets_target:
for trip_ in triplets_predict:
if trip_ == trip:
triplet_match_num += 1
for trip in asp_target:
for trip_ in asp_predict:
if trip_ == trip:
asp_match_num += 1
for trip in opi_target:
for trip_ in opi_predict:
if trip_ == trip:
opi_match_num += 1
for trip in asp_opi_target:
for trip_ in asp_opi_predict:
if trip_ == trip:
asp_opi_match_num += 1
for trip in asp_pol_target:
for trip_ in asp_pol_predict:
if trip_ == trip:
asp_pol_match_num += 1
precision = float(triplet_match_num) / float(triplet_predict_num+1e-6)
recall = float(triplet_match_num) / float(triplet_target_num+1e-6)
f1 = 2 * precision * recall / (precision + recall+1e-6)
logger.info('Triplet - Precision: {}\tRecall: {}\tF1: {}'.format(precision, recall, f1))
precision_aspect = float(asp_match_num) / float(asp_predict_num+1e-6)
recall_aspect = float(asp_match_num) / float(asp_target_num+1e-6)
f1_aspect = 2 * precision_aspect * recall_aspect / (precision_aspect + recall_aspect+1e-6)
logger.info('Aspect - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect, recall_aspect, f1_aspect))
precision_opinion = float(opi_match_num) / float(opi_predict_num+1e-6)
recall_opinion = float(opi_match_num) / float(opi_target_num+1e-6)
f1_opinion = 2 * precision_opinion * recall_opinion / (precision_opinion + recall_opinion+1e-6)
logger.info('Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_opinion, recall_opinion, f1_opinion))
precision_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_predict_num+1e-6)
recall_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_target_num+1e-6)
f1_aspect_sentiment = 2 * precision_aspect_sentiment * recall_aspect_sentiment / (
precision_aspect_sentiment + recall_aspect_sentiment+1e-6)
logger.info('Aspect-Sentiment - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_sentiment,
recall_aspect_sentiment,
f1_aspect_sentiment))
precision_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_predict_num+1e-6)
recall_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_target_num+1e-6)
f1_aspect_opinion = 2 * precision_aspect_opinion * recall_aspect_opinion / (
precision_aspect_opinion + recall_aspect_opinion+1e-6)
logger.info(
'Aspect-Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_opinion, recall_aspect_opinion,
f1_aspect_opinion))
metric = {'triplet': {'p': precision, 'r': recall, 'f1': f1},
'aspect': {'p': precision_aspect, 'r': recall_aspect, 'f1': f1_aspect},
'opinion': {'p': precision_opinion, 'r': recall_opinion, 'f1': f1_opinion},
'aspect-sentiment': {'p': precision_aspect_sentiment, 'r': recall_aspect_sentiment, 'f1': f1_aspect_sentiment},
'aspect-opinion': {'p': precision_aspect_opinion, 'r': recall_aspect_opinion, 'f1': f1_aspect_opinion}}
triplets = {'pred': all_pred, 'target': all_target}
return metric, triplets
def main(args, tokenize):
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.tmp_log), exist_ok=True)
os.makedirs(args.model_dir, exist_ok=True)
# init logger
logger = utils.get_logger(os.path.join(args.tmp_log, args.model_name + '.log'))
# load data
logger.info('loading data......')
# init model
logger.info('initial model......')
model = Model.BERTModel(args)
if args.ifgpu:
model = model.cuda()
# print args
logger.info(args)
if args.mode == 'test':
logger.info('start testing......')
target_data_path = os.path.join(args.data_dir, args.target + '.pt')
target_standard_data_path = os.path.join(args.data_dir, args.target + '_standard.pt')
target_total_data = torch.load(target_data_path)
target_standard_data = torch.load(target_standard_data_path)
test_standard = target_standard_data['test']
test_data = target_total_data['test']
test_dataset = Data.ReviewDataset(None, None, test_data, 'test')
# load checkpoint
logger.info('loading checkpoint......')
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
model.load_state_dict(checkpoint['net'])
model.eval()
batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
# eval
logger.info('evaluating......')
metric, triplets = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
with open(os.path.join(args.log_dir, args.model_name, 'metric.txt'), 'w') as f:
f.write(str(metric) + '\n')
with open(os.path.join(args.log_dir, args.model_name, 'pred.txt'), 'w') as f:
for p, t in zip(triplets['pred'], triplets['target']):
f.write(str({'pred': p, 'target': t}) + '\n')
elif args.mode == 'train':
source_data_path = os.path.join(args.data_dir, args.source + '.pt')
source_standard_data_path = os.path.join(args.data_dir, args.source + '_standard.pt')
source_total_data = torch.load(source_data_path)
source_standard_data = torch.load(source_standard_data_path)
train_data = source_total_data['train']
dev_data = source_total_data['dev']
dev_standard = source_standard_data['dev']
train_dataset = Data.ReviewDataset(train_data, dev_data, None, 'train')
dev_dataset = Data.ReviewDataset(train_data, dev_data, None, 'dev')
batch_num_train = train_dataset.get_batch_num(args.batch_size)
# optimizer
logger.info('initial optimizer......')
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if "_bert" in n], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if "_bert" not in n],
'lr': args.learning_rate, 'weight_decay': 0.01}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.tuning_bert_rate, correct_bias=False)
# load saved model, optimizer and epoch num
if args.reload and os.path.exists(args.checkpoint_path):
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['net'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
logger.info('Reload model and optimizer after training epoch {}'.format(checkpoint['epoch']))
else:
start_epoch = 1
logger.info('New model and optimizer from epoch 0')
# scheduler
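        # linear warmup over the first `warm_up` fraction of updates, then linear decay to zero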
training_steps = args.epoch_num * batch_num_train
warmup_steps = int(training_steps * args.warm_up)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=training_steps)
# training
logger.info('begin training......')
best_dev_f1 = 0.
for epoch in range(start_epoch, args.epoch_num+1):
model.train()
model.zero_grad()
batch_generator = Data.generate_fi_batches(dataset=train_dataset, batch_size=args.batch_size,
ifgpu=args.ifgpu)
for batch_index, batch_dict in enumerate(batch_generator):
optimizer.zero_grad()
# q1_a
f_aspect_start_scores, f_aspect_end_scores = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0)
f_asp_loss = utils.calculate_entity_loss(f_aspect_start_scores, f_aspect_end_scores,
batch_dict['forward_asp_answer_start'],
batch_dict['forward_asp_answer_end'])
# q1_b
b_opi_start_scores, b_opi_end_scores = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0)
b_opi_loss = utils.calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
batch_dict['backward_opi_answer_start'],
batch_dict['backward_opi_answer_end'])
# q2_a
f_opi_start_scores, f_opi_end_scores = model(
batch_dict['forward_opi_query'].view(-1, batch_dict['forward_opi_query'].size(-1)),
batch_dict['forward_opi_query_mask'].view(-1, batch_dict['forward_opi_query_mask'].size(-1)),
batch_dict['forward_opi_query_seg'].view(-1, batch_dict['forward_opi_query_seg'].size(-1)),
0)
f_opi_loss = utils.calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)),
batch_dict['forward_opi_answer_end'].view(-1, batch_dict['forward_opi_answer_end'].size(-1)))
# q2_b
b_asp_start_scores, b_asp_end_scores = model(
batch_dict['backward_asp_query'].view(-1, batch_dict['backward_asp_query'].size(-1)),
batch_dict['backward_asp_query_mask'].view(-1, batch_dict['backward_asp_query_mask'].size(-1)),
batch_dict['backward_asp_query_seg'].view(-1, batch_dict['backward_asp_query_seg'].size(-1)),
0)
b_asp_loss = utils.calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)),
batch_dict['backward_asp_answer_end'].view(-1, batch_dict['backward_asp_answer_end'].size(-1)))
# q_3
sentiment_scores = model(batch_dict['sentiment_query'].view(-1, batch_dict['sentiment_query'].size(-1)),
batch_dict['sentiment_query_mask'].view(-1, batch_dict['sentiment_query_mask'].size(-1)),
batch_dict['sentiment_query_seg'].view(-1, batch_dict['sentiment_query_seg'].size(-1)),
1)
sentiment_loss = utils.calculate_sentiment_loss(sentiment_scores, batch_dict['sentiment_answer'].view(-1))
# loss
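                # total loss: the four span-extraction losses plus the beta-weighted sentiment loss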
loss_sum = f_asp_loss + f_opi_loss + b_opi_loss + b_asp_loss + args.beta*sentiment_loss
loss_sum.backward()
optimizer.step()
scheduler.step()
# train logger
if batch_index % 10 == 0:
logger.info('Epoch:[{}/{}]\t Batch:[{}/{}]\t Loss Sum:{}\t '
'forward Loss:{};{}\t backward Loss:{};{}\t Sentiment Loss:{}'.
format(epoch, args.epoch_num, batch_index, batch_num_train,
round(loss_sum.item(), 4),
round(f_asp_loss.item(), 4), round(f_opi_loss.item(), 4),
round(b_asp_loss.item(), 4), round(b_opi_loss.item(), 4),
round(sentiment_loss.item(), 4)))
# validation
batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
metric, _ = test(model, tokenize, batch_generator_dev, dev_standard, args.inference_beta, logger)
f1 = metric['triplet']['f1']
# save model and optimizer
if f1 > best_dev_f1:
best_dev_f1 = f1
logger.info('best dev f1: {}\t epoch: {}'.format(best_dev_f1, epoch))
logger.info('Model saved after epoch {}'.format(epoch))
state = {'net': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
torch.save(state, os.path.join(args.model_dir, args.model_name + '.pt'))
# test
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.inference_beta, logger)
# logger.info('start testing......')
# test_dataset = Data.ReviewDataset(train_data, dev_data, test_data, 'test')
# # load checkpoint
# logger.info('loading checkpoint......')
# checkpoint = torch.load(args.save_model_path)
# model.load_state_dict(checkpoint['net'])
# model.eval()
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# # eval
# logger.info('evaluating......')
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
else:
logger.info('Error mode!')
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bidirectional MRC-based sentiment triplet extraction')
parser.add_argument('--data_dir', type=str, default="./data/preprocess/")
parser.add_argument('--log_dir', type=str, default="./log/")
parser.add_argument('--tmp_log', type=str)
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"])
parser.add_argument('--reload', type=bool, default=False)
parser.add_argument('--checkpoint_path', type=str, default="./model/14lap/modelFinal.model")
parser.add_argument('--model_dir', type=str, default="./model/")
parser.add_argument('--model_name', type=str, default="1")
# model hyper-parameter
parser.add_argument('--bert_model_type', type=str, default="bert-base-uncased")
parser.add_argument('--hidden_size', type=int, default=768)
parser.add_argument('--inference_beta', type=float, default=0.8)
# training hyper-parameter
parser.add_argument('--ifgpu', type=bool, default=True)
parser.add_argument('--epoch_num', type=int, default=40)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--tuning_bert_rate', type=float, default=1e-5)
parser.add_argument('--warm_up', type=float, default=0.1)
parser.add_argument('--beta', type=float, default=1)
args = parser.parse_args()
t = BertTokenizer.from_pretrained(args.bert_model_type)
main(args, t)
| 34,866 | 52.3951 | 156 | py |
DMASTE | DMASTE-main/BMRC/DANN_main.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import argparse
import Data
import DANN_Model as Model
import utils
import torch
from torch.nn import functional as F
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
import os
from torch.utils.data import Dataset, DataLoader
import random
import numpy as np
from data_utils import Unlabeled_Dataset, Domain
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query']
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask']
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask']
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg']
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg']
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query']
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data[
'_backward_asp_query_mask']
self._backward_opi_query_mask = pre_data[
'_backward_opi_query_mask']
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg']
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg']
self._sentiment_query = pre_data['_sentiment_query']
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask']
self._sentiment_query_seg = pre_data['_sentiment_query_seg']
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
def test(model, t, batch_generator, standard, beta, logger):
model.eval()
all_target = []
all_pred = []
triplet_target_num = 0
asp_target_num = 0
opi_target_num = 0
asp_opi_target_num = 0
asp_pol_target_num = 0
triplet_predict_num = 0
asp_predict_num = 0
opi_predict_num = 0
asp_opi_predict_num = 0
asp_pol_predict_num = 0
triplet_match_num = 0
asp_match_num = 0
opi_match_num = 0
asp_opi_match_num = 0
asp_pol_match_num = 0
for batch_index, batch_dict in enumerate(batch_generator):
triplets_target = standard[batch_index]['triplet']
asp_target = standard[batch_index]['asp_target']
opi_target = standard[batch_index]['opi_target']
asp_opi_target = standard[batch_index]['asp_opi_target']
asp_pol_target = standard[batch_index]['asp_pol_target']
        # predicted triplets
triplets_predict = []
asp_predict = []
opi_predict = []
asp_opi_predict = []
asp_pol_predict = []
forward_pair_list = []
forward_pair_prob = []
forward_pair_ind_list = []
backward_pair_list = []
backward_pair_prob = []
backward_pair_ind_list = []
final_asp_list = []
final_opi_list = []
final_asp_ind_list = []
final_opi_ind_list = []
# forward q_1
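        # positions whose gold start label is not -1 belong to the review passage; gather those
        # token ids so they can be appended to the follow-up queries below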
passenge_index = batch_dict['forward_asp_answer_start'][0].gt(-1).float().nonzero()
passenge = batch_dict['forward_asp_query'][0][passenge_index].squeeze(1)
f_asp_start_scores, f_asp_end_scores = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0)
f_asp_start_scores = F.softmax(f_asp_start_scores[0], dim=1)
f_asp_end_scores = F.softmax(f_asp_end_scores[0], dim=1)
f_asp_start_prob, f_asp_start_ind = torch.max(f_asp_start_scores, dim=1)
f_asp_end_prob, f_asp_end_ind = torch.max(f_asp_end_scores, dim=1)
f_asp_start_prob_temp = []
f_asp_end_prob_temp = []
f_asp_start_index_temp = []
f_asp_end_index_temp = []
for i in range(f_asp_start_ind.size(0)):
if batch_dict['forward_asp_answer_start'][0, i] != -1:
if f_asp_start_ind[i].item() == 1:
f_asp_start_index_temp.append(i)
f_asp_start_prob_temp.append(f_asp_start_prob[i].item())
if f_asp_end_ind[i].item() == 1:
f_asp_end_index_temp.append(i)
f_asp_end_prob_temp.append(f_asp_end_prob[i].item())
f_asp_start_index, f_asp_end_index, f_asp_prob = utils.filter_unpaired(
f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_index_temp, f_asp_end_index_temp)
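        # forward q_2: for every extracted aspect span, ask which opinion terms describe it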
for i in range(len(f_asp_start_index)):
opinion_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What opinion given the aspect'.split(' ')])
for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1):
opinion_query.append(batch_dict['forward_asp_query'][0][j].item())
opinion_query.append(t.convert_tokens_to_ids('?'))
opinion_query.append(t.convert_tokens_to_ids('[SEP]'))
opinion_query_seg = [0] * len(opinion_query)
f_opi_length = len(opinion_query)
opinion_query = torch.tensor(opinion_query).long().cuda()
opinion_query = torch.cat([opinion_query, passenge], -1).unsqueeze(0)
opinion_query_seg += [1] * passenge.size(0)
opinion_query_mask = torch.ones(opinion_query.size(1)).float().cuda().unsqueeze(0)
opinion_query_seg = torch.tensor(opinion_query_seg).long().cuda().unsqueeze(0)
f_opi_start_scores, f_opi_end_scores = model(opinion_query, opinion_query_mask, opinion_query_seg, 0)
f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
f_opi_start_prob, f_opi_start_ind = torch.max(f_opi_start_scores, dim=1)
f_opi_end_prob, f_opi_end_ind = torch.max(f_opi_end_scores, dim=1)
f_opi_start_prob_temp = []
f_opi_end_prob_temp = []
f_opi_start_index_temp = []
f_opi_end_index_temp = []
for k in range(f_opi_start_ind.size(0)):
if opinion_query_seg[0, k] == 1:
if f_opi_start_ind[k].item() == 1:
f_opi_start_index_temp.append(k)
f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
if f_opi_end_ind[k].item() == 1:
f_opi_end_index_temp.append(k)
f_opi_end_prob_temp.append(f_opi_end_prob[k].item())
f_opi_start_index, f_opi_end_index, f_opi_prob = utils.filter_unpaired(
f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_index_temp, f_opi_end_index_temp)
for idx in range(len(f_opi_start_index)):
asp = [batch_dict['forward_asp_query'][0][j].item() for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1)]
opi = [opinion_query[0][j].item() for j in range(f_opi_start_index[idx], f_opi_end_index[idx] + 1)]
asp_ind = [f_asp_start_index[i]-5, f_asp_end_index[i]-5]
opi_ind = [f_opi_start_index[idx]-f_opi_length, f_opi_end_index[idx]-f_opi_length]
temp_prob = f_asp_prob[i] * f_opi_prob[idx]
if asp_ind + opi_ind not in forward_pair_ind_list:
forward_pair_list.append([asp] + [opi])
forward_pair_prob.append(temp_prob)
forward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# backward q_1
b_opi_start_scores, b_opi_end_scores = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0)
b_opi_start_scores = F.softmax(b_opi_start_scores[0], dim=1)
b_opi_end_scores = F.softmax(b_opi_end_scores[0], dim=1)
b_opi_start_prob, b_opi_start_ind = torch.max(b_opi_start_scores, dim=1)
b_opi_end_prob, b_opi_end_ind = torch.max(b_opi_end_scores, dim=1)
b_opi_start_prob_temp = []
b_opi_end_prob_temp = []
b_opi_start_index_temp = []
b_opi_end_index_temp = []
for i in range(b_opi_start_ind.size(0)):
if batch_dict['backward_opi_answer_start'][0, i] != -1:
if b_opi_start_ind[i].item() == 1:
b_opi_start_index_temp.append(i)
b_opi_start_prob_temp.append(b_opi_start_prob[i].item())
if b_opi_end_ind[i].item() == 1:
b_opi_end_index_temp.append(i)
b_opi_end_prob_temp.append(b_opi_end_prob[i].item())
b_opi_start_index, b_opi_end_index, b_opi_prob = utils.filter_unpaired(
b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_index_temp, b_opi_end_index_temp)
# backward q_2
for i in range(len(b_opi_start_index)):
aspect_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What aspect does the opinion'.split(' ')])
for j in range(b_opi_start_index[i], b_opi_end_index[i] + 1):
aspect_query.append(batch_dict['backward_opi_query'][0][j].item())
aspect_query.append(t.convert_tokens_to_ids('describe'))
aspect_query.append(t.convert_tokens_to_ids('?'))
aspect_query.append(t.convert_tokens_to_ids('[SEP]'))
aspect_query_seg = [0] * len(aspect_query)
b_asp_length = len(aspect_query)
aspect_query = torch.tensor(aspect_query).long().cuda()
aspect_query = torch.cat([aspect_query, passenge], -1).unsqueeze(0)
aspect_query_seg += [1] * passenge.size(0)
aspect_query_mask = torch.ones(aspect_query.size(1)).float().cuda().unsqueeze(0)
aspect_query_seg = torch.tensor(aspect_query_seg).long().cuda().unsqueeze(0)
b_asp_start_scores, b_asp_end_scores = model(aspect_query, aspect_query_mask, aspect_query_seg, 0)
b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
b_asp_start_prob, b_asp_start_ind = torch.max(b_asp_start_scores, dim=1)
b_asp_end_prob, b_asp_end_ind = torch.max(b_asp_end_scores, dim=1)
b_asp_start_prob_temp = []
b_asp_end_prob_temp = []
b_asp_start_index_temp = []
b_asp_end_index_temp = []
for k in range(b_asp_start_ind.size(0)):
if aspect_query_seg[0, k] == 1:
if b_asp_start_ind[k].item() == 1:
b_asp_start_index_temp.append(k)
b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
if b_asp_end_ind[k].item() == 1:
b_asp_end_index_temp.append(k)
b_asp_end_prob_temp.append(b_asp_end_prob[k].item())
b_asp_start_index, b_asp_end_index, b_asp_prob = utils.filter_unpaired(
b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_index_temp, b_asp_end_index_temp)
for idx in range(len(b_asp_start_index)):
opi = [batch_dict['backward_opi_query'][0][j].item() for j in
range(b_opi_start_index[i], b_opi_end_index[i] + 1)]
asp = [aspect_query[0][j].item() for j in range(b_asp_start_index[idx], b_asp_end_index[idx] + 1)]
asp_ind = [b_asp_start_index[idx]-b_asp_length, b_asp_end_index[idx]-b_asp_length]
opi_ind = [b_opi_start_index[i]-5, b_opi_end_index[i]-5]
temp_prob = b_asp_prob[idx] * b_opi_prob[i]
if asp_ind + opi_ind not in backward_pair_ind_list:
backward_pair_list.append([asp] + [opi])
backward_pair_prob.append(temp_prob)
backward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# filter triplet
# forward
for idx in range(len(forward_pair_list)):
if forward_pair_list[idx] in backward_pair_list:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
else:
if forward_pair_prob[idx] >= beta:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
# backward
for idx in range(len(backward_pair_list)):
if backward_pair_list[idx] not in forward_pair_list:
if backward_pair_prob[idx] >= beta:
if backward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(backward_pair_list[idx][0])
final_opi_list.append([backward_pair_list[idx][1]])
final_asp_ind_list.append(backward_pair_ind_list[idx][:2])
final_opi_ind_list.append([backward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(backward_pair_list[idx][0])
if backward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(backward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(backward_pair_ind_list[idx][2:])
# sentiment
for idx in range(len(final_asp_list)):
predict_opinion_num = len(final_opi_list[idx])
sentiment_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What sentiment given the aspect'.split(' ')])
sentiment_query+=final_asp_list[idx]
sentiment_query += t.convert_tokens_to_ids([word.lower() for word in 'and the opinion'.split(' ')])
            # concatenate all predicted opinions for this aspect into the sentiment query
for idy in range(predict_opinion_num):
sentiment_query+=final_opi_list[idx][idy]
if idy < predict_opinion_num - 1:
sentiment_query.append(t.convert_tokens_to_ids('/'))
sentiment_query.append(t.convert_tokens_to_ids('?'))
sentiment_query.append(t.convert_tokens_to_ids('[SEP]'))
sentiment_query_seg = [0] * len(sentiment_query)
sentiment_query = torch.tensor(sentiment_query).long().cuda()
sentiment_query = torch.cat([sentiment_query, passenge], -1).unsqueeze(0)
sentiment_query_seg += [1] * passenge.size(0)
sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().cuda().unsqueeze(0)
sentiment_query_seg = torch.tensor(sentiment_query_seg).long().cuda().unsqueeze(0)
sentiment_scores = model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 1)
sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()
            # each predicted opinion forms one triplet with this aspect and the predicted sentiment
for idy in range(predict_opinion_num):
asp_f = []
opi_f = []
asp_f.append(final_asp_ind_list[idx][0])
asp_f.append(final_asp_ind_list[idx][1])
opi_f.append(final_opi_ind_list[idx][idy][0])
opi_f.append(final_opi_ind_list[idx][idy][1])
triplet_predict = asp_f + opi_f + [sentiment_predicted]
triplets_predict.append(triplet_predict)
if opi_f not in opi_predict:
opi_predict.append(opi_f)
if asp_f + opi_f not in asp_opi_predict:
asp_opi_predict.append(asp_f + opi_f)
if asp_f + [sentiment_predicted] not in asp_pol_predict:
asp_pol_predict.append(asp_f + [sentiment_predicted])
if asp_f not in asp_predict:
asp_predict.append(asp_f)
all_target.append(triplets_target)
all_pred.append(triplets_predict)
triplet_target_num += len(triplets_target)
asp_target_num += len(asp_target)
opi_target_num += len(opi_target)
asp_opi_target_num += len(asp_opi_target)
asp_pol_target_num += len(asp_pol_target)
triplet_predict_num += len(triplets_predict)
asp_predict_num += len(asp_predict)
opi_predict_num += len(opi_predict)
asp_opi_predict_num += len(asp_opi_predict)
asp_pol_predict_num += len(asp_pol_predict)
for trip in triplets_target:
for trip_ in triplets_predict:
if trip_ == trip:
triplet_match_num += 1
for trip in asp_target:
for trip_ in asp_predict:
if trip_ == trip:
asp_match_num += 1
for trip in opi_target:
for trip_ in opi_predict:
if trip_ == trip:
opi_match_num += 1
for trip in asp_opi_target:
for trip_ in asp_opi_predict:
if trip_ == trip:
asp_opi_match_num += 1
for trip in asp_pol_target:
for trip_ in asp_pol_predict:
if trip_ == trip:
asp_pol_match_num += 1
precision = float(triplet_match_num) / float(triplet_predict_num+1e-6)
recall = float(triplet_match_num) / float(triplet_target_num+1e-6)
f1 = 2 * precision * recall / (precision + recall+1e-6)
logger.info('Triplet - Precision: {}\tRecall: {}\tF1: {}'.format(precision, recall, f1))
precision_aspect = float(asp_match_num) / float(asp_predict_num+1e-6)
recall_aspect = float(asp_match_num) / float(asp_target_num+1e-6)
f1_aspect = 2 * precision_aspect * recall_aspect / (precision_aspect + recall_aspect+1e-6)
logger.info('Aspect - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect, recall_aspect, f1_aspect))
precision_opinion = float(opi_match_num) / float(opi_predict_num+1e-6)
recall_opinion = float(opi_match_num) / float(opi_target_num+1e-6)
f1_opinion = 2 * precision_opinion * recall_opinion / (precision_opinion + recall_opinion+1e-6)
logger.info('Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_opinion, recall_opinion, f1_opinion))
precision_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_predict_num+1e-6)
recall_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_target_num+1e-6)
f1_aspect_sentiment = 2 * precision_aspect_sentiment * recall_aspect_sentiment / (
precision_aspect_sentiment + recall_aspect_sentiment+1e-6)
logger.info('Aspect-Sentiment - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_sentiment,
recall_aspect_sentiment,
f1_aspect_sentiment))
precision_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_predict_num+1e-6)
recall_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_target_num+1e-6)
f1_aspect_opinion = 2 * precision_aspect_opinion * recall_aspect_opinion / (
precision_aspect_opinion + recall_aspect_opinion+1e-6)
logger.info(
'Aspect-Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_opinion, recall_aspect_opinion,
f1_aspect_opinion))
metric = {'triplet': {'p': precision, 'r': recall, 'f1': f1},
'aspect': {'p': precision_aspect, 'r': recall_aspect, 'f1': f1_aspect},
'opinion': {'p': precision_opinion, 'r': recall_opinion, 'f1': f1_opinion},
'aspect-sentiment': {'p': precision_aspect_sentiment, 'r': recall_aspect_sentiment, 'f1': f1_aspect_sentiment},
'aspect-opinion': {'p': precision_aspect_opinion, 'r': recall_aspect_opinion, 'f1': f1_aspect_opinion}}
triplets = {'pred': all_pred, 'target': all_target}
return metric, triplets
def main(args, tokenize):
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.tmp_log), exist_ok=True)
os.makedirs(args.model_dir, exist_ok=True)
# init logger
logger = utils.get_logger(os.path.join(args.tmp_log, args.model_name + '.log'))
# load data
logger.info('loading data......')
# init model
logger.info('initial model......')
model = Model.BERTModel(args)
if args.ifgpu:
model = model.cuda()
# print args
logger.info(args)
if args.mode == 'test':
        logger.info('start evaluating on the dev set......')
target_data_path = os.path.join(args.data_dir, args.target + '.pt')
target_standard_data_path = os.path.join(args.data_dir, args.target + '_standard.pt')
target_total_data = torch.load(target_data_path)
target_standard_data = torch.load(target_standard_data_path)
dev_standard = target_standard_data['dev']
dev_data = target_total_data['dev']
dev_dataset = Data.ReviewDataset(None, dev_data, None, 'dev')
# load checkpoint
logger.info('loading checkpoint......')
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
model.load_state_dict(checkpoint['net'])
model.eval()
batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
# eval
logger.info('evaluating......')
metric, triplets = test(model, tokenize, batch_generator_dev, dev_standard, args.beta, logger)
with open(os.path.join(args.log_dir, args.model_name, 'dev_metric.txt'), 'w') as f:
f.write(str(metric) + '\n')
with open(os.path.join(args.log_dir, args.model_name, 'dev_pred.txt'), 'w') as f:
for p, t in zip(triplets['pred'], triplets['target']):
f.write(str({'pred': p, 'target': t}) + '\n')
logger.info('start testing......')
target_data_path = os.path.join(args.data_dir, args.target + '.pt')
target_standard_data_path = os.path.join(args.data_dir, args.target + '_standard.pt')
target_total_data = torch.load(target_data_path)
target_standard_data = torch.load(target_standard_data_path)
test_standard = target_standard_data['test']
test_data = target_total_data['test']
test_dataset = Data.ReviewDataset(None, None, test_data, 'test')
# load checkpoint
logger.info('loading checkpoint......')
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
model.load_state_dict(checkpoint['net'])
model.eval()
batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
# eval
logger.info('evaluating......')
metric, triplets = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
with open(os.path.join(args.log_dir, args.model_name, 'test_metric.txt'), 'w') as f:
f.write(str(metric) + '\n')
with open(os.path.join(args.log_dir, args.model_name, 'test_pred.txt'), 'w') as f:
for p, t in zip(triplets['pred'], triplets['target']):
f.write(str({'pred': p, 'target': t}) + '\n')
elif args.mode == 'train':
source_data_path = os.path.join(args.data_dir, args.source + '.pt')
source_standard_data_path = os.path.join(args.data_dir, args.source + '_standard.pt')
source_total_data = torch.load(source_data_path)
source_standard_data = torch.load(source_standard_data_path)
train_data = source_total_data['train']
dev_data = source_total_data['dev']
dev_standard = source_standard_data['dev']
train_dataset = Data.ReviewDataset(train_data, dev_data, None, 'train')
dev_dataset = Data.ReviewDataset(train_data, dev_data, None, 'dev')
batch_num_train = train_dataset.get_batch_num(args.batch_size)
unlabeled_dataset = Unlabeled_Dataset(os.path.join(args.unlabeled_data, args.target + '.txt'), tokenize)
# optimizer
logger.info('initial optimizer......')
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if "_bert" in n], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if "_bert" not in n],
'lr': args.learning_rate, 'weight_decay': 0.01}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.tuning_bert_rate, correct_bias=False)
# load saved model, optimizer and epoch num
if args.reload and os.path.exists(args.checkpoint_path):
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['net'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
logger.info('Reload model and optimizer after training epoch {}'.format(checkpoint['epoch']))
else:
start_epoch = 1
logger.info('New model and optimizer from epoch 0')
# scheduler
training_steps = args.epoch_num * batch_num_train
warmup_steps = int(training_steps * args.warm_up)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=training_steps)
# training
logger.info('begin training......')
best_dev_f1 = 0.
# unlabeled_generator = Data.generate_fi_batches(dataset=unlabeled_dataset, batch_size=args.batch_size,
# ifgpu=args.ifgpu)
        unlabeled_dataloader = DataLoader(dataset=unlabeled_dataset, batch_size=args.batch_size,
                                          shuffle=True, drop_last=True)
        it = iter(unlabeled_dataloader)
step = 0
for epoch in range(start_epoch, args.epoch_num+1):
model.train()
model.zero_grad()
batch_generator = Data.generate_fi_batches(dataset=train_dataset, batch_size=args.batch_size,
ifgpu=args.ifgpu)
for batch_index, batch_dict in enumerate(batch_generator):
step += 1
p = step / training_steps
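                # gradient-reversal coefficient: ramps smoothly from 0 to 1 over training,
                # following the schedule used in DANN (Ganin & Lempitsky, 2015)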
alpha = 2. / (1. + np.exp(-10 * p)) - 1
optimizer.zero_grad()
# q1_a
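                # domain labels for the labelled batch: passage positions are tagged Domain.Source,
                # query positions keep -1 so they can be skipped by the domain loss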
zero = torch.zeros_like(batch_dict['forward_asp_answer_start'])
domain_label = torch.where(batch_dict['forward_asp_answer_start'] != -1, zero + Domain.Source, zero - 1)
ret = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0, alpha=alpha, domain=domain_label)
(f_aspect_start_scores, f_aspect_end_scores), f_aspect_domain_scores = ret['cls'], ret['domain_scores']
f_asp_loss = utils.calculate_entity_loss(f_aspect_start_scores, f_aspect_end_scores,
batch_dict['forward_asp_answer_start'],
batch_dict['forward_asp_answer_end'])
f_asp_domain_loss = utils.calculate_domain_loss(f_aspect_domain_scores, domain_label)
# q1_b
zero = torch.zeros_like(batch_dict['backward_opi_answer_start'])
domain_label = torch.where(batch_dict['backward_opi_answer_start'] != -1, zero + Domain.Source, zero - 1)
ret = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0, alpha=alpha, domain=domain_label)
(b_opi_start_scores, b_opi_end_scores), b_opi_domain_scores = ret['cls'], ret['domain_scores']
b_opi_loss = utils.calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
batch_dict['backward_opi_answer_start'],
batch_dict['backward_opi_answer_end'])
b_opi_domain_loss = utils.calculate_domain_loss(b_opi_domain_scores, domain_label)
# q2_a
zero = torch.zeros_like(batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)))
domain_label = torch.where(batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)) != -1, zero + Domain.Source, zero - 1)
ret = model(
batch_dict['forward_opi_query'].view(-1, batch_dict['forward_opi_query'].size(-1)),
batch_dict['forward_opi_query_mask'].view(-1, batch_dict['forward_opi_query_mask'].size(-1)),
batch_dict['forward_opi_query_seg'].view(-1, batch_dict['forward_opi_query_seg'].size(-1)),
0, alpha=alpha, domain=domain_label)
(f_opi_start_scores, f_opi_end_scores), f_opi_domain_scores = ret['cls'], ret['domain_scores']
f_opi_loss = utils.calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)),
batch_dict['forward_opi_answer_end'].view(-1, batch_dict['forward_opi_answer_end'].size(-1)))
f_opi_domain_loss = utils.calculate_domain_loss(f_opi_domain_scores, domain_label)
# q2_b
zero = torch.zeros_like(batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)))
domain_label = torch.where(batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)) != -1, zero + Domain.Source, zero - 1)
ret = model(
batch_dict['backward_asp_query'].view(-1, batch_dict['backward_asp_query'].size(-1)),
batch_dict['backward_asp_query_mask'].view(-1, batch_dict['backward_asp_query_mask'].size(-1)),
batch_dict['backward_asp_query_seg'].view(-1, batch_dict['backward_asp_query_seg'].size(-1)),
0, alpha=alpha, domain=domain_label)
(b_asp_start_scores, b_asp_end_scores), b_asp_domain_scores = ret['cls'], ret['domain_scores']
b_asp_loss = utils.calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)),
batch_dict['backward_asp_answer_end'].view(-1, batch_dict['backward_asp_answer_end'].size(-1)))
b_asp_domain_loss = utils.calculate_domain_loss(b_asp_domain_scores, domain_label)
# q_3
zero = torch.zeros_like(batch_dict['sentiment_answer'].view(-1))
domain_label = torch.where(batch_dict['sentiment_answer'].view(-1) != -1, zero + Domain.Source, zero - 1)
ret = model(batch_dict['sentiment_query'].view(-1, batch_dict['sentiment_query'].size(-1)),
batch_dict['sentiment_query_mask'].view(-1, batch_dict['sentiment_query_mask'].size(-1)),
batch_dict['sentiment_query_seg'].view(-1, batch_dict['sentiment_query_seg'].size(-1)),
1, alpha=alpha, domain=domain_label)
sentiment_scores, sentiment_domain_scores = ret['cls'], ret['domain_scores']
sentiment_loss = utils.calculate_sentiment_loss(sentiment_scores, batch_dict['sentiment_answer'].view(-1))
sentiment_domain_loss = utils.calculate_sentiment_domain_loss(sentiment_domain_scores, domain_label)
# loss
loss_sum = f_asp_loss + f_opi_loss + b_opi_loss + b_asp_loss + args.beta*sentiment_loss
if step % args.ad_steps == 0:
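                    # every `ad_steps` updates, add the adversarial objective: scaled source-side
                    # domain losses plus a domain loss on one unlabelled target-domain batch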
domain_loss = 0.2 * (f_asp_domain_loss + f_opi_domain_loss + b_opi_domain_loss + b_asp_domain_loss + sentiment_domain_loss)
try:
                        unlabeled = next(it)
                    except StopIteration:
                        it = iter(unlabeled_dataloader)
                        unlabeled = next(it)
# for i in range(len(unlabeled)):
# unlabeled[i] = unlabeled[i].cuda()
for k in unlabeled:
unlabeled[k] = unlabeled[k].cuda()
# domain_scores = model(unlabeled[0], unlabeled[2], unlabeled[1], step=-1, alpha=alpha, domain=unlabeled[3])['domain_scores']
domain_scores = model(query_tensor=unlabeled['input_ids'], query_mask=unlabeled['attention_mask'], query_seg=unlabeled['token_type_ids'], step=0, alpha=alpha, domain=unlabeled['domain_label'])['domain_scores']
target_domain_loss = utils.calculate_domain_loss(domain_scores, unlabeled['domain_label'])
domain_loss += target_domain_loss
loss_sum += domain_loss
loss_sum.backward()
optimizer.step()
scheduler.step()
# train logger
if batch_index % 10 == 0:
logger.info('Epoch:[{}/{}]\t Batch:[{}/{}]\t Loss Sum:{}\t '
'forward Loss:{};{}\t backward Loss:{};{}\t Sentiment Loss:{}'.
format(epoch, args.epoch_num, batch_index, batch_num_train,
round(loss_sum.item(), 4),
round(f_asp_loss.item(), 4), round(f_opi_loss.item(), 4),
round(b_asp_loss.item(), 4), round(b_opi_loss.item(), 4),
round(sentiment_loss.item(), 4)))
# validation
batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
metric, _ = test(model, tokenize, batch_generator_dev, dev_standard, args.inference_beta, logger)
f1 = metric['triplet']['f1']
# save model and optimizer
if f1 > best_dev_f1:
best_dev_f1 = f1
logger.info('best dev f1: {}\t epoch: {}'.format(best_dev_f1, epoch))
logger.info('Model saved after epoch {}'.format(epoch))
state = {'net': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
torch.save(state, os.path.join(args.model_dir, args.model_name + '.pt'))
# test
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.inference_beta, logger)
# logger.info('start testing......')
# test_dataset = Data.ReviewDataset(train_data, dev_data, test_data, 'test')
# # load checkpoint
# logger.info('loading checkpoint......')
# checkpoint = torch.load(args.save_model_path)
# model.load_state_dict(checkpoint['net'])
# model.eval()
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# # eval
# logger.info('evaluating......')
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
else:
logger.info('Error mode!')
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bidirectional MRC-based sentiment triplet extraction')
parser.add_argument('--data_dir', type=str, default="./data/preprocess/")
parser.add_argument('--log_dir', type=str, default="./log/")
parser.add_argument('--tmp_log', type=str)
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"])
parser.add_argument('--reload', type=bool, default=False)
parser.add_argument('--checkpoint_path', type=str, default="./model/14lap/modelFinal.model")
parser.add_argument('--model_dir', type=str, default="./model/")
parser.add_argument('--model_name', type=str, default="1")
# model hyper-parameter
parser.add_argument('--bert_model_type', type=str, default="bert-base-uncased")
parser.add_argument('--hidden_size', type=int, default=768)
parser.add_argument('--inference_beta', type=float, default=0.8)
# training hyper-parameter
parser.add_argument('--ifgpu', type=bool, default=True)
parser.add_argument('--epoch_num', type=int, default=40)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--tuning_bert_rate', type=float, default=1e-5)
parser.add_argument('--warm_up', type=float, default=0.1)
parser.add_argument('--beta', type=float, default=1)
parser.add_argument('--unlabeled_data', type=str, default='../amazon')
parser.add_argument('--ad_steps', type=int)
args = parser.parse_args()
if os.path.exists(os.path.join(args.log_dir, args.model_name, 'test_metric.txt')):
print('-' * 20, 'file exists', '-' * 20)
else:
t = BertTokenizer.from_pretrained(args.bert_model_type)
main(args, t)
| 40,827 | 54.928767 | 229 | py |
DMASTE | DMASTE-main/BMRC/DANN_Model.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
from transformers import BertTokenizer, BertModel, BertConfig
import torch.nn as nn
from functions import ReverseLayerF
class BERTModel(nn.Module):
def __init__(self, args):
hidden_size = args.hidden_size
super(BERTModel, self).__init__()
        # BERT model
# if args.bert_model_type == 'bert-base-uncased':
self._bert = BertModel.from_pretrained(args.bert_model_type)
self._tokenizer = BertTokenizer.from_pretrained(args.bert_model_type)
print('Bertbase model loaded')
# else:
# raise KeyError('Config.args.bert_model_type should be bert-based-uncased. ')
self.classifier_start = nn.Linear(hidden_size, 2)
self.classifier_end = nn.Linear(hidden_size, 2)
self._classifier_sentiment = nn.Linear(hidden_size, 3)
self.domain_classifier = nn.Linear(hidden_size, 2)
def forward(self, query_tensor, query_mask, query_seg, step, alpha=None, domain=None):
hidden_states = self._bert(query_tensor, attention_mask=query_mask, token_type_ids=query_seg)[0]
ret = dict()
if step == 0: # predict entity
out_scores_start = self.classifier_start(hidden_states)
out_scores_end = self.classifier_end(hidden_states)
ret['cls'] = [out_scores_start, out_scores_end]
# return out_scores_start, out_scores_end
else: # predict sentiment
cls_hidden_states = hidden_states[:, 0, :]
cls_hidden_scores = self._classifier_sentiment(cls_hidden_states)
ret['cls'] = cls_hidden_scores
# return cls_hidden_scores
if domain is not None:
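            # adversarial branch: gradients from the domain classifier are reversed (scaled by
            # -alpha), pushing the shared encoder toward domain-invariant features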
reverse_feature = ReverseLayerF.apply(hidden_states if step == 0 else hidden_states[:, 0, :], alpha)
domain_scores = self.domain_classifier(reverse_feature)
ret['domain_scores'] = domain_scores
return ret
else:
return ret['cls']
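# A minimal usage sketch (a sketch only; the tensor names below are assumed, not part of this file):
#   model = BERTModel(args).cuda()
#   start_scores, end_scores = model(ids, mask, seg, 0)              # span-extraction head
#   sentiment_scores = model(ids, mask, seg, 1)                      # sentiment head
#   out = model(ids, mask, seg, 0, alpha=0.5, domain=domain_label)   # also returns out['domain_scores']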
| 2,062 | 38.673077 | 112 | py |
DMASTE | DMASTE-main/BMRC/Data.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
from torch.utils.data import Dataset, DataLoader
import numpy as np
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query']
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask']
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask']
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg']
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg']
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query']
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data['_backward_asp_query_mask']
self._backward_opi_query_mask = pre_data['_backward_opi_query_mask']
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg']
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg']
self._sentiment_query = pre_data['_sentiment_query']
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask']
self._sentiment_query_seg = pre_data['_sentiment_query_seg']
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
class ReviewDataset(Dataset):
def __init__(self, train, dev, test, set):
'''
        Review dataset wrapping the preprocessed splits.
        :param train: list, training set of 14 lap, 14 res, 15 res, 16 res
        :param dev: list, the same
        :param test: list, the same
        :param set: str, which split this instance serves ('train', 'dev' or 'test')
'''
self._train_set = train
self._dev_set = dev
self._test_set = test
if set == 'train':
self._dataset = self._train_set
elif set == 'dev':
self._dataset = self._dev_set
elif set == 'test':
self._dataset = self._test_set
self._forward_asp_query = self._dataset._forward_asp_query
self._forward_opi_query = self._dataset._forward_opi_query
self._forward_asp_answer_start = self._dataset._forward_asp_answer_start
self._forward_asp_answer_end = self._dataset._forward_asp_answer_end
self._forward_opi_answer_start = self._dataset._forward_opi_answer_start
self._forward_opi_answer_end = self._dataset._forward_opi_answer_end
self._forward_asp_query_mask = self._dataset._forward_asp_query_mask
self._forward_opi_query_mask = self._dataset._forward_opi_query_mask
self._forward_asp_query_seg = self._dataset._forward_asp_query_seg
self._forward_opi_query_seg = self._dataset._forward_opi_query_seg
self._backward_asp_query = self._dataset._backward_asp_query
self._backward_opi_query = self._dataset._backward_opi_query
self._backward_asp_answer_start = self._dataset._backward_asp_answer_start
self._backward_asp_answer_end = self._dataset._backward_asp_answer_end
self._backward_opi_answer_start = self._dataset._backward_opi_answer_start
self._backward_opi_answer_end = self._dataset._backward_opi_answer_end
self._backward_asp_query_mask = self._dataset._backward_asp_query_mask
self._backward_opi_query_mask = self._dataset._backward_opi_query_mask
self._backward_asp_query_seg = self._dataset._backward_asp_query_seg
self._backward_opi_query_seg = self._dataset._backward_opi_query_seg
self._sentiment_query = self._dataset._sentiment_query
self._sentiment_answer = self._dataset._sentiment_answer
self._sentiment_query_mask = self._dataset._sentiment_query_mask
self._sentiment_query_seg = self._dataset._sentiment_query_seg
self._aspect_num = self._dataset._aspect_num
self._opinion_num = self._dataset._opinion_num
def get_batch_num(self, batch_size):
return len(self._forward_asp_query) // batch_size
def __len__(self):
return len(self._forward_asp_query)
def __getitem__(self, item):
forward_asp_query = self._forward_asp_query[item]
forward_opi_query = self._forward_opi_query[item]
forward_asp_answer_start = self._forward_asp_answer_start[item]
forward_asp_answer_end = self._forward_asp_answer_end[item]
forward_opi_answer_start = self._forward_opi_answer_start[item]
forward_opi_answer_end = self._forward_opi_answer_end[item]
forward_asp_query_mask = self._forward_asp_query_mask[item]
forward_opi_query_mask = self._forward_opi_query_mask[item]
forward_asp_query_seg = self._forward_asp_query_seg[item]
forward_opi_query_seg = self._forward_opi_query_seg[item]
backward_asp_query = self._backward_asp_query[item]
backward_opi_query = self._backward_opi_query[item]
backward_asp_answer_start = self._backward_asp_answer_start[item]
backward_asp_answer_end = self._backward_asp_answer_end[item]
backward_opi_answer_start = self._backward_opi_answer_start[item]
backward_opi_answer_end = self._backward_opi_answer_end[item]
backward_asp_query_mask = self._backward_asp_query_mask[item]
backward_opi_query_mask = self._backward_opi_query_mask[item]
backward_asp_query_seg = self._backward_asp_query_seg[item]
backward_opi_query_seg = self._backward_opi_query_seg[item]
sentiment_query = self._sentiment_query[item]
sentiment_answer = self._sentiment_answer[item]
sentiment_query_mask = self._sentiment_query_mask[item]
sentiment_query_seg = self._sentiment_query_seg[item]
aspect_num = self._aspect_num[item]
opinion_num = self._opinion_num[item]
return {"forward_asp_query": np.array(forward_asp_query),
"forward_opi_query": np.array(forward_opi_query),
"forward_asp_answer_start": np.array(forward_asp_answer_start),
"forward_asp_answer_end": np.array(forward_asp_answer_end),
"forward_opi_answer_start": np.array(forward_opi_answer_start),
"forward_opi_answer_end": np.array(forward_opi_answer_end),
"forward_asp_query_mask": np.array(forward_asp_query_mask),
"forward_opi_query_mask": np.array(forward_opi_query_mask),
"forward_asp_query_seg": np.array(forward_asp_query_seg),
"forward_opi_query_seg": np.array(forward_opi_query_seg),
"backward_asp_query": np.array(backward_asp_query),
"backward_opi_query": np.array(backward_opi_query),
"backward_asp_answer_start": np.array(backward_asp_answer_start),
"backward_asp_answer_end": np.array(backward_asp_answer_end),
"backward_opi_answer_start": np.array(backward_opi_answer_start),
"backward_opi_answer_end": np.array(backward_opi_answer_end),
"backward_asp_query_mask": np.array(backward_asp_query_mask),
"backward_opi_query_mask": np.array(backward_opi_query_mask),
"backward_asp_query_seg": np.array(backward_asp_query_seg),
"backward_opi_query_seg": np.array(backward_opi_query_seg),
"sentiment_query": np.array(sentiment_query),
"sentiment_answer": np.array(sentiment_answer),
"sentiment_query_mask": np.array(sentiment_query_mask),
"sentiment_query_seg": np.array(sentiment_query_seg),
"aspect_num": np.array(aspect_num),
"opinion_num": np.array(opinion_num)
}
def generate_fi_batches(dataset, batch_size, shuffle=True, drop_last=True, ifgpu=True):
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_dict = {}
for name, tensor in data_dict.items():
if ifgpu:
out_dict[name] = data_dict[name].cuda()
else:
out_dict[name] = data_dict[name]
yield out_dict
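# Illustrative usage sketch (added for clarity, not part of the original file);
# `dataset` stands for an OriginalDataset instance and the batch size / GPU flag
# are placeholder values:
#     for batch in generate_fi_batches(dataset, batch_size=4, ifgpu=False):
#         forward_asp_query = batch['forward_asp_query']  # LongTensor of shape [4, max_query_len]
#         aspect_num = batch['aspect_num']                # LongTensor of shape [4]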
| 8,838 | 53.561728 | 87 | py |
DMASTE | DMASTE-main/BMRC/functions.py | from torch.autograd import Function
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
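    # Illustrative note (added for clarity, not part of the original file): the
    # identity forward pass together with the negated backward pass below forms a
    # DANN-style gradient reversal layer; a typical call site looks like
    #     reversed_features = ReverseLayerF.apply(features, alpha)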
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None | 305 | 18.125 | 46 | py |
DMASTE | DMASTE-main/BMRC/dataProcess.py | # @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import pickle
import torch
import os
class dual_sample(object):
def __init__(self,
original_sample,
text,
forward_querys,
forward_answers,
backward_querys,
backward_answers,
sentiment_querys,
sentiment_answers):
self.original_sample = original_sample #
self.text = text #
self.forward_querys=forward_querys
self.forward_answers=forward_answers
self.backward_querys=backward_querys
self.backward_answers=backward_answers
self.sentiment_querys=sentiment_querys
self.sentiment_answers=sentiment_answers
def get_text(lines):
    # Line sample (sentence####list of triplets, as parsed below):
    # It is always reliable , never bugged and responds well .####[([8], [9], 'POS')]
    # where each triplet is (aspect_word_indices, opinion_word_indices, sentiment_tag)
text_list = []
aspect_list = []
opinion_list = []
triplet_data = []
sentiment_map = {'POS': 0, 'NEG': 1, 'NEU': 2}
for f in lines:
temp = f.split("####")
assert len(temp) == 2
word_list = temp[0].split()
ts = eval(temp[1])
ts = [(t[0], t[1], sentiment_map[t[2]])for t in ts]
triplet_data.append(ts)
# aspect_label_list = [t.split("=")[-1] for t in temp[1].split()]
# opinion_label_list = [t.split("=")[-1] for t in temp[2].split()]
# aspect_label_list = ['O']
# assert len(word_list) == len(aspect_label_list) == len(opinion_label_list)
text_list.append(word_list)
# aspect_list.append(aspect_label_list)
# opinion_list.append(opinion_label_list)
return text_list, aspect_list, opinion_list, triplet_data
def valid_data(triplet, aspect, opinion):
for t in triplet[0][0]:
assert aspect[t] != ["O"]
for t in triplet[0][1]:
assert opinion[t] != ["O"]
def fusion_dual_triplet(triplet):
triplet_aspect = []
triplet_opinion = []
triplet_sentiment = []
dual_opinion = []
dual_aspect = []
for t in triplet:
if t[0] not in triplet_aspect:
triplet_aspect.append(t[0])
triplet_opinion.append([t[1]])
triplet_sentiment.append(t[2])
else:
idx = triplet_aspect.index(t[0])
triplet_opinion[idx].append(t[1])
# assert triplet_sentiment[idx] == sentiment_map[t[2]], f'{triplet_sentiment[idx]} {sentiment_map[t[2]]}'
if t[1] not in dual_opinion:
dual_opinion.append(t[1])
dual_aspect.append([t[0]])
else:
idx = dual_opinion.index(t[1])
dual_aspect[idx].append(t[0])
return triplet_aspect, triplet_opinion, triplet_sentiment, dual_opinion, dual_aspect
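# Worked example (illustrative, not part of the original file): for
#     triplet = [([1, 2], [4], 0), ([1, 2], [7], 0)]
# the function returns
#     triplet_aspect    = [[1, 2]]
#     triplet_opinion   = [[[4], [7]]]
#     triplet_sentiment = [0]
#     dual_opinion      = [[4], [7]]
#     dual_aspect       = [[[1, 2]], [[1, 2]]]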
if __name__ == '__main__':
home_path = "../ia-dataset/"
dataset_name_list = os.listdir(home_path)
dataset_name_list = [x for x in dataset_name_list if '.' not in x]
print(dataset_name_list)
# dataset_type_list = ["train", "test", "dev"]
for dataset_name in dataset_name_list:
dataset_dir = os.path.join(home_path, dataset_name)
dataset_type_list = os.listdir(dataset_dir)
dataset_type_list = [x.split('.')[0] for x in dataset_type_list]
for dataset_type in dataset_type_list:
output_path = "./data/preprocess/" + dataset_name + "_" + dataset_type + "_dual.pt"
# read triple
# f = open(home_path + dataset_name + "/" + dataset_name + "_pair/" + dataset_type + "_pair.pkl", "rb")
# triple_data = pickle.load(f)
# f.close()
# read text
f = open(home_path + dataset_name + "/" + dataset_type + ".txt", "r", encoding="utf-8")
text_lines = f.readlines()
f.close()
# get text
text_list, _, _, triple_data = get_text(text_lines)
sample_list = []
for k in range(len(text_list)):
triplet = triple_data[k]
text = text_list[k]
# valid_data(triplet, aspect_list[k], opinion_list[k])
triplet_aspect, triplet_opinion, triplet_sentiment, dual_opinion, dual_aspect = fusion_dual_triplet(triplet)
forward_query_list = []
backward_query_list = []
sentiment_query_list = []
forward_answer_list = []
backward_answer_list = []
sentiment_answer_list = []
forward_query_list.append(["What", "aspects", "?"])
start = [0] * len(text)
end = [0] * len(text)
for ta in triplet_aspect:
start[ta[0]] = 1
end[ta[-1]] = 1
forward_answer_list.append([start, end])
backward_query_list.append(["What", "opinions", "?"])
start = [0] * len(text)
end = [0] * len(text)
for to in dual_opinion:
start[to[0]] = 1
end[to[-1]] = 1
backward_answer_list.append([start, end])
for idx in range(len(triplet_aspect)):
ta = triplet_aspect[idx]
# opinion query
query = ["What", "opinion", "given", "the", "aspect"] + text[ta[0]:ta[-1] + 1] + ["?"]
forward_query_list.append(query)
start = [0] * len(text)
end = [0] * len(text)
for to in triplet_opinion[idx]:
start[to[0]] = 1
end[to[-1]] = 1
forward_answer_list.append([start, end])
# sentiment query
query = ["What", "sentiment", "given", "the", "aspect"] + text[ta[0]:ta[-1] + 1] + ["and", "the",
"opinion"]
for idy in range(len(triplet_opinion[idx]) - 1):
to = triplet_opinion[idx][idy]
query += text[to[0]:to[-1] + 1] + ["/"]
to = triplet_opinion[idx][-1]
query += text[to[0]:to[-1] + 1] + ["?"]
sentiment_query_list.append(query)
sentiment_answer_list.append(triplet_sentiment[idx])
for idx in range(len(dual_opinion)):
ta = dual_opinion[idx]
# opinion query
query = ["What", "aspect", "does", "the", "opinion"] + text[ta[0]:ta[-1] + 1] + ["describe", "?"]
backward_query_list.append(query)
start = [0] * len(text)
end = [0] * len(text)
for to in dual_aspect[idx]:
start[to[0]] = 1
end[to[-1]] = 1
backward_answer_list.append([start, end])
temp_sample = dual_sample(text_lines[k], text, forward_query_list, forward_answer_list, backward_query_list, backward_answer_list, sentiment_query_list, sentiment_answer_list)
sample_list.append(temp_sample)
torch.save(sample_list, output_path)
| 7,431 | 42.209302 | 230 | py |
DMASTE | DMASTE-main/BMRC/utils.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import torch
from torch.nn import functional as F
import logging
def normalize_size(tensor):
if len(tensor.size()) == 3:
tensor = tensor.contiguous().view(-1, tensor.size(2))
elif len(tensor.size()) == 2:
tensor = tensor.contiguous().view(-1)
return tensor
def calculate_entity_loss(pred_start, pred_end, gold_start, gold_end):
pred_start = normalize_size(pred_start)
pred_end = normalize_size(pred_end)
gold_start = normalize_size(gold_start)
gold_end = normalize_size(gold_end)
weight = torch.tensor([1, 3]).float().cuda()
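    # Added clarification (not part of the original file): the class weight [1, 3]
    # up-weights the rarer positive start/end labels relative to the background class.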
loss_start = F.cross_entropy(pred_start, gold_start.long(), size_average=False, weight=weight, ignore_index=-1)
loss_end = F.cross_entropy(pred_end, gold_end.long(), size_average=False, weight=weight, ignore_index=-1)
return 0.5 * loss_start + 0.5 * loss_end
def calculate_domain_loss(pred_domain, gold_domain):
return F.cross_entropy(normalize_size(pred_domain), normalize_size(gold_domain).long(), size_average=False, ignore_index=-1)
def calculate_sentiment_domain_loss(pred_domain, gold_domain):
return F.cross_entropy(pred_domain, gold_domain.long(), size_average=False, ignore_index=-1)
def calculate_sentiment_loss(pred_sentiment, gold_sentiment):
return F.cross_entropy(pred_sentiment, gold_sentiment.long(), size_average=False, ignore_index=-1)
def get_logger(filename, verbosity=1, name=None):
level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
formatter = logging.Formatter(
"[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
)
logger = logging.getLogger(name)
logger.setLevel(level_dict[verbosity])
fh = logging.FileHandler(filename, "w")
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
def filter_prob(f_asp_prob, f_opi_prob, f_opi_start_index, f_opi_end_index, beta):
filter_start = []
filter_end = []
for idx in range(len(f_opi_prob)):
if f_asp_prob*f_opi_prob[idx]>=beta:
filter_start.append(f_opi_start_index[idx])
filter_end.append(f_opi_end_index[idx])
return filter_start, filter_end
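# Illustrative example (not part of the original file): with
#     f_asp_prob = 0.9, f_opi_prob = [0.8, 0.2], beta = 0.5
# only the first opinion span is kept, since 0.9 * 0.8 = 0.72 >= 0.5
# while 0.9 * 0.2 = 0.18 < 0.5.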
def filter_unpaired(start_prob, end_prob, start, end):
filtered_start = []
filtered_end = []
filtered_prob = []
if len(start)>0 and len(end)>0:
length = start[-1]+1 if start[-1]>=end[-1] else end[-1]+1
temp_seq = [0]*length
for s in start:
temp_seq[s]+=1
for e in end:
temp_seq[e]+=2
last_start = -1
for idx in range(len(temp_seq)):
assert temp_seq[idx]<4
if temp_seq[idx] == 1:
last_start = idx
elif temp_seq[idx] == 2:
if last_start!=-1 and idx-last_start<5:
filtered_start.append(last_start)
filtered_end.append(idx)
prob = start_prob[start.index(last_start)] * end_prob[end.index(idx)]
filtered_prob.append(prob)
last_start = -1
elif temp_seq[idx] == 3:
filtered_start.append(idx)
filtered_end.append(idx)
prob = start_prob[start.index(idx)] * end_prob[end.index(idx)]
filtered_prob.append(prob)
last_start = -1
return filtered_start, filtered_end, filtered_prob | 3,575 | 35.121212 | 128 | py |
DMASTE | DMASTE-main/BMRC/data_utils.py | from torch.utils.data import Dataset
import random
import torch
class Domain:
Target = 1
Source = 0
class Unlabeled_Dataset(Dataset):
def __init__(self, path, tokenizer, max_len=256):
self.data = []
self.max_len = max_len
with open(path) as f:
for line in f:
sent = line.split('####')[-1].strip()
words = ['[ia]'] + sent.split()
idx_list1 = random.sample(range(len(words)), 4)
idx_list2 = random.sample(range(1, 6), 4)
sample_words = [words[i: i + j] for i, j in zip(idx_list1, idx_list2)]
query_list = [["What", "aspects", "?"],
["What", "opinions", "?"],
["What", "opinion", "given", "the", "aspect"] + sample_words[0] + ["?"],
["What", "sentiment", "given", "the", "aspect"] + sample_words[1] + ["and", "the", "opinion"] + sample_words[2] + ['?'],
["What", "aspect", "does", "the", "opinion"] + sample_words[3] + ["describe", "?"]]
for query in query_list:
input_token = ['[CLS]'] + query + ['[SEP]'] + words
seg = [0] * (len(query) + 2) + [1] * len(words)
domain_label = [-1] * (len(query) + 2) + [Domain.Target] * len(words)
input_ids = tokenizer.convert_tokens_to_ids([word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in input_token])
self.data.append({'input_ids': input_ids, 'token_type_ids': seg, 'domain_label': domain_label})
def __getitem__(self, i):
self.data[i]['attention_mask'] = [1] * len(self.data[i]['input_ids'])
ret = dict()
for k in self.data[i]:
ret[k] = self.data[i][k][: self.max_len]
pad = 0 if k != 'domain_label' else -1
ret[k] = ret[k] + [pad] * (self.max_len - len(ret[k]))
ret[k] = torch.tensor(ret[k])
# return ret['input_ids'], ret['token_type_ids'], ret['attention_mask'], ret['domain_label']
return ret
def __len__(self):
return len(self.data)
if __name__ == '__main__':
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
dataset = Unlabeled_Dataset('../amazon/home.txt', tokenizer)
for i in range(10):
print(dataset[i])
print(tokenizer.convert_ids_to_tokens(dataset[i]['input_ids']))
print()
| 2,601 | 45.464286 | 150 | py |
DMASTE | DMASTE-main/BMRC/makeData_dual.py | # @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer
import numpy as np
_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
class dual_sample(object):
def __init__(self,
original_sample,
text,
forward_querys,
forward_answers,
backward_querys,
backward_answers,
sentiment_querys,
sentiment_answers):
self.original_sample = original_sample
self.text = text #
self.forward_querys = forward_querys
self.forward_answers = forward_answers
self.backward_querys = backward_querys
self.backward_answers = backward_answers
self.sentiment_querys = sentiment_querys
self.sentiment_answers = sentiment_answers
class sample_tokenized(object):
def __init__(self,
original_sample,
forward_querys,
forward_answers,
backward_querys,
backward_answers,
sentiment_querys,
sentiment_answers,
forward_seg,
backward_seg,
sentiment_seg):
self.original_sample = original_sample
self.forward_querys = forward_querys
self.forward_answers = forward_answers
self.backward_querys = backward_querys
self.backward_answers = backward_answers
self.sentiment_querys = sentiment_querys
self.sentiment_answers = sentiment_answers
self.forward_seg = forward_seg
self.backward_seg = backward_seg
self.sentiment_seg = sentiment_seg
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query'] # [max_aspect_num, max_opinion_query_length]
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query'] # [max_aspect_num, max_opinion_query_length]
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data[
'_backward_asp_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._backward_opi_query_mask = pre_data[
'_backward_opi_query_mask'] # [max_aspect_num, max_opinion_query_length]
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg'] # [max_aspect_num, max_opinion_query_length]
self._sentiment_query = pre_data['_sentiment_query'] # [max_aspect_num, max_sentiment_query_length]
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask'] # [max_aspect_num, max_sentiment_query_length]
self._sentiment_query_seg = pre_data['_sentiment_query_seg'] # [max_aspect_num, max_sentiment_query_length]
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
def pre_processing(sample_list, max_len):
_forward_asp_query = []
_forward_opi_query = []
_forward_asp_answer_start = []
_forward_asp_answer_end = []
_forward_opi_answer_start = []
_forward_opi_answer_end = []
_forward_asp_query_mask = []
_forward_opi_query_mask = []
_forward_asp_query_seg = []
_forward_opi_query_seg = []
_backward_asp_query = []
_backward_opi_query = []
_backward_asp_answer_start = []
_backward_asp_answer_end = []
_backward_opi_answer_start = []
_backward_opi_answer_end = []
_backward_asp_query_mask = []
_backward_opi_query_mask = []
_backward_asp_query_seg = []
_backward_opi_query_seg = []
_sentiment_query = []
_sentiment_answer = []
_sentiment_query_mask = []
_sentiment_query_seg = []
_aspect_num = []
_opinion_num = []
for instance in sample_list:
f_query_list = instance.forward_querys
f_answer_list = instance.forward_answers
f_query_seg_list = instance.forward_seg
b_query_list = instance.backward_querys
b_answer_list = instance.backward_answers
b_query_seg_list = instance.backward_seg
s_query_list = instance.sentiment_querys
s_answer_list = instance.sentiment_answers
s_query_seg_list = instance.sentiment_seg
# _aspect_num: 1/2/3/...
_aspect_num.append(int(len(f_query_list) - 1))
_opinion_num.append(int(len(b_query_list) - 1))
# Forward
# Aspect
# query
assert len(f_query_list[0]) == len(f_answer_list[0][0]) == len(f_answer_list[0][1])
f_asp_pad_num = max_len['mfor_asp_len'] - len(f_query_list[0])
_forward_asp_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in f_query_list[0]]))
_forward_asp_query[-1].extend([0] * f_asp_pad_num)
# query_mask
_forward_asp_query_mask.append([1 for i in range(len(f_query_list[0]))])
_forward_asp_query_mask[-1].extend([0] * f_asp_pad_num)
# answer
_forward_asp_answer_start.append(f_answer_list[0][0])
_forward_asp_answer_start[-1].extend([-1] * f_asp_pad_num)
_forward_asp_answer_end.append(f_answer_list[0][1])
_forward_asp_answer_end[-1].extend([-1] * f_asp_pad_num)
# seg
_forward_asp_query_seg.append(f_query_seg_list[0])
_forward_asp_query_seg[-1].extend([1] * f_asp_pad_num)
# Opinion
single_opinion_query = []
single_opinion_query_mask = []
single_opinion_query_seg = []
single_opinion_answer_start = []
single_opinion_answer_end = []
for i in range(1, len(f_query_list)):
assert len(f_query_list[i]) == len(f_answer_list[i][0]) == len(f_answer_list[i][1])
pad_num = max_len['mfor_opi_len'] - len(f_query_list[i])
# query
single_opinion_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in f_query_list[i]]))
single_opinion_query[-1].extend([0] * pad_num)
# query_mask
single_opinion_query_mask.append([1 for i in range(len(f_query_list[i]))])
single_opinion_query_mask[-1].extend([0] * pad_num)
# query_seg
single_opinion_query_seg.append(f_query_seg_list[i])
single_opinion_query_seg[-1].extend([1] * pad_num)
# answer
single_opinion_answer_start.append(f_answer_list[i][0])
single_opinion_answer_start[-1].extend([-1] * pad_num)
single_opinion_answer_end.append(f_answer_list[i][1])
single_opinion_answer_end[-1].extend([-1] * pad_num)
# PAD: max_aspect_num
_forward_opi_query.append(single_opinion_query)
_forward_opi_query[-1].extend([[0 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_query_mask.append(single_opinion_query_mask)
_forward_opi_query_mask[-1].extend([[0 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_query_seg.append(single_opinion_query_seg)
_forward_opi_query_seg[-1].extend([[0 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_answer_start.append(single_opinion_answer_start)
_forward_opi_answer_start[-1].extend([[-1 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_forward_opi_answer_end.append(single_opinion_answer_end)
_forward_opi_answer_end[-1].extend([[-1 for i in range(max_len['mfor_opi_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
# Backward
# opinion
# query
assert len(b_query_list[0]) == len(b_answer_list[0][0]) == len(b_answer_list[0][1])
b_opi_pad_num = max_len['mback_opi_len'] - len(b_query_list[0])
_backward_opi_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in b_query_list[0]]))
_backward_opi_query[-1].extend([0] * b_opi_pad_num)
# mask
_backward_opi_query_mask.append([1 for i in range(len(b_query_list[0]))])
_backward_opi_query_mask[-1].extend([0] * b_opi_pad_num)
# answer
_backward_opi_answer_start.append(b_answer_list[0][0])
_backward_opi_answer_start[-1].extend([-1] * b_opi_pad_num)
_backward_opi_answer_end.append(b_answer_list[0][1])
_backward_opi_answer_end[-1].extend([-1] * b_opi_pad_num)
# seg
_backward_opi_query_seg.append(b_query_seg_list[0])
_backward_opi_query_seg[-1].extend([1] * b_opi_pad_num)
# Aspect
single_aspect_query = []
single_aspect_query_mask = []
single_aspect_query_seg = []
single_aspect_answer_start = []
single_aspect_answer_end = []
for i in range(1, len(b_query_list)):
assert len(b_query_list[i]) == len(b_answer_list[i][0]) == len(b_answer_list[i][1])
pad_num = max_len['mback_asp_len'] - len(b_query_list[i])
# query
single_aspect_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in b_query_list[i]]))
single_aspect_query[-1].extend([0] * pad_num)
# query_mask
single_aspect_query_mask.append([1 for i in range(len(b_query_list[i]))])
single_aspect_query_mask[-1].extend([0] * pad_num)
# query_seg
single_aspect_query_seg.append(b_query_seg_list[i])
single_aspect_query_seg[-1].extend([1] * pad_num)
# answer
single_aspect_answer_start.append(b_answer_list[i][0])
single_aspect_answer_start[-1].extend([-1] * pad_num)
single_aspect_answer_end.append(b_answer_list[i][1])
single_aspect_answer_end[-1].extend([-1] * pad_num)
# PAD: max_opinion_num
_backward_asp_query.append(single_aspect_query)
_backward_asp_query[-1].extend([[0 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_query_mask.append(single_aspect_query_mask)
_backward_asp_query_mask[-1].extend([[0 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_query_seg.append(single_aspect_query_seg)
_backward_asp_query_seg[-1].extend([[0 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_answer_start.append(single_aspect_answer_start)
_backward_asp_answer_start[-1].extend([[-1 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
_backward_asp_answer_end.append(single_aspect_answer_end)
_backward_asp_answer_end[-1].extend([[-1 for i in range(max_len['mback_asp_len'])]] * (max_len['max_opinion_num'] - _opinion_num[-1]))
# Sentiment
single_sentiment_query = []
single_sentiment_query_mask = []
single_sentiment_query_seg = []
single_sentiment_answer = []
for j in range(len(s_query_list)):
sent_pad_num = max_len['max_sent_len'] - len(s_query_list[j])
single_sentiment_query.append(_tokenizer.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in s_query_list[j]]))
single_sentiment_query[-1].extend([0] * sent_pad_num)
single_sentiment_query_mask.append([1 for i in range(len(s_query_list[j]))])
single_sentiment_query_mask[-1].extend([0] * sent_pad_num)
# query_seg
single_sentiment_query_seg.append(s_query_seg_list[j])
single_sentiment_query_seg[-1].extend([1] * sent_pad_num)
single_sentiment_answer.append(s_answer_list[j])
_sentiment_query.append(single_sentiment_query)
_sentiment_query[-1].extend([[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_query_mask.append(single_sentiment_query_mask)
_sentiment_query_mask[-1].extend([[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_query_seg.append(single_sentiment_query_seg)
_sentiment_query_seg[-1].extend([[0 for i in range(max_len['max_sent_len'])]] * (max_len['max_aspect_num'] - _aspect_num[-1]))
_sentiment_answer.append(single_sentiment_answer)
_sentiment_answer[-1].extend([-1] * (max_len['max_aspect_num'] - _aspect_num[-1]))
def truncate(dataset, max_length):
for i in range(len(dataset)):
ins = dataset[i]
if isinstance(ins[0], list):
new_ins = []
for t in ins:
assert not isinstance(t[0], list)
new_ins.append(t[: max_length - 1] + [t[-1]])
dataset[i] = new_ins
else:
dataset[i] = ins[: max_length - 1] + [ins[-1]]
trunc_objects = [
(max_len['mfor_asp_len'], [_forward_asp_query, _forward_asp_answer_start, _forward_asp_answer_end,
_forward_asp_query_mask, _forward_asp_query_seg]),
(max_len['mfor_opi_len'], [ _forward_opi_query,_forward_opi_answer_start, _forward_opi_answer_end,
_forward_opi_query_mask,_forward_opi_query_seg]),
(max_len['mback_asp_len'],[ _backward_asp_query,_backward_asp_answer_start, _backward_asp_answer_end,
_backward_asp_query_mask, _backward_asp_query_seg]),
(max_len['mback_opi_len'],[_backward_opi_query,_backward_opi_answer_start, _backward_opi_answer_end,
_backward_opi_query_mask, _backward_opi_query_seg]),
(max_len['max_sent_len'], [_sentiment_query, _sentiment_query_mask, _sentiment_query_seg]),
(max_len['max_aspect_num'],[_sentiment_answer]),
]
for k, vs in trunc_objects:
for v in vs:
truncate(v, k)
if isinstance(v[0][0], list):
len_list = [len(x) for xx in v for x in xx]
print(k, sum(len_list) / len(len_list))
for l in len_list:
assert l == len_list[0], len_list
else:
len_list = [len(x) for x in v]
print(k, sum(len_list) / len(len_list))
for l in len_list:
assert l == len_list[0], len_list
result = {"_forward_asp_query":_forward_asp_query, "_forward_opi_query":_forward_opi_query,
"_forward_asp_answer_start":_forward_asp_answer_start, "_forward_asp_answer_end":_forward_asp_answer_end,
"_forward_opi_answer_start":_forward_opi_answer_start, "_forward_opi_answer_end":_forward_opi_answer_end,
"_forward_asp_query_mask":_forward_asp_query_mask, "_forward_opi_query_mask":_forward_opi_query_mask,
"_forward_asp_query_seg":_forward_asp_query_seg, "_forward_opi_query_seg":_forward_opi_query_seg,
"_backward_asp_query":_backward_asp_query, "_backward_opi_query":_backward_opi_query,
"_backward_asp_answer_start":_backward_asp_answer_start, "_backward_asp_answer_end":_backward_asp_answer_end,
"_backward_opi_answer_start":_backward_opi_answer_start, "_backward_opi_answer_end":_backward_opi_answer_end,
"_backward_asp_query_mask":_backward_asp_query_mask, "_backward_opi_query_mask":_backward_opi_query_mask,
"_backward_asp_query_seg":_backward_asp_query_seg, "_backward_opi_query_seg":_backward_opi_query_seg,
"_sentiment_query":_sentiment_query, "_sentiment_answer":_sentiment_answer, "_sentiment_query_mask":_sentiment_query_mask,
"_sentiment_query_seg":_sentiment_query_seg, "_aspect_num":_aspect_num, "_opinion_num":_opinion_num}
return OriginalDataset(result)
def tokenized_data(data):
max_forward_asp_query_length = 0
max_forward_opi_query_length = 0
max_backward_asp_query_length = 0
max_backward_opi_query_length = 0
max_sentiment_query_length = 0
max_aspect_num = 0
max_opinion_num = 0
tokenized_sample_list = []
for sample in data:
forward_querys = []
forward_answers = []
backward_querys = []
backward_answers = []
sentiment_querys = []
sentiment_answers = []
forward_querys_seg = []
backward_querys_seg = []
sentiment_querys_seg = []
if int(len(sample.forward_querys) - 1) > max_aspect_num:
max_aspect_num = int(len(sample.forward_querys) - 1)
if int(len(sample.backward_querys) - 1) > max_opinion_num:
max_opinion_num = int(len(sample.backward_querys) - 1)
for idx in range(len(sample.forward_querys)):
temp_query = sample.forward_querys[idx]
temp_text = sample.text
temp_answer = sample.forward_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
temp_answer[0] = [-1] * (len(temp_query) + 2) + temp_answer[0]
temp_answer[1] = [-1] * (len(temp_query) + 2) + temp_answer[1]
assert len(temp_answer[0]) == len(temp_answer[1]) == len(temp_query_to) == len(temp_query_seg)
if idx == 0:
if len(temp_query_to) > max_forward_asp_query_length:
max_forward_asp_query_length = len(temp_query_to)
else:
if len(temp_query_to) > max_forward_opi_query_length:
max_forward_opi_query_length = len(temp_query_to)
forward_querys.append(temp_query_to)
forward_answers.append(temp_answer)
forward_querys_seg.append(temp_query_seg)
for idx in range(len(sample.backward_querys)):
temp_query = sample.backward_querys[idx]
temp_text = sample.text
temp_answer = sample.backward_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
temp_answer[0] = [-1] * (len(temp_query) + 2) + temp_answer[0]
temp_answer[1] = [-1] * (len(temp_query) + 2) + temp_answer[1]
assert len(temp_answer[0]) == len(temp_answer[1]) == len(temp_query_to) == len(temp_query_seg)
if idx == 0:
if len(temp_query_to) > max_backward_opi_query_length:
max_backward_opi_query_length = len(temp_query_to)
else:
if len(temp_query_to) > max_backward_asp_query_length:
max_backward_asp_query_length = len(temp_query_to)
backward_querys.append(temp_query_to)
backward_answers.append(temp_answer)
backward_querys_seg.append(temp_query_seg)
for idx in range(len(sample.sentiment_querys)):
temp_query = sample.sentiment_querys[idx]
temp_text = sample.text
temp_answer = sample.sentiment_answers[idx]
temp_query_to = ['[CLS]'] + temp_query + ['[SEP]'] + temp_text
temp_query_seg = [0] * (len(temp_query) + 2) + [1] * len(temp_text)
assert len(temp_query_to) == len(temp_query_seg)
if len(temp_query_to) > max_sentiment_query_length:
max_sentiment_query_length = len(temp_query_to)
sentiment_querys.append(temp_query_to)
sentiment_answers.append(temp_answer)
sentiment_querys_seg.append(temp_query_seg)
temp_sample = sample_tokenized(sample.original_sample, forward_querys, forward_answers, backward_querys,
backward_answers, sentiment_querys, sentiment_answers, forward_querys_seg,
backward_querys_seg, sentiment_querys_seg)
tokenized_sample_list.append(temp_sample)
max_len = 256
return tokenized_sample_list, {'mfor_asp_len': min(max_len, max_forward_asp_query_length),
'mfor_opi_len': min(max_len, max_forward_opi_query_length),
'mback_asp_len': min(max_len, max_backward_asp_query_length),
'mback_opi_len': min(max_len, max_backward_opi_query_length),
'max_sent_len': min(max_len, max_sentiment_query_length),
'max_aspect_num': max_aspect_num,
'max_opinion_num': max_opinion_num}
if __name__ == '__main__':
sources = ['electronics', 'beauty', 'fashion', 'home', '14res', '15res', '16res', '14lap', 'all']
targets = ['book', 'grocery', 'office', 'pet', 'toy']
for dataset_name in sources + targets:
output_path = './data/preprocess/' + dataset_name + '.pt'
if dataset_name in sources:
train_data = torch.load("./data/preprocess/" + dataset_name + "_train_dual.pt")
dev_data = torch.load("./data/preprocess/" + dataset_name + "_dev_dual.pt")
test_data = torch.load("./data/preprocess/" + dataset_name + "_test_dual.pt")
train_tokenized, train_max_len = tokenized_data(train_data)
dev_tokenized, dev_max_len = tokenized_data(dev_data)
test_tokenized, test_max_len = tokenized_data(test_data)
print('preprocessing_data')
train_preprocess = pre_processing(train_tokenized, train_max_len)
dev_preprocess = pre_processing(dev_tokenized, dev_max_len)
test_preprocess = pre_processing(test_tokenized, test_max_len)
print('save_data')
torch.save({'train': train_preprocess, 'dev': dev_preprocess, 'test': test_preprocess}, output_path)
else:
test_data = torch.load("./data/preprocess/" + dataset_name + "_test_dual.pt")
test_tokenized, test_max_len = tokenized_data(test_data)
print('preprocessing_data')
test_preprocess = pre_processing(test_tokenized, test_max_len)
dev_data = torch.load("./data/preprocess/" + dataset_name + "_dev_dual.pt")
dev_tokenized, dev_max_len = tokenized_data(dev_data)
dev_preprocess = pre_processing(dev_tokenized, dev_max_len)
print('save_data')
torch.save({'train': None, 'dev': dev_preprocess, 'test': test_preprocess}, output_path)
| 24,242 | 50.037895 | 144 | py |
DMASTE | DMASTE-main/BMRC/Model.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
from transformers import BertTokenizer, BertModel, BertConfig
import torch.nn as nn
class BERTModel(nn.Module):
def __init__(self, args):
hidden_size = args.hidden_size
super(BERTModel, self).__init__()
        # BERT model
# if args.bert_model_type == 'bert-base-uncased':
self._bert = BertModel.from_pretrained(args.bert_model_type)
self._tokenizer = BertTokenizer.from_pretrained(args.bert_model_type)
        print(f'BERT model loaded from {args.bert_model_type}')
# else:
# raise KeyError('Config.args.bert_model_type should be bert-based-uncased. ')
self.classifier_start = nn.Linear(hidden_size, 2)
self.classifier_end = nn.Linear(hidden_size, 2)
self._classifier_sentiment = nn.Linear(hidden_size, 3)
def forward(self, query_tensor, query_mask, query_seg, step):
hidden_states = self._bert(query_tensor, attention_mask=query_mask, token_type_ids=query_seg)[0]
if step == 0: # predict entity
out_scores_start = self.classifier_start(hidden_states)
out_scores_end = self.classifier_end(hidden_states)
return out_scores_start, out_scores_end
else: # predict sentiment
cls_hidden_states = hidden_states[:, 0, :]
cls_hidden_scores = self._classifier_sentiment(cls_hidden_states)
return cls_hidden_scores
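# Illustrative usage sketch (not part of the original file); shapes follow the
# classifier heads defined above:
#     model = BERTModel(args)
#     start_scores, end_scores = model(query_ids, query_mask, query_seg, step=0)  # each [batch, seq_len, 2]
#     sentiment_scores = model(query_ids, query_mask, query_seg, step=1)          # [batch, 3]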
| 1,477 | 35.04878 | 104 | py |
DMASTE | DMASTE-main/BMRC/makeData_standard.py | # @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import torch
import pickle
from dataProcess import get_text
def make_standard(home_path, dataset_name, dataset_type):
# read triple
f = open(home_path + dataset_name + "/" + dataset_type + ".txt", "r", encoding="utf-8")
text_lines = f.readlines()
f.close()
# get text
_, _, _, triple_data = get_text(text_lines)
standard_list = []
for triplet in triple_data:
aspect_temp = []
opinion_temp = []
pair_temp = []
triplet_temp = []
asp_pol_temp = []
for temp_t in triplet:
triplet_temp.append([temp_t[0][0], temp_t[0][-1], temp_t[1][0], temp_t[1][-1], temp_t[2]])
ap = [temp_t[0][0], temp_t[0][-1], temp_t[2]]
if ap not in asp_pol_temp:
asp_pol_temp.append(ap)
a = [temp_t[0][0], temp_t[0][-1]]
if a not in aspect_temp:
aspect_temp.append(a)
o = [temp_t[1][0], temp_t[1][-1]]
if o not in opinion_temp:
opinion_temp.append(o)
p = [temp_t[0][0], temp_t[0][-1], temp_t[1][0], temp_t[1][-1]]
if p not in pair_temp:
pair_temp.append(p)
standard_list.append({'asp_target': aspect_temp, 'opi_target': opinion_temp, 'asp_opi_target': pair_temp,
'asp_pol_target': asp_pol_temp, 'triplet': triplet_temp})
return standard_list
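# Note added for clarity (not part of the original file): each element of
# standard_list is a dict with keys 'asp_target', 'opi_target', 'asp_opi_target',
# 'asp_pol_target' and 'triplet', where a 'triplet' entry has the form
# [asp_start, asp_end, opi_start, opi_end, sentiment].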
if __name__ == '__main__':
home_path = "../ia-dataset/"
sources = ['electronics', 'beauty', 'fashion', 'home', '14res', '15res', '16res', '14lap', 'all']
targets = ['book', 'grocery', 'office', 'pet', 'toy']
for dataset_name in sources + targets:
output_path = "./data/preprocess/" + dataset_name + "_standard.pt"
dev_standard = make_standard(home_path, dataset_name, 'dev')
test_standard = make_standard(home_path, dataset_name, 'test')
torch.save({'dev': dev_standard, 'test': test_standard}, output_path)
# else:
# test_standard = make_standard(home_path, dataset_name, 'test')
# torch.save({'dev': None, 'test': test_standard}, output_path)
| 2,219 | 35.393443 | 113 | py |
DMASTE | DMASTE-main/Generative-ABSA/main.py | import argparse
import os
import logging
import time
import pickle
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from transformers import AdamW, T5ForConditionalGeneration, T5Tokenizer
from transformers import get_linear_schedule_with_warmup
from data_utils import ABSADataset
from data_utils import write_results_to_log, read_line_examples_from_file
from eval_utils import compute_scores
from convert_to_triplets import convert
logger = logging.getLogger(__name__)
def init_args():
parser = argparse.ArgumentParser()
# basic settings
parser.add_argument("--task", default='uabsa', type=str, required=True,
help="The name of the task, selected from: [uabsa, aste, tasd, aope]")
parser.add_argument("--dataset", default='rest14', type=str, required=True,
help="The name of the dataset, selected from: [laptop14, rest14, rest15, rest16]")
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument("--model_name_or_path", default='t5-base', type=str,
help="Path to pre-trained model or shortcut name")
parser.add_argument("--paradigm", default='annotation', type=str, required=True,
help="The way to construct target sentence, selected from: [annotation, extraction]")
parser.add_argument('--model_name', type=str)
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev/test set.")
parser.add_argument("--do_direct_eval", action='store_true',
help="Whether to run direct eval on the dev/test set.")
# Other parameters
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--n_gpu", default=0)
parser.add_argument("--train_batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=16, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=3e-4, type=float)
parser.add_argument("--num_train_epochs", default=20, type=int,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
# parser.add_argument('--all_output_dir', type=str)
parser.add_argument('--model_dir', type=str)
parser.add_argument('--log_dir', type=str)
parser.add_argument('--tmp_dir', type=str)
# training details
parser.add_argument("--weight_decay", default=0.0, type=float)
parser.add_argument("--adam_epsilon", default=1e-8, type=float)
parser.add_argument("--warmup_steps", default=0.0, type=float)
args = parser.parse_args()
# set up output dir which looks like './aste/rest14/extraction/'
# if not os.path.exists(os.path.join(args.all_output_dir, 'model')):
# os.mkdir(os.path.join(args.all_output_dir, 'model'))
# task_dir = f"{args.all_output_dir}/temp/"
# if not os.path.exists(task_dir):
# os.mkdir(task_dir)
# os.makedirs(task_dir, exist_ok=True)
# args.output_dir = task_dir
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.model_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.tmp_dir, args.model_name), exist_ok=True)
return args
def get_dataset(tokenizer, data_dir, domain, type_path, args):
return ABSADataset(tokenizer=tokenizer, data_dir=data_dir, domain=domain, data_type=type_path,
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
class T5FineTuner(pl.LightningModule):
def __init__(self, hparams):
super(T5FineTuner, self).__init__()
self.hparams = hparams
self.model = T5ForConditionalGeneration.from_pretrained(hparams.model_name_or_path)
self.tokenizer = T5Tokenizer.from_pretrained(hparams.model_name_or_path)
def is_logger(self):
return True
def forward(self, input_ids, attention_mask=None, decoder_input_ids=None,
decoder_attention_mask=None, labels=None):
return self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels,
)
def _step(self, batch):
lm_labels = batch["target_ids"]
lm_labels[lm_labels[:, :] == self.tokenizer.pad_token_id] = -100
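        # -100 is the ignore_index used by the cross-entropy loss inside the
        # HuggingFace T5 implementation, so padded target positions do not
        # contribute to the loss (comment added for clarity).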
outputs = self(
input_ids=batch["source_ids"],
attention_mask=batch["source_mask"],
labels=lm_labels,
decoder_attention_mask=batch['target_mask']
)
loss = outputs[0]
return loss
def training_step(self, batch, batch_idx):
loss = self._step(batch)
tensorboard_logs = {"train_loss": loss}
return {"loss": loss, "log": tensorboard_logs}
def training_epoch_end(self, outputs):
avg_train_loss = torch.stack([x["loss"] for x in outputs]).mean()
tensorboard_logs = {"avg_train_loss": avg_train_loss}
return {"avg_train_loss": avg_train_loss, "log": tensorboard_logs, 'progress_bar': tensorboard_logs}
def validation_step(self, batch, batch_idx):
loss = self._step(batch)
return {"val_loss": loss}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
tensorboard_logs = {"val_loss": avg_loss}
return {"avg_val_loss": avg_loss, "log": tensorboard_logs, 'progress_bar': tensorboard_logs}
def configure_optimizers(self):
'''Prepare optimizer and schedule (linear warmup and decay)'''
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
return [optimizer]
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
        if self.trainer.use_tpu:
            # torch_xla is only needed on TPU, so import it lazily here
            import torch_xla.core.xla_model as xm
            xm.optimizer_step(optimizer)
else:
optimizer.step()
optimizer.zero_grad()
self.lr_scheduler.step()
def get_tqdm_dict(self):
tqdm_dict = {"loss": "{:.4f}".format(self.trainer.avg_loss), "lr": self.lr_scheduler.get_last_lr()[-1]}
return tqdm_dict
def train_dataloader(self):
train_dataset = get_dataset(tokenizer=self.tokenizer, data_dir=self.hparams.dataset, domain=self.hparams.source,
type_path="train", args=self.hparams)
dataloader = DataLoader(train_dataset, batch_size=self.hparams.train_batch_size, drop_last=True, shuffle=True, num_workers=4)
t_total = (
(len(dataloader.dataset) // (self.hparams.train_batch_size * max(1, len(self.hparams.n_gpu))))
// self.hparams.gradient_accumulation_steps
* float(self.hparams.num_train_epochs)
)
scheduler = get_linear_schedule_with_warmup(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total
)
self.lr_scheduler = scheduler
return dataloader
def val_dataloader(self):
val_dataset = get_dataset(tokenizer=self.tokenizer, data_dir=self.hparams.dataset, domain=self.hparams.source,
type_path="dev", args=self.hparams)
return DataLoader(val_dataset, batch_size=self.hparams.eval_batch_size, num_workers=4)
class LoggingCallback(pl.Callback):
def on_validation_end(self, trainer, pl_module):
logger.info("***** Validation results *****")
if pl_module.is_logger():
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer, pl_module):
logger.info("***** Test results *****")
if pl_module.is_logger():
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}\n".format(key, str(metrics[key])))
writer.write("{} = {}\n".format(key, str(metrics[key])))
def evaluate(data_loader, model, paradigm, task, sents):
"""
Compute scores given the predictions and gold labels
"""
device = torch.device(f'cuda:{args.n_gpu}')
model.model.to(device)
model.model.eval()
outputs, targets = [], []
for batch in tqdm(data_loader):
# need to push the data to device
outs = model.model.generate(input_ids=batch['source_ids'].to(device),
attention_mask=batch['source_mask'].to(device),
max_length=128)
dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
target = [tokenizer.decode(ids, skip_special_tokens=True) for ids in batch["target_ids"]]
outputs.extend(dec)
targets.extend(target)
raw_scores, fixed_scores, all_labels, all_preds, all_preds_fixed = compute_scores(outputs, targets, sents, paradigm, task)
return raw_scores, fixed_scores, all_preds_fixed, targets
# initialization
args = init_args()
print("\n", "="*30, f"NEW EXP: {args.task.upper()} on {args.dataset}", "="*30, "\n")
seed_everything(args.seed)
tokenizer = T5Tokenizer.from_pretrained(args.model_name_or_path)
# show one sample to check the sanity of the code and the expected output
print(f"Here is an example (from dev set) under `{args.paradigm}` paradigm:")
dataset = ABSADataset(tokenizer=tokenizer, data_dir=args.dataset, domain=args.source, data_type='dev',
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
data_sample = dataset[2] # a random data sample
print('Input :', tokenizer.decode(data_sample['source_ids'], skip_special_tokens=True))
print('Output:', tokenizer.decode(data_sample['target_ids'], skip_special_tokens=True))
# training process
if args.do_train:
print("\n****** Conduct Training ******")
model = T5FineTuner(args)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.tmp_dir, prefix="ckt", monitor='val_loss', mode='min', save_top_k=3
)
# prepare for trainer
train_params = dict(
default_root_dir=os.path.join(args.tmp_dir, args.model_name),
accumulate_grad_batches=args.gradient_accumulation_steps,
gpus=args.n_gpu,
gradient_clip_val=1.0,
#amp_level='O1',
max_epochs=args.num_train_epochs,
checkpoint_callback=checkpoint_callback,
callbacks=[LoggingCallback()],
)
trainer = pl.Trainer(**train_params)
trainer.fit(model)
torch.save(model.model, os.path.join(args.model_dir, args.model_name + '.pt'))
# save the final model
# model.model.save_pretrained(args.output_dir)
print("Finish training and saving the model!")
if args.do_eval:
print("\n****** Conduct Evaluating ******")
# model = T5FineTuner(args)
dev_results, test_results = {}, {}
best_f1, best_checkpoint, best_epoch = -999999.0, None, None
all_checkpoints, all_epochs = [], []
# retrieve all the saved checkpoints for model selection
# saved_model_dir = args.output_dir
# for f in os.listdir(saved_model_dir):
# file_name = os.path.join(saved_model_dir, f)
# if 'cktepoch' in file_name:
# all_checkpoints.append(file_name)
# conduct some selection (or not)
# print(f"We will perform validation on the following checkpoints: {all_checkpoints}")
# load dev and test datasets
# dev_dataset = ABSADataset(tokenizer, data_dir=args.dataset, domain=args.source, data_type='dev',
# paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
# dev_loader = DataLoader(dev_dataset, batch_size=32, num_workers=4)
# dev_sents, dev_labels = read_line_examples_from_file(f'{args.dataset}/{args.source}/dev.txt')
test_sents, test_labels = read_line_examples_from_file(f'{args.dataset}/{args.target}/test.txt')
test_dataset = ABSADataset(tokenizer, data_dir=args.dataset, domain=args.target, data_type='test',
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
test_loader = DataLoader(test_dataset, batch_size=32, num_workers=4)
print('model_dir', os.path.join(args.model_dir, args.model_name + '.pt'))
model = T5FineTuner(args)
model.model = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
# dev_raw, dev_fixed, _ = evaluate(dev_loader, model, args.paradigm, args.task, dev_sents)
test_raw, test_fixed, test_pred, test_targets = evaluate(test_loader, model, args.paradigm, args.task, test_sents)
# parsed_pred = convert(test_sents, test_pred, test_labels)
os.makedirs(args.log_dir, exist_ok=True)
log_file_path = os.path.join(args.log_dir, args.model_name, 'metric.txt')
local_time = time.asctime(time.localtime(time.time()))
exp_settings = f"{args.task} on {args.source}2{args.target} under {args.paradigm}; Train bs={args.train_batch_size}, num_epochs = {args.num_train_epochs}"
exp_results = f"Raw TEST F1 = {test_raw['f1']:.4f}, Fixed F1 = {test_fixed['f1']:.4f}"
log_str = f'============================================================\n'
log_str += f"{local_time}\n{exp_settings}\n{exp_results}\n\n"
with open(log_file_path, "w") as f:
f.write(log_str)
with open(os.path.join(args.log_dir, args.model_name, 'pred.txt'), 'w') as f:
for p, tgt in zip(test_pred, test_targets):
f.write(str({'pred': p, 'target': tgt}) + '\n')
# evaluation process
if args.do_direct_eval:
print("\n****** Conduct Evaluating with the last state ******")
# model = T5FineTuner(args)
# print("Reload the model")
# model.model.from_pretrained(args.output_dir)
sents, _ = read_line_examples_from_file(f'{args.dataset}/{args.target}/test.txt')
print()
test_dataset = ABSADataset(tokenizer, data_dir=args.dataset, domain=args.target, data_type='test',
paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)
test_loader = DataLoader(test_dataset, batch_size=32, num_workers=4)
# print(test_loader.device)
    # evaluate() returns four values; only the two score dicts are needed here
    raw_scores, fixed_scores, _, _ = evaluate(test_loader, model, args.paradigm, args.task, sents)
    # print(scores)
    # write to file (args.all_output_dir is not defined above, so log under args.log_dir instead)
    os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
    log_file_path = os.path.join(args.log_dir, args.model_name, f"{args.task}-{args.dataset}.txt")
local_time = time.asctime(time.localtime(time.time()))
exp_settings = f"{args.task} on {args.dataset} under {args.paradigm}; Train bs={args.train_batch_size}, num_epochs = {args.num_train_epochs}"
exp_results = f"Raw F1 = {raw_scores['f1']:.4f}, Fixed F1 = {fixed_scores['f1']:.4f}"
log_str = f'============================================================\n'
log_str += f"{local_time}\n{exp_settings}\n{exp_results}\n\n"
with open(log_file_path, "a+") as f:
f.write(log_str)
| 16,407 | 42.638298 | 158 | py |
DMASTE | DMASTE-main/Generative-ABSA/data_utils.py | # This file contains all data loading and transformation functions
import time
from torch.utils.data import Dataset
senttag2word = {'POS': 'positive', 'NEG': 'negative', 'NEU': 'neutral'}
def read_line_examples_from_file(data_path):
"""
Read data from file, each line is: sent####labels
Return List[List[word]], List[Tuple]
"""
sents, labels = [], []
with open(data_path, 'r', encoding='UTF-8') as fp:
words, labels = [], []
for line in fp:
line = line.strip()
if line != '':
words, tuples = line.split('####')
sents.append(words.split())
labels.append(eval(tuples))
print(f"Total examples = {len(sents)}")
return sents, labels
def get_annotated_uabsa_targets(sents, labels):
annotated_targets = []
num_sents = len(sents)
for i in range(num_sents):
tuples = labels[i]
if tuples != []:
# tup: ([3, 4], POS)
for tup in tuples:
ap, sent = tup[0], tup[1]
if len(ap) == 1:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}|{senttag2word[sent]}]"
else:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}"
sents[i][ap[-1]] = f"{sents[i][ap[-1]]}|{senttag2word[sent]}]"
annotated_targets.append(sents[i])
return annotated_targets
def get_annotated_aope_targets(sents, labels):
annotated_targets = []
num_sents = len(sents)
for i in range(num_sents):
tuples = labels[i]
# tup: ([3, 4], [2])
for tup in tuples:
ap, op = tup[0], tup[1]
opt = [sents[i][j] for j in op]
# multiple OT for one AP
if '[' in sents[i][ap[0]]:
if len(ap) == 1:
sents[i][ap[0]] = f"{sents[i][ap[0]][:-1]}, {' '.join(opt)}]"
else:
sents[i][ap[-1]] = f"{sents[i][ap[-1]][:-1]}, {' '.join(opt)}]"
else:
annotation = f"{' '.join(opt)}"
if len(ap) == 1:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}|{annotation}]"
else:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}"
sents[i][ap[-1]] = f"{sents[i][ap[-1]]}|{annotation}]"
annotated_targets.append(sents[i])
return annotated_targets
def get_annotated_aste_targets(sents, labels):
annotated_targets = []
num_sents = len(sents)
for i in range(num_sents):
tuples = labels[i]
# tup: ([2], [5], 'NEG')
for tup in tuples:
ap, op, sent = tup[0], tup[1], tup[2]
op = [sents[i][j] for j in op]
# multiple OT for one AP
if '[' in sents[i][ap[0]]:
# print(i)
if len(ap) == 1:
sents[i][ap[0]] = f"{sents[i][ap[0]][:-1]}, {' '.join(op)}]"
else:
sents[i][ap[-1]] = f"{sents[i][ap[-1]][:-1]}, {' '.join(op)}]"
else:
annotation = f"{senttag2word[sent]}|{' '.join(op)}"
if len(ap) == 1:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}|{annotation}]"
else:
sents[i][ap[0]] = f"[{sents[i][ap[0]]}"
sents[i][ap[-1]] = f"{sents[i][ap[-1]]}|{annotation}]"
annotated_targets.append(sents[i])
return annotated_targets
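# Worked example (illustrative, not part of the original file): for the sentence
# "The pizza was great" with label [([1], [3], 'POS')], the annotated target
# becomes "The [pizza|positive|great] was great".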
def get_annotated_tasd_targets(sents, labels):
targets = []
num_sents = len(sents)
sents_str = [' '.join(s) for s in sents]
for i in range(num_sents):
s_str = sents_str[i]
at_dict = {}
for triplet in labels[i]:
at, ac, polarity = triplet[0], triplet[1], triplet[2]
if at in at_dict:
at_dict[at][0].append(ac)
else:
at_dict[at] = [[ac], polarity]
for at, ac_pol in at_dict.items():
if len(ac_pol[0]) == 1:
annotated_at = f"[{at}|{ac_pol[0][0]}|{ac_pol[1]}]"
else:
annotated_at = f"[{at}|{', '.join(ac_pol[0])}|{ac_pol[1]}]"
if at != 'NULL':
# print('at:', at, 'replaced_at:', annotated_at)
s_str = s_str.replace(at, annotated_at)
else:
s_str += f" {annotated_at}"
targets.append(s_str)
return targets
def get_extraction_uabsa_targets(sents, labels):
targets = []
for i, label in enumerate(labels):
if label == []:
targets.append('None')
else:
all_tri = []
for tri in label:
if len(tri[0]) == 1:
a = sents[i][tri[0][0]]
else:
start_idx, end_idx = tri[0][0], tri[0][-1]
a = ' '.join(sents[i][start_idx:end_idx+1])
c = senttag2word[tri[1]]
all_tri.append((a, c))
label_strs = ['('+', '.join(l)+')' for l in all_tri]
targets.append('; '.join(label_strs))
return targets
def get_extraction_aope_targets(sents, labels):
targets = []
for i, label in enumerate(labels):
all_tri = []
for tri in label:
if len(tri[0]) == 1:
a = sents[i][tri[0][0]]
else:
start_idx, end_idx = tri[0][0], tri[0][-1]
a = ' '.join(sents[i][start_idx:end_idx+1])
if len(tri[1]) == 1:
b = sents[i][tri[1][0]]
else:
start_idx, end_idx = tri[1][0], tri[1][-1]
b = ' '.join(sents[i][start_idx:end_idx+1])
all_tri.append((a, b))
label_strs = ['('+', '.join(l)+')' for l in all_tri]
targets.append('; '.join(label_strs))
return targets
def get_extraction_tasd_targets(sents, labels):
targets = []
for label in labels:
label_strs = ['('+', '.join(l)+')' for l in label]
target = '; '.join(label_strs)
targets.append(target)
return targets
def get_extraction_aste_targets(sents, labels):
targets = []
for i, label in enumerate(labels):
all_tri = []
for tri in label:
if len(tri[0]) == 1:
a = sents[i][tri[0][0]]
else:
start_idx, end_idx = tri[0][0], tri[0][-1]
a = ' '.join(sents[i][start_idx:end_idx+1])
if len(tri[1]) == 1:
b = sents[i][tri[1][0]]
else:
start_idx, end_idx = tri[1][0], tri[1][-1]
b = ' '.join(sents[i][start_idx:end_idx+1])
c = senttag2word[tri[2]]
all_tri.append((a, b, c))
label_strs = ['('+', '.join(l)+')' for l in all_tri]
targets.append('; '.join(label_strs))
return targets
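# Worked example (illustrative, not part of the original file): the sentence
# "The pizza was great" with label [([1], [3], 'POS')] yields the extraction-style
# target "(pizza, great, positive)".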
def get_transformed_io(data_path, paradigm, task):
"""
The main function to transform the Input & Output according to
the specified paradigm and task
"""
sents, labels = read_line_examples_from_file(data_path)
# the input is just the raw sentence
inputs = [s.copy() for s in sents]
# Get target according to the paradigm
# annotate the sents (with label info) as targets
if paradigm == 'annotation':
if task == 'uabsa':
targets = get_annotated_uabsa_targets(sents, labels)
elif task == 'aste':
targets = get_annotated_aste_targets(sents, labels)
elif task == 'tasd':
targets = get_annotated_tasd_targets(sents, labels)
elif task == 'aope':
targets = get_annotated_aope_targets(sents, labels)
else:
raise NotImplementedError
# directly treat label infor as the target
elif paradigm == 'extraction':
if task == 'uabsa':
targets = get_extraction_uabsa_targets(sents, labels)
elif task == 'aste':
targets = get_extraction_aste_targets(sents, labels)
elif task == 'tasd':
targets = get_extraction_tasd_targets(sents, labels)
elif task == 'aope':
targets = get_extraction_aope_targets(sents, labels)
else:
raise NotImplementedError
else:
print('Unsupported paradigm!')
raise NotImplementedError
return inputs, targets
class ABSADataset(Dataset):
def __init__(self, tokenizer, data_dir, domain, data_type, paradigm, task, max_len=128):
# 'data/aste/rest16/train.txt'
self.data_path = f'{data_dir}/{domain}/{data_type}.txt'
self.paradigm = paradigm
self.task = task
self.max_len = max_len
self.tokenizer = tokenizer
self.inputs = []
self.targets = []
self._build_examples()
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_ids = self.inputs[index]["input_ids"].squeeze()
target_ids = self.targets[index]["input_ids"].squeeze()
src_mask = self.inputs[index]["attention_mask"].squeeze() # might need to squeeze
target_mask = self.targets[index]["attention_mask"].squeeze() # might need to squeeze
return {"source_ids": source_ids, "source_mask": src_mask,
"target_ids": target_ids, "target_mask": target_mask}
def _build_examples(self):
inputs, targets = get_transformed_io(self.data_path, self.paradigm, self.task)
for i in range(len(inputs)):
input = ' '.join(inputs[i])
if self.paradigm == 'annotation':
if self.task != 'tasd':
target = ' '.join(targets[i])
else:
target = targets[i]
else:
target = targets[i]
tokenized_input = self.tokenizer.batch_encode_plus(
[input], max_length=self.max_len, pad_to_max_length=True, truncation=True,
return_tensors="pt",
)
tokenized_target = self.tokenizer.batch_encode_plus(
[target], max_length=self.max_len, pad_to_max_length=True, truncation=True,
return_tensors="pt"
)
self.inputs.append(tokenized_input)
self.targets.append(tokenized_target)
def write_results_to_log(log_file_path, best_test_result, args, dev_results, test_results, global_steps):
"""
Record dev and test results to log file
"""
local_time = time.asctime(time.localtime(time.time()))
exp_settings = "Exp setting: {0} on {1} under {2} | {3:.4f} | ".format(
args.task, args.dataset, args.paradigm, best_test_result
)
train_settings = "Train setting: bs={0}, lr={1}, num_epochs={2}".format(
args.train_batch_size, args.learning_rate, args.num_train_epochs
)
results_str = "\n* Results *: Dev / Test \n"
metric_names = ['f1', 'precision', 'recall']
for gstep in global_steps:
results_str += f"Step-{gstep}:\n"
for name in metric_names:
name_step = f'{name}_{gstep}'
results_str += f"{name:<8}: {dev_results[name_step]:.4f} / {test_results[name_step]:.4f}"
results_str += ' '*5
results_str += '\n'
log_str = f"{local_time}\n{exp_settings}\n{train_settings}\n{results_str}\n\n"
with open(log_file_path, "a+") as f:
f.write(log_str) | 11,367 | 34.304348 | 105 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/main.py | # coding=utf-8
import json, os
import random
import argparse
import numpy
import torch
import torch.nn.functional as F
from tqdm import trange
import numpy as np
from data import load_data_instances, DataIterator
from model import MultiInferRNNModel, MultiInferCNNModel
import utils
def train(args):
# load double embedding
word2index = json.load(open(args.prefix + 'doubleembedding/word_idx.json'))
general_embedding = numpy.load(args.prefix +'doubleembedding/gen.vec.npy')
general_embedding = torch.from_numpy(general_embedding)
domain_embedding = numpy.load(args.prefix +'doubleembedding/'+args.dataset+'_emb.vec.npy')
domain_embedding = torch.from_numpy(domain_embedding)
# load dataset
train_sentence_packs = json.load(open(args.prefix + args.dataset + '/train.json'))
random.shuffle(train_sentence_packs)
dev_sentence_packs = json.load(open(args.prefix + args.dataset + '/dev.json'))
instances_train = load_data_instances(train_sentence_packs, word2index, args)
instances_dev = load_data_instances(dev_sentence_packs, word2index, args)
random.shuffle(instances_train)
trainset = DataIterator(instances_train, args)
devset = DataIterator(instances_dev, args)
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
# build model
if args.model == 'bilstm':
model = MultiInferRNNModel(general_embedding, domain_embedding, args).to(args.device)
elif args.model == 'cnn':
model = MultiInferCNNModel(general_embedding, domain_embedding, args).to(args.device)
parameters = list(model.parameters())
parameters = filter(lambda x: x.requires_grad, parameters)
optimizer = torch.optim.Adam(parameters, lr=args.lr)
# training
best_joint_f1 = 0
best_joint_epoch = 0
for i in range(args.epochs):
print('Epoch:{}'.format(i))
for j in trange(trainset.batch_count):
_, sentence_tokens, lengths, masks, aspect_tags, _, tags = trainset.get_batch(j)
predictions = model(sentence_tokens, lengths, masks)
loss = 0.
tags_flatten = tags[:, :lengths[0], :lengths[0]].reshape([-1])
for k in range(len(predictions)):
prediction_flatten = predictions[k].reshape([-1, predictions[k].shape[3]])
loss = loss + F.cross_entropy(prediction_flatten, tags_flatten, ignore_index=-1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
joint_precision, joint_recall, joint_f1 = eval(model, devset, args)
if joint_f1 > best_joint_f1:
model_path = args.model_dir + args.model + args.task + '.pt'
torch.save(model, model_path)
best_joint_f1 = joint_f1
best_joint_epoch = i
print('best epoch: {}\tbest dev {} f1: {:.5f}\n\n'.format(best_joint_epoch, args.task, best_joint_f1))
def eval(model, dataset, args):
model.eval()
with torch.no_grad():
predictions=[]
labels=[]
all_ids = []
all_lengths = []
for i in range(dataset.batch_count):
sentence_ids, sentence_tokens, lengths, mask, aspect_tags, _, tags = dataset.get_batch(i)
prediction = model.forward(sentence_tokens,lengths, mask)
prediction = prediction[-1]
prediction = torch.argmax(prediction, dim=3)
prediction_padded = torch.zeros(prediction.shape[0], args.max_sequence_len, args.max_sequence_len)
prediction_padded[:, :prediction.shape[1], :prediction.shape[1]] = prediction
predictions.append(prediction_padded)
all_ids.extend(sentence_ids)
labels.append(tags)
all_lengths.append(lengths)
predictions = torch.cat(predictions,dim=0).cpu().tolist()
labels = torch.cat(labels,dim=0).cpu().tolist()
all_lengths = torch.cat(all_lengths, dim=0).cpu().tolist()
precision, recall, f1 = utils.score_uniontags(args, predictions, labels, all_lengths, ignore_index=-1)
aspect_results = utils.score_aspect(predictions, labels, all_lengths, ignore_index=-1)
opinion_results = utils.score_opinion(predictions, labels, all_lengths, ignore_index=-1)
print('Aspect term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(aspect_results[0], aspect_results[1], aspect_results[2]))
print('Opinion term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(opinion_results[0], opinion_results[1], opinion_results[2]))
print(args.task+'\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}\n'.format(precision, recall, f1))
model.train()
return precision, recall, f1
def test(args):
print("Evaluation on testset:")
model_path = args.model_dir + args.model + args.task + '.pt'
model = torch.load(model_path).to(args.device)
model.eval()
word2index = json.load(open(args.prefix + 'doubleembedding/word_idx.json'))
sentence_packs = json.load(open(args.prefix + args.dataset + '/test.json'))
instances = load_data_instances(sentence_packs, word2index, args)
testset = DataIterator(instances, args)
eval(model, testset, args)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', type=str, default="../../data/",
help='dataset and embedding path prefix')
parser.add_argument('--model_dir', type=str, default="savemodel/",
help='model path prefix')
parser.add_argument('--task', type=str, default="pair", choices=["pair", "triplet"],
help='option: pair, triplet')
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"],
help='option: train, test')
parser.add_argument('--model', type=str, default="bilstm", choices=["bilstm", "cnn"],
help='option: bilstm, cnn')
parser.add_argument('--dataset', type=str, default="res14",
help='dataset')
parser.add_argument('--max_sequence_len', type=int, default=100,
help='max length of a sentence')
parser.add_argument('--device', type=str, default="cuda",
help='gpu or cpu')
parser.add_argument('--lstm_dim', type=int, default=50,
help='dimension of lstm cell')
parser.add_argument('--cnn_dim', type=int, default=256,
help='dimension of cnn')
parser.add_argument('--nhops', type=int, default=0,
help='inference times')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--batch_size', type=int, default=32,
help='batch size')
parser.add_argument('--epochs', type=int, default=600,
help='training epoch number')
parser.add_argument('--class_num', type=int, default=4,
help='label number')
args = parser.parse_args()
if args.task == 'triplet':
args.class_num = 6
if args.mode == 'train':
train(args)
test(args)
else:
test(args)
| 7,166 | 39.954286 | 127 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/attention_module.py | import copy
import math
import torch
import torch.nn.functional as F
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
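# Shape sketch for attention() above (illustrative only):
#   q = k = v = torch.rand(2, 5, 8)      # (batch, seq_len, d_k)
#   out, p_attn = attention(q, k, v)     # out: (2, 5, 8), p_attn: (2, 5, 5)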
def clones(module, N):
"Produce N identical layers."
return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class MultiHeadedAttention(torch.nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(torch.nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = torch.nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class SelfAttention(torch.nn.Module):
def __init__(self, args):
super(SelfAttention,self).__init__()
self.args = args
self.linear_q = torch.nn.Linear(args.lstm_dim * 2, args.lstm_dim * 2)
# self.linear_k = torch.nn.Linear(configs.BILSTM_DIM * 2, configs.BILSTM_DIM * 2)
# self.linear_v = torch.nn.Linear(configs.BILSTM_DIM * 2, configs.BILSTM_DIM * 2)
# self.w_query = torch.nn.Linear(configs.BILSTM_DIM * 2, 50)
# self.w_value = torch.nn.Linear(configs.BILSTM_DIM * 2, 50)
self.w_query = torch.nn.Linear(args.cnn_dim, 50)
self.w_value = torch.nn.Linear(args.cnn_dim, 50)
self.v = torch.nn.Linear(50, 1, bias=False)
def forward(self, query, value, mask):
# attention_states = self.linear_q(query)
# attention_states_T = self.linear_k(values)
attention_states = query
attention_states_T = value
attention_states_T = attention_states_T.permute([0, 2, 1])
weights=torch.bmm(attention_states, attention_states_T)
weights = weights.masked_fill(mask.unsqueeze(1).expand_as(weights)==0, float("-inf")) # mask out the padded trailing columns of each row
attention = F.softmax(weights,dim=2)
# value=self.linear_v(states)
merged=torch.bmm(attention, value)
merged=merged * mask.unsqueeze(2).float().expand_as(merged)
return merged
def forward_perceptron(self, query, value, mask):
attention_states = query
attention_states = self.w_query(attention_states)
attention_states = attention_states.unsqueeze(2).expand(-1,-1,attention_states.shape[1], -1)
attention_states_T = value
attention_states_T = self.w_value(attention_states_T)
attention_states_T = attention_states_T.unsqueeze(2).expand(-1,-1,attention_states_T.shape[1], -1)
attention_states_T = attention_states_T.permute([0, 2, 1, 3])
weights = torch.tanh(attention_states+attention_states_T)
weights = self.v(weights).squeeze(3)
weights = weights.masked_fill(mask.unsqueeze(1).expand_as(weights)==0, float("-inf")) # mask out the padded trailing columns of each row
attention = F.softmax(weights,dim=2)
merged = torch.bmm(attention, value)
merged = merged * mask.unsqueeze(2).float().expand_as(merged)
return merged
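# Note: forward() scores pairs with (unscaled) dot-product attention, while
# forward_perceptron() uses additive attention (w_query/w_value + tanh + self.v);
# both mask padded positions with -inf before the softmax.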
| 4,281 | 38.648148 | 112 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/model.py | import torch
import torch.nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from attention_module import MultiHeadedAttention, SelfAttention
class MultiInferRNNModel(torch.nn.Module):
def __init__(self, gen_emb, domain_emb, args):
'''double embedding + lstm encoder + dot self attention'''
super(MultiInferRNNModel, self).__init__()
self.args = args
self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
self.gen_embedding.weight.data.copy_(gen_emb)
self.gen_embedding.weight.requires_grad = False
self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
self.domain_embedding.weight.data.copy_(domain_emb)
self.domain_embedding.weight.requires_grad = False
self.dropout1 = torch.nn.Dropout(0.5)
self.dropout2 = torch.nn.Dropout(0)
self.bilstm = torch.nn.LSTM(300+100, args.lstm_dim,
num_layers=1, batch_first=True, bidirectional=True)
self.attention_layer = SelfAttention(args)
self.feature_linear = torch.nn.Linear(args.lstm_dim*4 + args.class_num*3, args.lstm_dim*4)
self.cls_linear = torch.nn.Linear(args.lstm_dim*4, args.class_num)
def _get_embedding(self, sentence_tokens, mask):
gen_embed = self.gen_embedding(sentence_tokens)
domain_embed = self.domain_embedding(sentence_tokens)
embedding = torch.cat([gen_embed, domain_embed], dim=2)
embedding = self.dropout1(embedding)
embedding = embedding * mask.unsqueeze(2).float().expand_as(embedding)
return embedding
def _lstm_feature(self, embedding, lengths):
embedding = pack_padded_sequence(embedding, lengths, batch_first=True)
context, _ = self.bilstm(embedding)
context, _ = pad_packed_sequence(context, batch_first=True)
return context
def _cls_logits(self, features):
# features = self.dropout2(features)
tags = self.cls_linear(features)
return tags
def multi_hops(self, features, lengths, mask, k):
'''generate mask'''
max_length = features.shape[1]
mask = mask[:, :max_length]
mask_a = mask.unsqueeze(1).expand([-1, max_length, -1])
mask_b = mask.unsqueeze(2).expand([-1, -1, max_length])
mask = mask_a * mask_b
mask = torch.triu(mask).unsqueeze(3).expand([-1, -1, -1, self.args.class_num])
'''save all logits'''
logits_list = []
logits = self._cls_logits(features)
logits_list.append(logits)
for i in range(k):
#probs = torch.softmax(logits, dim=3)
probs = logits
logits = probs * mask
logits_a = torch.max(logits, dim=1)[0]
logits_b = torch.max(logits, dim=2)[0]
logits = torch.cat([logits_a.unsqueeze(3), logits_b.unsqueeze(3)], dim=3)
logits = torch.max(logits, dim=3)[0]
logits = logits.unsqueeze(2).expand([-1,-1, max_length, -1])
logits_T = logits.transpose(1, 2)
logits = torch.cat([logits, logits_T], dim=3)
new_features = torch.cat([features, logits, probs], dim=3)
features = self.feature_linear(new_features)
logits = self._cls_logits(features)
logits_list.append(logits)
return logits_list
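# multi_hops() implements GTS-style iterative inference: starting from the initial
# word-pair logits, each hop max-pools the current predictions along rows and columns,
# concatenates the pooled scores back onto the pair features, and re-classifies,
# giving k refinement rounds before the final logits are returned.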
def forward(self, sentence_tokens, lengths, mask):
embedding = self._get_embedding(sentence_tokens, mask)
lstm_feature = self._lstm_feature(embedding, lengths)
# self attention
lstm_feature_attention = self.attention_layer(lstm_feature, lstm_feature, mask[:,:lengths[0]])
#lstm_feature_attention = self.attention_layer.forward_perceptron(lstm_feature, lstm_feature, mask[:, :lengths[0]])
lstm_feature = lstm_feature + lstm_feature_attention
lstm_feature = lstm_feature.unsqueeze(2).expand([-1,-1, lengths[0], -1])
lstm_feature_T = lstm_feature.transpose(1, 2)
features = torch.cat([lstm_feature, lstm_feature_T], dim=3)
logits = self.multi_hops(features, lengths, mask, self.args.nhops)
return [logits[-1]]
class MultiInferCNNModel(torch.nn.Module):
def __init__(self, gen_emb, domain_emb, args):
super(MultiInferCNNModel, self).__init__()
self.args = args
self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
self.gen_embedding.weight.data.copy_(gen_emb)
self.gen_embedding.weight.requires_grad = False
self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
self.domain_embedding.weight.data.copy_(domain_emb)
self.domain_embedding.weight.requires_grad = False
self.attention_layer = SelfAttention(args)
self.conv1 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 5, padding=2)
self.conv2 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 3, padding=1)
self.dropout = torch.nn.Dropout(0.5)
self.conv3 = torch.nn.Conv1d(256, 256, 5, padding=2)
self.conv4 = torch.nn.Conv1d(256, 256, 5, padding=2)
self.conv5 = torch.nn.Conv1d(256, 256, 5, padding=2)
self.feature_linear = torch.nn.Linear(args.cnn_dim*2 + args.class_num*3, args.cnn_dim*2)
self.cls_linear = torch.nn.Linear(256*2, args.class_num)
def multi_hops(self, features, lengths, mask, k):
'''generate matrix mask'''
max_length = features.shape[1]
mask = mask[:, :max_length]
mask_a = mask.unsqueeze(1).expand([-1, max_length, -1])
mask_b = mask.unsqueeze(2).expand([-1, -1, max_length])
mask = mask_a * mask_b
mask = torch.triu(mask).unsqueeze(3).expand([-1, -1, -1, self.args.class_num])
'''save all logits'''
logits_list = []
logits = self.cls_linear(features)
logits_list.append(logits)
for i in range(k):
#probs = torch.softmax(logits, dim=3)
probs = logits
logits = probs * mask
logits_a = torch.max(logits, dim=1)[0]
logits_b = torch.max(logits, dim=2)[0]
logits = torch.cat([logits_a.unsqueeze(3), logits_b.unsqueeze(3)], dim=3)
logits = torch.max(logits, dim=3)[0]
logits = logits.unsqueeze(2).expand([-1,-1, max_length, -1])
logits_T = logits.transpose(1, 2)
logits = torch.cat([logits, logits_T], dim=3)
new_features = torch.cat([features, logits, probs], dim=3)
features = self.feature_linear(new_features)
logits = self.cls_linear(features)
logits_list.append(logits)
return logits_list
def forward(self, x, x_len, x_mask):
x_emb = torch.cat((self.gen_embedding(x), self.domain_embedding(x)), dim=2)
x_emb = self.dropout(x_emb).transpose(1, 2)
x_conv = torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1))
x_conv = self.dropout(x_conv)
x_conv = torch.nn.functional.relu(self.conv3(x_conv))
x_conv = self.dropout(x_conv)
x_conv = torch.nn.functional.relu(self.conv4(x_conv))
x_conv = self.dropout(x_conv)
x_conv = torch.nn.functional.relu(self.conv5(x_conv))
x_conv = x_conv.transpose(1, 2)
x_conv = x_conv[:, :x_len[0], :]
feature_attention = self.attention_layer.forward_perceptron(x_conv, x_conv, x_mask[:, :x_len[0]])
x_conv = x_conv + feature_attention
x_conv = x_conv.unsqueeze(2).expand([-1, -1, x_len[0], -1])
x_conv_T = x_conv.transpose(1, 2)
features = torch.cat([x_conv, x_conv_T], dim=3)
logits = self.multi_hops(features, x_len, x_mask, self.args.nhops)
return [logits[-1]]
| 7,886 | 41.632432 | 123 | py |
DMASTE | DMASTE-main/GTS/code/NNModel/data.py | import math
import torch
sentiment2id = {'negative': 3, 'neutral': 4, 'positive': 5}
def get_spans(tags):
'''for BIO tag'''
tags = tags.strip().split()
length = len(tags)
spans = []
start = -1
for i in range(length):
if tags[i].endswith('B'):
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[i].endswith('O'):
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
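# Illustrative behaviour (simplified tags; the dataset's per-word tags such as
# 'food\B' end with the same B/I/O letters, so endswith() treats them the same way):
#   get_spans('O B I O B')  ->  [[1, 2], [4, 4]]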
class Instance(object):
def __init__(self, sentence_pack, word2index, args):
self.id = sentence_pack['id']
self.sentence = sentence_pack['sentence']
self.sentence_tokens = torch.zeros(args.max_sequence_len).long()
'''generate sentence tokens'''
words = self.sentence.split()
self.length = len(words)
for i, w in enumerate(words):
# word = w.lower()
word = w
if word in word2index:
self.sentence_tokens[i] = word2index[word]
else:
self.sentence_tokens[i] = word2index['<unk>']
self.aspect_tags = torch.zeros(args.max_sequence_len).long()
self.opinion_tags = torch.zeros(args.max_sequence_len).long()
self.aspect_tags[self.length:] = -1
self.opinion_tags[self.length:] = -1
self.tags = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
self.tags[:, :] = -1
for i in range(self.length):
for j in range(i, self.length):
self.tags[i][j] = 0
for pair in sentence_pack['triples']:
aspect = pair['target_tags']
opinion = pair['opinion_tags']
aspect_span = get_spans(aspect)
opinion_span = get_spans(opinion)
for l, r in aspect_span:
for i in range(l, r+1):
self.aspect_tags[i] = 1 if i == l else 2
self.tags[i][i] = 1
if i > l: self.tags[i-1][i] = 1
for j in range(i, r+1):
self.tags[i][j] = 1
for l, r in opinion_span:
for i in range(l, r+1):
self.opinion_tags[i] = 1 if i == l else 2
self.tags[i][i] = 2
if i > l: self.tags[i-1][i] = 2
for j in range(i, r+1):
self.tags[i][j] = 2
for al, ar in aspect_span:
for pl, pr in opinion_span:
for i in range(al, ar+1):
for j in range(pl, pr+1):
if args.task == 'pair':
if i > j: self.tags[j][i] = 3
else: self.tags[i][j] = 3
elif args.task == 'triplet':
if i > j: self.tags[j][i] = sentiment2id[pair['sentiment']]
else: self.tags[i][j] = sentiment2id[pair['sentiment']]
'''generate mask of the sentence'''
self.mask = torch.zeros(args.max_sequence_len)
self.mask[:self.length] = 1
def load_data_instances(sentence_packs, word2index, args):
instances = list()
for sentence_pack in sentence_packs:
instances.append(Instance(sentence_pack, word2index, args))
return instances
class DataIterator(object):
def __init__(self, instances, args):
self.instances = instances
self.args = args
self.batch_count = math.ceil(len(instances)/args.batch_size)
def get_batch(self, index):
sentence_ids = []
sentence_tokens = []
lengths = []
masks = []
aspect_tags = []
opinion_tags = []
tags = []
for i in range(index * self.args.batch_size,
min((index + 1) * self.args.batch_size, len(self.instances))):
sentence_ids.append(self.instances[i].id)
sentence_tokens.append(self.instances[i].sentence_tokens)
lengths.append(self.instances[i].length)
masks.append(self.instances[i].mask)
aspect_tags.append(self.instances[i].aspect_tags)
opinion_tags.append(self.instances[i].opinion_tags)
tags.append(self.instances[i].tags)
indexes = list(range(len(sentence_tokens)))
indexes = sorted(indexes, key=lambda x: lengths[x], reverse=True)
sentence_ids = [sentence_ids[i] for i in indexes]
sentence_tokens = torch.stack(sentence_tokens).to(self.args.device)[indexes]
lengths = torch.tensor(lengths).to(self.args.device)[indexes]
masks = torch.stack(masks).to(self.args.device)[indexes]
aspect_tags = torch.stack(aspect_tags).to(self.args.device)[indexes]
opinion_tags = torch.stack(opinion_tags).to(self.args.device)[indexes]
tags = torch.stack(tags).to(self.args.device)[indexes]
return sentence_ids, sentence_tokens, lengths, masks, aspect_tags, opinion_tags, tags
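# Note: get_batch() reorders each batch by descending sentence length, which is the
# order pack_padded_sequence expects in the BiLSTM encoder of model.py.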
| 5,123 | 36.955556 | 93 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/main.py | # coding=utf-8
import json, os
import random
import argparse
import torch
import torch.nn.functional as F
from tqdm import trange
from data import load_data_instances, DataIterator
from model import MultiInferBert
import utils
def train(args):
# load dataset
train_sentence_packs = json.load(open(args.prefix + args.source + '/train.json'))
random.shuffle(train_sentence_packs)
dev_sentence_packs = json.load(open(args.prefix + args.source + '/dev.json'))
instances_train = load_data_instances(train_sentence_packs, args)
instances_dev = load_data_instances(dev_sentence_packs, args)
random.shuffle(instances_train)
trainset = DataIterator(instances_train, args)
devset = DataIterator(instances_dev, args)
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
model = MultiInferBert(args).to(args.device)
optimizer = torch.optim.Adam([
{'params': model.bert.parameters(), 'lr': 5e-5},
{'params': model.cls_linear.parameters()}
], lr=5e-5)
best_joint_f1 = 0
best_joint_epoch = 0
for i in range(args.epochs):
print('Epoch:{}'.format(i))
for j in trange(trainset.batch_count):
_, tokens, lengths, masks, _, _, aspect_tags, tags = trainset.get_batch(j)
preds = model(tokens, masks)
preds_flatten = preds.reshape([-1, preds.shape[3]])
tags_flatten = tags.reshape([-1])
loss = F.cross_entropy(preds_flatten, tags_flatten, ignore_index=-1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
joint_precision, joint_recall, joint_f1 = eval(model, devset, args)
if joint_f1 > best_joint_f1:
model_path = args.model_dir + 'bert' + args.task + '.pt'
torch.save(model, model_path)
best_joint_f1 = joint_f1
best_joint_epoch = i
print('best epoch: {}\tbest dev {} f1: {:.5f}\n\n'.format(best_joint_epoch, args.task, best_joint_f1))
def eval(model, dataset, args):
model.eval()
with torch.no_grad():
all_ids = []
all_preds = []
all_labels = []
all_lengths = []
all_sens_lengths = []
all_token_ranges = []
for i in range(dataset.batch_count):
sentence_ids, tokens, lengths, masks, sens_lens, token_ranges, aspect_tags, tags = dataset.get_batch(i)
preds = model(tokens, masks)
preds = torch.argmax(preds, dim=3)
all_preds.append(preds)
all_labels.append(tags)
all_lengths.append(lengths)
all_sens_lengths.extend(sens_lens)
all_token_ranges.extend(token_ranges)
all_ids.extend(sentence_ids)
all_preds = torch.cat(all_preds, dim=0).cpu().tolist()
all_labels = torch.cat(all_labels, dim=0).cpu().tolist()
all_lengths = torch.cat(all_lengths, dim=0).cpu().tolist()
metric = utils.Metric(args, all_preds, all_labels, all_lengths, all_sens_lengths, all_token_ranges, ignore_index=-1)
precision, recall, f1 = metric.score_uniontags()
aspect_results = metric.score_aspect()
opinion_results = metric.score_opinion()
print('Aspect term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(aspect_results[0], aspect_results[1],
aspect_results[2]))
print('Opinion term\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}'.format(opinion_results[0], opinion_results[1],
opinion_results[2]))
print(args.task + '\tP:{:.5f}\tR:{:.5f}\tF1:{:.5f}\n'.format(precision, recall, f1))
model.train()
return precision, recall, f1
def test(args):
print("Evaluation on testset:")
model_path = args.model_dir + 'bert' + args.task + '.pt'
model = torch.load(model_path).to(args.device)
model.eval()
sentence_packs = json.load(open(args.prefix + args.target + '/test.json'))
instances = load_data_instances(sentence_packs, args)
testset = DataIterator(instances, args)
eval(model, testset, args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', type=str, default="../../data/",
help='dataset and embedding path prefix')
parser.add_argument('--model_dir', type=str, default="savemodel/",
help='model path prefix')
parser.add_argument('--task', type=str, default="pair", choices=["pair", "triplet"],
help='option: pair, triplet')
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"],
help='option: train, test')
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--max_sequence_len', type=int, default=100,
help='max length of a sentence')
parser.add_argument('--device', type=str, default="cuda",
help='gpu or cpu')
parser.add_argument('--bert_model_path', type=str,
default="bert-base-uncased",
help='pretrained bert model path')
parser.add_argument('--bert_tokenizer_path', type=str,
default="bert-base-uncased",
help='pretrained bert tokenizer path')
parser.add_argument('--bert_feature_dim', type=int, default=768,
help='dimension of pretrained bert feature')
parser.add_argument('--nhops', type=int, default=1,
help='inference times')
parser.add_argument('--batch_size', type=int, default=32,
help='batch size')
parser.add_argument('--epochs', type=int, default=100,
help='training epoch number')
parser.add_argument('--class_num', type=int, default=4,
help='label number')
args = parser.parse_args()
if args.task == 'triplet':
args.class_num = 6
if args.mode == 'train':
train(args)
# test(args)
else:
test(args)
| 6,176 | 37.60625 | 124 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/model.py | import torch
import torch.nn
from transformers import BertModel, BertTokenizer
class MultiInferBert(torch.nn.Module):
def __init__(self, args):
super(MultiInferBert, self).__init__()
self.args = args
self.bert = BertModel.from_pretrained(args.bert_model_path)
self.tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer_path)
self.cls_linear = torch.nn.Linear(args.bert_feature_dim*2, args.class_num)
self.feature_linear = torch.nn.Linear(args.bert_feature_dim*2 + args.class_num*3, args.bert_feature_dim*2)
self.dropout_output = torch.nn.Dropout(0.1)
def multi_hops(self, features, mask, k):
'''generate mask'''
max_length = features.shape[1]
mask = mask[:, :max_length]
mask_a = mask.unsqueeze(1).expand([-1, max_length, -1])
mask_b = mask.unsqueeze(2).expand([-1, -1, max_length])
mask = mask_a * mask_b
mask = torch.triu(mask).unsqueeze(3).expand([-1, -1, -1, self.args.class_num])
'''save all logits'''
logits_list = []
logits = self.cls_linear(features)
logits_list.append(logits)
for i in range(k):
#probs = torch.softmax(logits, dim=3)
probs = logits
logits = probs * mask
logits_a = torch.max(logits, dim=1)[0]
logits_b = torch.max(logits, dim=2)[0]
logits = torch.cat([logits_a.unsqueeze(3), logits_b.unsqueeze(3)], dim=3)
logits = torch.max(logits, dim=3)[0]
logits = logits.unsqueeze(2).expand([-1,-1, max_length, -1])
logits_T = logits.transpose(1, 2)
logits = torch.cat([logits, logits_T], dim=3)
new_features = torch.cat([features, logits, probs], dim=3)
features = self.feature_linear(new_features)
logits = self.cls_linear(features)
logits_list.append(logits)
return logits_list
def forward(self, tokens, masks):
output = self.bert(tokens, masks)
bert_feature = output.last_hidden_state
bert_feature = self.dropout_output(bert_feature)
bert_feature = bert_feature.unsqueeze(2).expand([-1, -1, self.args.max_sequence_len, -1])
bert_feature_T = bert_feature.transpose(1, 2)
features = torch.cat([bert_feature, bert_feature_T], dim=3)
logits = self.multi_hops(features, masks, self.args.nhops)
return logits[-1]
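# The returned tensor has shape (batch, max_sequence_len, max_sequence_len, class_num):
# one label distribution for every (row word-piece, column word-piece) cell of the GTS grid.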
| 2,451 | 37.3125 | 114 | py |
DMASTE | DMASTE-main/GTS/code/BertModel/data.py | import math
import torch
import numpy as np
sentiment2id = {'negative': 3, 'neutral': 4, 'positive': 5}
from transformers import BertTokenizer
def get_spans(tags):
'''for BIO tag'''
tags = tags.strip().split()
length = len(tags)
spans = []
start = -1
for i in range(length):
if tags[i].endswith('B'):
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[i].endswith('O'):
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
def get_evaluate_spans(tags, length, token_range):
'''for BIO tag'''
spans = []
start = -1
for i in range(length):
l, r = token_range[i]
if tags[l] == -1:
continue
elif tags[l] == 1:
if start != -1:
spans.append([start, i - 1])
start = i
elif tags[l] == 0:
if start != -1:
spans.append([start, i - 1])
start = -1
if start != -1:
spans.append([start, length - 1])
return spans
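# get_evaluate_spans() mirrors get_spans() but decodes predicted per-token tags:
# for each original word it only inspects the tag of its first word-piece
# (token_range[i][0]) and interprets 1 / 0 / -1 as begin / outside / ignore.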
class Instance(object):
def __init__(self, tokenizer, sentence_pack, args):
self.id = sentence_pack['id'] if 'id' in sentence_pack else 0
self.sentence = sentence_pack['sentence']
self.tokens = self.sentence.strip().split()
self.sen_length = len(self.tokens)
self.token_range = []
self.bert_tokens = tokenizer.encode(self.sentence)
self.length = len(self.bert_tokens)
self.bert_tokens_padding = torch.zeros(args.max_sequence_len).long()
self.aspect_tags = torch.zeros(args.max_sequence_len).long()
self.opinion_tags = torch.zeros(args.max_sequence_len).long()
self.tags = torch.zeros(args.max_sequence_len, args.max_sequence_len).long()
self.mask = torch.zeros(args.max_sequence_len)
for i in range(self.length):
self.bert_tokens_padding[i] = self.bert_tokens[i]
self.mask[:self.length] = 1
token_start = 1
for i, w in enumerate(self.tokens):
token_end = token_start + len(tokenizer.encode(w, add_special_tokens=False))
self.token_range.append([token_start, token_end-1])
token_start = token_end
assert self.length == self.token_range[-1][-1]+2
self.aspect_tags[self.length:] = -1
self.aspect_tags[0] = -1
self.aspect_tags[self.length-1] = -1
self.opinion_tags[self.length:] = -1
self.opinion_tags[0] = -1
self.opinion_tags[self.length - 1] = -1
self.tags[:, :] = -1
for i in range(1, self.length-1):
for j in range(i, self.length-1):
self.tags[i][j] = 0
for triple in sentence_pack['triples']:
aspect = triple['target_tags']
opinion = triple['opinion_tags']
aspect_span = get_spans(aspect)
opinion_span = get_spans(opinion)
'''set tag for aspect'''
for l, r in aspect_span:
start = self.token_range[l][0]
end = self.token_range[r][1]
for i in range(start, end+1):
for j in range(i, end+1):
self.tags[i][j] = 1
for i in range(l, r+1):
set_tag = 1 if i == l else 2
al, ar = self.token_range[i]
self.aspect_tags[al] = set_tag
self.aspect_tags[al+1:ar+1] = -1
'''mask positions of sub words'''
self.tags[al+1:ar+1, :] = -1
self.tags[:, al+1:ar+1] = -1
'''set tag for opinion'''
for l, r in opinion_span:
start = self.token_range[l][0]
end = self.token_range[r][1]
for i in range(start, end+1):
for j in range(i, end+1):
self.tags[i][j] = 2
for i in range(l, r+1):
set_tag = 1 if i == l else 2
pl, pr = self.token_range[i]
self.opinion_tags[pl] = set_tag
self.opinion_tags[pl+1:pr+1] = -1
self.tags[pl+1:pr+1, :] = -1
self.tags[:, pl+1:pr+1] = -1
for al, ar in aspect_span:
for pl, pr in opinion_span:
for i in range(al, ar+1):
for j in range(pl, pr+1):
sal, sar = self.token_range[i]
spl, spr = self.token_range[j]
self.tags[sal:sar+1, spl:spr+1] = -1
if args.task == 'pair':
if i > j:
self.tags[spl][sal] = 3
else:
self.tags[sal][spl] = 3
elif args.task == 'triplet':
if i > j:
self.tags[spl][sal] = sentiment2id[triple['sentiment']]
else:
self.tags[sal][spl] = sentiment2id[triple['sentiment']]
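# Summary of the grid built above: tags[i][j] is 0 for "no relation", 1 inside an
# aspect span, 2 inside an opinion span, and 3 (pair task) or sentiment2id[...]
# (triplet task) on aspect-opinion word pairs; -1 marks padding, special tokens and
# non-first word-pieces and is ignored by the loss (ignore_index=-1 in main.py).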
def load_data_instances(sentence_packs, args):
instances = list()
tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer_path)
for sentence_pack in sentence_packs:
instances.append(Instance(tokenizer, sentence_pack, args))
return instances
class DataIterator(object):
def __init__(self, instances, args):
self.instances = instances
self.args = args
self.batch_count = math.ceil(len(instances)/args.batch_size)
def get_batch(self, index):
sentence_ids = []
sentences = []
sens_lens = []
token_ranges = []
bert_tokens = []
lengths = []
masks = []
aspect_tags = []
opinion_tags = []
tags = []
for i in range(index * self.args.batch_size,
min((index + 1) * self.args.batch_size, len(self.instances))):
sentence_ids.append(self.instances[i].id)
sentences.append(self.instances[i].sentence)
sens_lens.append(self.instances[i].sen_length)
token_ranges.append(self.instances[i].token_range)
bert_tokens.append(self.instances[i].bert_tokens_padding)
lengths.append(self.instances[i].length)
masks.append(self.instances[i].mask)
aspect_tags.append(self.instances[i].aspect_tags)
opinion_tags.append(self.instances[i].opinion_tags)
tags.append(self.instances[i].tags)
bert_tokens = torch.stack(bert_tokens).to(self.args.device)
lengths = torch.tensor(lengths).to(self.args.device)
masks = torch.stack(masks).to(self.args.device)
aspect_tags = torch.stack(aspect_tags).to(self.args.device)
opinion_tags = torch.stack(opinion_tags).to(self.args.device)
tags = torch.stack(tags).to(self.args.device)
return sentence_ids, bert_tokens, lengths, masks, sens_lens, token_ranges, aspect_tags, tags
| 7,269 | 36.864583 | 100 | py |
DMASTE | DMASTE-main/BARTABSA/peng/train.py | import sys
sys.path.append('../')
import os
if 'p' in os.environ:
os.environ['CUDA_VISIBLE_DEVICES'] = os.environ['p']
# os.environ['CUDA_VISIBLE_DEVICES'] = '7'
import warnings
warnings.filterwarnings('ignore')
from data.pipe import BartBPEABSAPipe
from peng.model.bart_absa import BartSeq2SeqModel
from fastNLP import Trainer, Tester
from peng.model.metrics import Seq2SeqSpanMetric
from peng.model.losses import Seq2SeqLoss
from torch import optim
from fastNLP import BucketSampler, GradientClipCallback, cache_results, WarmupCallback
from fastNLP import FitlogCallback
from fastNLP.core.sampler import SortedSampler
from peng.model.generator import SequenceGeneratorModel
from peng.convert_to_triplets import convert
import fitlog
# fitlog.debug()
os.makedirs('logs', exist_ok=True)
fitlog.set_log_dir('logs')
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--lr', default=5e-5, type=float)
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--num_beams', default=4, type=int)
parser.add_argument('--opinion_first', action='store_true', default=False)
parser.add_argument('--n_epochs', default=50, type=int)
parser.add_argument('--decoder_type', default='avg_score', type=str, choices=['None', 'avg_score'])
parser.add_argument('--length_penalty', default=1.0, type=float)
parser.add_argument('--bart_name', default='facebook/bart-base', type=str)
parser.add_argument('--save_model_dir', type=str)
parser.add_argument('--model_name', type=str)
parser.add_argument('--use_encoder_mlp', type=int, default=1)
parser.add_argument('--save_model', type=int, default=0)
parser.add_argument('--mode', type=str, choices=['train', 'test'])
parser.add_argument('--log_dir', type=str)
args = parser.parse_args()
lr = args.lr
n_epochs = args.n_epochs
batch_size = args.batch_size
num_beams = args.num_beams
source = args.source
target = args.target
opinion_first = args.opinion_first
length_penalty = args.length_penalty
if isinstance(args.decoder_type, str) and args.decoder_type.lower() == 'none':
args.decoder_type = None
decoder_type = args.decoder_type
bart_name = args.bart_name
use_encoder_mlp = args.use_encoder_mlp
save_model = args.save_model
fitlog.add_hyper(args)
#######hyper
#######hyper
# @cache_results(cache_fn, _refresh=False)
def get_data(dataset_name):
demo=False
cache_fn = f"caches/data_{bart_name}_{dataset_name}_{opinion_first}.pt"
@cache_results(cache_fn, _refresh=False)
def func():
pipe = BartBPEABSAPipe(tokenizer=bart_name, opinion_first=opinion_first)
data_bundle = pipe.process_from_file(f'../data/{dataset_name}', demo=demo)
return data_bundle, pipe.tokenizer, pipe.mapping2id
return func()
source_data_bundle, tokenizer, mapping2id = get_data(source)
max_len = 10
max_len_a = 1.5
print("The number of tokens in tokenizer ", len(tokenizer.decoder))
bos_token_id = 0 #
eos_token_id = 1 #
label_ids = list(mapping2id.values())
model = BartSeq2SeqModel.build_model(bart_name, tokenizer, label_ids=label_ids, decoder_type=decoder_type,
copy_gate=False, use_encoder_mlp=use_encoder_mlp, use_recur_pos=False)
vocab_size = len(tokenizer)
print(vocab_size, model.decoder.decoder.embed_tokens.weight.data.size(0))
model = SequenceGeneratorModel(model, bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
max_length=max_len, max_len_a=max_len_a,num_beams=num_beams, do_sample=False,
repetition_penalty=1, length_penalty=length_penalty, pad_token_id=eos_token_id,
restricter=None)
import torch
if torch.cuda.is_available():
# device = list([i for i in range(torch.cuda.device_count())])
device = 'cuda'
else:
device = 'cpu'
parameters = []
params = {'lr':lr, 'weight_decay':1e-2}
params['params'] = [param for name, param in model.named_parameters() if not ('bart_encoder' in name or 'bart_decoder' in name)]
parameters.append(params)
params = {'lr':lr, 'weight_decay':1e-2}
params['params'] = []
for name, param in model.named_parameters():
if ('bart_encoder' in name or 'bart_decoder' in name) and not ('layernorm' in name or 'layer_norm' in name):
params['params'].append(param)
parameters.append(params)
params = {'lr':lr, 'weight_decay':0}
params['params'] = []
for name, param in model.named_parameters():
if ('bart_encoder' in name or 'bart_decoder' in name) and ('layernorm' in name or 'layer_norm' in name):
params['params'].append(param)
parameters.append(params)
optimizer = optim.AdamW(parameters)
callbacks = []
callbacks.append(GradientClipCallback(clip_value=5, clip_type='value'))
callbacks.append(WarmupCallback(warmup=0.01, schedule='linear'))
callbacks.append(FitlogCallback())
sampler = None
# sampler = ConstTokenNumSampler('src_seq_len', max_token=1000)
sampler = BucketSampler(seq_len_field_name='src_seq_len')
metric = Seq2SeqSpanMetric(eos_token_id, num_labels=len(label_ids), opinion_first=opinion_first)
model_path = None
if save_model:
model_path = 'save_models/'
if args.mode == 'train':
trainer = Trainer(train_data=source_data_bundle.get_dataset('train'), model=model, optimizer=optimizer,
loss=Seq2SeqLoss(),
batch_size=batch_size, sampler=sampler, drop_last=False, update_every=1,
num_workers=2, n_epochs=n_epochs, print_every=1,
dev_data=source_data_bundle.get_dataset('dev'), metrics=metric, metric_key='triple_f',
validate_every=-1, save_path=model_path, use_tqdm=True, device=device,
callbacks=callbacks, check_code_level=0, test_use_tqdm=False,
test_sampler=SortedSampler('src_seq_len'), dev_batch_size=batch_size)
trainer.train(load_best_model=True)
os.makedirs(args.save_model_dir, exist_ok=True)
torch.save(model, f'{args.save_model_dir}/{args.model_name}.pt')
elif args.mode == 'test':
target_data_bundle, _, _ = get_data(target)
model = torch.load(f'{args.save_model_dir}/{args.model_name}.pt')
tester = Tester(data=target_data_bundle.get_dataset('test'), model=model, metrics=metric, batch_size=args.batch_size,
num_workers=2, device=device, use_tqdm=True, callbacks=callbacks)
res = tester.test()
fitlog.add_best_metric(value=res, name='test')
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
log_file = f'{args.log_dir}/{args.model_name}/metric.txt'
with open(log_file, 'w') as f:
import json
f.write(json.dumps(res) + '\n')
pred = metric.get_pred()
examples = []
with open(f'../../ia-dataset/{target}/test.txt') as f:
for line in f:
sent, triplets = line.split('####')
triplets = eval(triplets)
examples.append([sent, triplets])
pred = convert(tokenizer, examples, pred)
with open(f'{args.log_dir}/{args.model_name}/pred.txt', 'w') as f:
for ts in pred:
f.write(str(ts) + '\n')
| 7,183 | 37.623656 | 128 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/losses.py |
from fastNLP import LossBase
import torch.nn.functional as F
from fastNLP import seq_len_to_mask
class Seq2SeqLoss(LossBase):
def __init__(self):
super().__init__()
def get_loss(self, tgt_tokens, tgt_seq_len, pred):
"""
:param tgt_tokens: bsz x max_len, [sos, tokens, eos]
:param pred: bsz x max_len-1 x vocab_size
:return:
"""
tgt_seq_len = tgt_seq_len - 1
mask = seq_len_to_mask(tgt_seq_len, max_len=tgt_tokens.size(1) - 1).eq(0)
tgt_tokens = tgt_tokens[:, 1:].masked_fill(mask, -100)
loss = F.cross_entropy(target=tgt_tokens, input=pred.transpose(1, 2))
return loss
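# Note: padded target positions are filled with -100 above, the default ignore_index
# of F.cross_entropy, so they contribute nothing to the loss.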
| 671 | 27 | 81 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/bart_absa.py | import torch
from .modeling_bart import BartEncoder, BartDecoder, BartModel
from transformers import BartTokenizer
from fastNLP import seq_len_to_mask
from fastNLP.modules import Seq2SeqEncoder, Seq2SeqDecoder, State
import torch.nn.functional as F
from fastNLP.models import Seq2SeqModel
from torch import nn
import math
class FBartEncoder(Seq2SeqEncoder):
def __init__(self, encoder):
super().__init__()
assert isinstance(encoder, BartEncoder)
self.bart_encoder = encoder
def forward(self, src_tokens, src_seq_len):
mask = seq_len_to_mask(src_seq_len, max_len=src_tokens.size(1))
dict = self.bart_encoder(input_ids=src_tokens, attention_mask=mask, return_dict=True,
output_hidden_states=True)
encoder_outputs = dict.last_hidden_state
hidden_states = dict.hidden_states
return encoder_outputs, mask, hidden_states
class FBartDecoder(Seq2SeqDecoder):
def __init__(self, decoder, pad_token_id, label_ids, use_encoder_mlp=True):
super().__init__()
assert isinstance(decoder, BartDecoder)
self.decoder = decoder
causal_mask = torch.zeros(512, 512).fill_(float('-inf'))
causal_mask = causal_mask.triu(diagonal=1)
self.register_buffer('causal_masks', causal_mask.float())
self.pad_token_id = pad_token_id
self.label_start_id = label_ids[0]
self.label_end_id = label_ids[-1]+1
# 0th position is <s>, 1st position is </s>
mapping = torch.LongTensor([0, 2]+sorted(label_ids, reverse=False))
self.register_buffer('mapping', mapping)
self.src_start_index = len(mapping)  # token ids >= this index point into the source sentence
hidden_size = decoder.embed_tokens.weight.size(1)
if use_encoder_mlp:
self.encoder_mlp = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Dropout(0.3),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size))
def forward(self, tokens, state):
# bsz, max_len = tokens.size()
encoder_outputs = state.encoder_output
encoder_pad_mask = state.encoder_mask
first = state.first
# eos is 1
cumsum = tokens.eq(1).flip(dims=[1]).cumsum(dim=-1)
tgt_pad_mask = cumsum.flip(dims=[1]).ne(cumsum[:, -1:])
# mapping to the BART token index
mapping_token_mask = tokens.lt(self.src_start_index) #
mapped_tokens = tokens.masked_fill(tokens.ge(self.src_start_index), 0)
tag_mapped_tokens = self.mapping[mapped_tokens]
src_tokens_index = tokens - self.src_start_index # bsz x num_src_token
src_tokens_index = src_tokens_index.masked_fill(src_tokens_index.lt(0), 0)
src_tokens = state.src_tokens
if first is not None:
src_tokens = src_tokens.gather(index=first, dim=1)
word_mapped_tokens = src_tokens.gather(index=src_tokens_index, dim=1)
tokens = torch.where(mapping_token_mask, tag_mapped_tokens, word_mapped_tokens)
tokens = tokens.masked_fill(tgt_pad_mask, self.pad_token_id)
if self.training:
tokens = tokens[:, :-1]
decoder_pad_mask = tokens.eq(self.pad_token_id)
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=decoder_pad_mask,
decoder_causal_mask=self.causal_masks[:tokens.size(1), :tokens.size(1)],
return_dict=True)
else:
past_key_values = state.past_key_values
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=None,
decoder_causal_mask=None,
past_key_values=past_key_values,
use_cache=True,
return_dict=True)
hidden_state = dict.last_hidden_state # bsz x max_len x hidden_size
if not self.training:
state.past_key_values = dict.past_key_values
logits = hidden_state.new_full((hidden_state.size(0), hidden_state.size(1), self.src_start_index+src_tokens.size(-1)),
fill_value=-1e24)
# first get the
eos_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[2:3]) # bsz x max_len x 1
tag_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[self.label_start_id:self.label_end_id]) # bsz x max_len x num_class
# bsz x max_word_len x hidden_size
src_outputs = state.encoder_output
if hasattr(self, 'encoder_mlp'):
src_outputs = self.encoder_mlp(src_outputs)
if first is not None:
mask = first.eq(0)  # bsz x 1 x max_word_len; positions where the mask is 1 are padding
src_outputs = src_outputs.gather(index=first.unsqueeze(2).repeat(1, 1, src_outputs.size(-1)), dim=1)
else:
mask = state.encoder_mask.eq(0)
mask = mask.unsqueeze(1).__or__(src_tokens.eq(2).cumsum(dim=1).ge(1).unsqueeze(1))
word_scores = torch.einsum('blh,bnh->bln', hidden_state, src_outputs) # bsz x max_len x max_word_len
word_scores = word_scores.masked_fill(mask, -1e32)
logits[:, :, 1:2] = eos_scores
logits[:, :, 2:self.src_start_index] = tag_scores
logits[:, :, self.src_start_index:] = word_scores
return logits
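# Layout of the distribution assembled above: index 1 scores the end-of-sequence
# token, indices 2 .. src_start_index-1 score the special label/sentiment tokens,
# and indices >= src_start_index point to positions of the source sentence.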
def decode(self, tokens, state):
return self(tokens, state)[:, -1]
class CaGFBartDecoder(FBartDecoder):
# Copy and generate,
def __init__(self, decoder, pad_token_id, label_ids, use_encoder_mlp=False):
super().__init__(decoder, pad_token_id, label_ids, use_encoder_mlp=use_encoder_mlp)
def forward(self, tokens, state):
encoder_outputs = state.encoder_output
encoder_pad_mask = state.encoder_mask
first = state.first
cumsum = tokens.eq(1).flip(dims=[1]).cumsum(dim=-1)
tgt_pad_mask = cumsum.flip(dims=[1]).ne(cumsum[:, -1:])
mapping_token_mask = tokens.lt(self.src_start_index)
mapped_tokens = tokens.masked_fill(tokens.ge(self.src_start_index), 0)
tag_mapped_tokens = self.mapping[mapped_tokens]
src_tokens_index = tokens - self.src_start_index # bsz x num_src_token
src_tokens_index = src_tokens_index.masked_fill(src_tokens_index.lt(0), 0)
src_tokens = state.src_tokens
if first is not None:
src_tokens = src_tokens.gather(index=first, dim=1)
word_mapped_tokens = src_tokens.gather(index=src_tokens_index, dim=1)
tokens = torch.where(mapping_token_mask, tag_mapped_tokens, word_mapped_tokens) # bsz x max_len
tokens = tokens.masked_fill(tgt_pad_mask, self.pad_token_id)
if self.training:
tokens = tokens[:, :-1]
decoder_pad_mask = tokens.eq(self.pad_token_id)  # the decoder expects pad positions to be 1
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=decoder_pad_mask,
decoder_causal_mask=self.causal_masks[:tokens.size(1), :tokens.size(1)],
return_dict=True)
else:
past_key_values = state.past_key_values
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
encoder_padding_mask=encoder_pad_mask,
decoder_padding_mask=None,
decoder_causal_mask=None,
past_key_values=past_key_values,
use_cache=True,
return_dict=True)
hidden_state = dict.last_hidden_state # bsz x max_len x hidden_size
if not self.training:
state.past_key_values = dict.past_key_values
logits = hidden_state.new_full((hidden_state.size(0), hidden_state.size(1), self.src_start_index+src_tokens.size(-1)),
fill_value=-1e24)
eos_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[2:3]) # bsz x max_len x 1
tag_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[self.label_start_id:self.label_end_id]) # bsz x max_len x num_class
# bsz x max_bpe_len x hidden_size
src_outputs = state.encoder_output
if hasattr(self, 'encoder_mlp'):
src_outputs = self.encoder_mlp(src_outputs)
if first is not None:
mask = first.eq(0)  # bsz x 1 x max_word_len; positions where the mask is 1 are padding
# bsz x max_word_len x hidden_size
src_outputs = src_outputs.gather(index=first.unsqueeze(2).repeat(1, 1, src_outputs.size(-1)), dim=1)
else:
mask = state.encoder_mask.eq(0)
# src_outputs = self.decoder.embed_tokens(src_tokens)
mask = mask.unsqueeze(1)
input_embed = self.decoder.embed_tokens(src_tokens) # bsz x max_word_len x hidden_size
word_scores = torch.einsum('blh,bnh->bln', hidden_state, src_outputs) # bsz x max_len x max_word_len
gen_scores = torch.einsum('blh,bnh->bln', hidden_state, input_embed) # bsz x max_len x max_word_len
word_scores = (gen_scores + word_scores)/2
mask = mask.__or__(src_tokens.eq(2).cumsum(dim=1).ge(1).unsqueeze(1))
word_scores = word_scores.masked_fill(mask, -1e32)
logits[:, :, 1:2] = eos_scores
logits[:, :, 2:self.src_start_index] = tag_scores
logits[:, :, self.src_start_index:] = word_scores
return logits
class BartSeq2SeqModel(Seq2SeqModel):
@classmethod
def build_model(cls, bart_model, tokenizer, label_ids, decoder_type=None, copy_gate=False,
use_encoder_mlp=False, use_recur_pos=False, tag_first=False):
model = BartModel.from_pretrained(bart_model)
num_tokens, _ = model.encoder.embed_tokens.weight.shape
model.resize_token_embeddings(len(tokenizer.unique_no_split_tokens)+num_tokens)
encoder = model.encoder
decoder = model.decoder
if use_recur_pos:
decoder.set_position_embedding(label_ids[0], tag_first)
_tokenizer = BartTokenizer.from_pretrained(bart_model)
for token in tokenizer.unique_no_split_tokens:
if token[:2] == '<<':
index = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(token))
if len(index)>1:
raise RuntimeError(f"{token} wrong split")
else:
index = index[0]
assert index>=num_tokens, (index, num_tokens, token)
indexes = _tokenizer.convert_tokens_to_ids(_tokenizer.tokenize(token[2:-2]))
embed = model.encoder.embed_tokens.weight.data[indexes[0]]
for i in indexes[1:]:
embed += model.decoder.embed_tokens.weight.data[i]
embed /= len(indexes)
model.decoder.embed_tokens.weight.data[index] = embed
encoder = FBartEncoder(encoder)
label_ids = sorted(label_ids)
if decoder_type is None:
assert copy_gate is False
decoder = FBartDecoder(decoder, pad_token_id=tokenizer.pad_token_id, label_ids=label_ids)
elif decoder_type =='avg_score':
decoder = CaGFBartDecoder(decoder, pad_token_id=tokenizer.pad_token_id, label_ids=label_ids,
use_encoder_mlp=use_encoder_mlp)
else:
raise RuntimeError("Unsupported feature.")
return cls(encoder=encoder, decoder=decoder)
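# Note: the loop above initialises every added '<<...>>' marker token with the
# average of the BART embeddings of the wrapped text's BPE pieces, rather than
# leaving it randomly initialised.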
def prepare_state(self, src_tokens, src_seq_len=None, first=None, tgt_seq_len=None):
encoder_outputs, encoder_mask, hidden_states = self.encoder(src_tokens, src_seq_len)
src_embed_outputs = hidden_states[0]
state = BartState(encoder_outputs, encoder_mask, src_tokens, first, src_embed_outputs)
# setattr(state, 'tgt_seq_len', tgt_seq_len)
return state
def forward(self, src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first):
"""
:param torch.LongTensor src_tokens: tokens of the source sequence
:param torch.LongTensor tgt_tokens: tokens of the target sequence
:param torch.LongTensor first: index of the first subword of each word, bsz x max_word_len
:param torch.LongTensor src_seq_len: lengths of the source sequences
:param torch.LongTensor tgt_seq_len: lengths of the target sequences (unused by default)
:return: {'pred': torch.Tensor}, where pred has shape bsz x max_len x vocab_size
"""
state = self.prepare_state(src_tokens, src_seq_len, first, tgt_seq_len)
decoder_output = self.decoder(tgt_tokens, state)
if isinstance(decoder_output, torch.Tensor):
return {'pred': decoder_output}
elif isinstance(decoder_output, (tuple, list)):
return {'pred': decoder_output[0]}
else:
raise TypeError(f"Unsupported return type from Decoder:{type(self.decoder)}")
class BartState(State):
def __init__(self, encoder_output, encoder_mask, src_tokens, first, src_embed_outputs):
super().__init__(encoder_output, encoder_mask)
self.past_key_values = None
self.src_tokens = src_tokens
self.first = first
self.src_embed_outputs = src_embed_outputs
def reorder_state(self, indices: torch.LongTensor):
super().reorder_state(indices)
self.src_tokens = self._reorder_state(self.src_tokens, indices)
if self.first is not None:
self.first = self._reorder_state(self.first, indices)
self.src_embed_outputs = self._reorder_state(self.src_embed_outputs, indices)
if self.past_key_values is not None:
new = []
for layer in self.past_key_values:
new_layer = {}
for key1 in list(layer.keys()):
new_layer_ = {}
for key2 in list(layer[key1].keys()):
if layer[key1][key2] is not None:
layer[key1][key2] = self._reorder_state(layer[key1][key2], indices)
# print(key1, key2, layer[key1][key2].shape)
new_layer_[key2] = layer[key1][key2]
new_layer[key1] = new_layer_
new.append(new_layer)
self.past_key_values = new | 14,844 | 46.428115 | 145 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/modeling_bart.py | # coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BART model, ported from the fairseq repo."""
import math
import random
import warnings
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bart import *
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"
BART_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/bart-base",
"facebook/bart-large",
"facebook/bart-large-mnli",
"facebook/bart-large-cnn",
"facebook/bart-large-xsum",
"facebook/mbart-large-en-ro",
]
# This list is incomplete. See all BART models at https://huggingface.co/models?filter=bart
BART_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BART_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
>>> # see ``examples/summarization/bart/run_eval.py`` for a longer example
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
BART_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.BartTokenizer`.
See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the :obj:`input_ids` to the right, following the paper.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and
modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more
information on the default strategy.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`)
:obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a
sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape
:obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
def invert_mask(attention_mask):
"""Turns 1->0, 0->1, False->True, True-> False"""
assert attention_mask.dim() == 2
return attention_mask.eq(0)
def _prepare_bart_decoder_inputs(
config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32
):
"""Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if
none are provided. This mimics the default behavior in fairseq. To override it pass in masks.
Note: this is not called during generation
"""
pad_token_id = config.pad_token_id
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
bsz, tgt_len = decoder_input_ids.size()
if decoder_padding_mask is None:
decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
else:
decoder_padding_mask = invert_mask(decoder_padding_mask)
if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1:
# never mask leading token, even if it is pad
decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1]
tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len))
mask = torch.arange(tmp.size(-1))
tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0)
causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device)
return decoder_input_ids, decoder_padding_mask, causal_mask
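# Illustrative note: for tgt_len == 3 the causal mask built above keeps positions at or below the
# diagonal (set to 0) and blocks the future with -inf, e.g.
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so during teacher-forced decoding token i may only attend to tokens 0..i.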
class PretrainedBartModel(PreTrainedModel):
config_class = BartConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, SinusoidalPositionalEmbedding):
pass
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
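# Note: nn.Linear(vocab_size, emb_size) is only a placeholder here; assigning emb.weight.data
# (shape: vocab_size x emb_size) makes F.linear compute x @ weight.T, so the resulting layer
# actually maps hidden states of size emb_size to vocab_size logits, i.e. an output projection
# tied to the input embedding.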
# Helper Functions, mostly for making masks
def _check_shapes(shape_1, shape2):
if shape_1 != shape2:
raise AssertionError("shape mismatch: {} != {}".format(shape_1, shape2))
def shift_tokens_right(input_ids, pad_token_id):
"""Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens
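# Worked example: with pad_token_id = 1 and
#   input_ids = [[0, 47, 55, 2, 1]]   # <s> ... </s> <pad>
# the last non-pad token (the </s> at index 3) is wrapped around to position 0 and everything
# else shifts right by one:
#   shift_tokens_right -> [[2, 0, 47, 55, 2]]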
def make_padding_mask(input_ids, padding_idx=1):
"""True for pad tokens"""
padding_mask = input_ids.eq(padding_idx)
if not padding_mask.any():
padding_mask = None
return padding_mask
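# Example: make_padding_mask(torch.tensor([[0, 47, 2, 1, 1]]), padding_idx=1) returns
# [[False, False, False, True, True]]; if no position equals padding_idx the function returns
# None so callers can skip masking entirely.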
# Helper Modules
class EncoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
self.normalize_before = config.normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(self, x, encoder_padding_mask, output_attentions=False):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn_weights = self.self_attn(
query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
if torch.isinf(x).any() or torch.isnan(x).any():
clamp_value = torch.finfo(x.dtype).max - 1000
x = torch.clamp(x, min=-clamp_value, max=clamp_value)
return x, attn_weights
class BartEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
is a :class:`EncoderLayer`.
Args:
config: BartConfig
"""
def __init__(self, config: BartConfig, embed_tokens):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = config.max_position_embeddings
self.embed_tokens = embed_tokens
if config.static_position_embeddings:
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx
)
else:
self.embed_positions = LearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
config.extra_pos_embeddings,
)
self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()
# mbart has one extra layer_norm
self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None
def forward(
self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False
):
"""
Args:
input_ids (LongTensor): tokens in the source language of shape
`(batch, src_len)`
attention_mask (torch.LongTensor): indicating which indices are padding tokens.
Returns:
BaseModelOutput or Tuple comprised of:
- **x** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_states** (tuple(torch.FloatTensor)): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
              Only populated if *output_hidden_states* is True.
- **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer.
During training might not be of length n_layers because of layer dropout.
"""
# check attention mask and invert
if attention_mask is not None:
attention_mask = invert_mask(attention_mask)
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_ids)
x = inputs_embeds + embed_pos
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = [] if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states.append(x)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
attn = None
else:
x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions)
if output_attentions:
all_attentions = all_attentions + (attn,)
if self.layer_norm:
x = self.layer_norm(x)
if output_hidden_states:
encoder_states.append(x)
# T x B x C -> B x T x C
encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if not return_dict:
return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
class DecoderLayer(nn.Module):
def __init__(self, config: BartConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Attention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.normalize_before = config.normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = Attention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
decoder_padding_mask=None,
output_attentions=False,
):
residual = x
if layer_state is None:
layer_state = {}
if self.normalize_before:
x = self.self_attn_layer_norm(x)
# Self Attention
x, self_attn_weights = self.self_attn(
query=x,
key=x,
layer_state=layer_state, # adds keys to layer state
key_padding_mask=decoder_padding_mask,
attn_mask=causal_mask,
output_attentions=output_attentions,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
# Cross attention
residual = x
assert self.encoder_attn.cache_key != self.self_attn.cache_key
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, _ = self.encoder_attn(
query=x,
key=encoder_hidden_states,
key_padding_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
# Fully Connected
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return (
x,
self_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
class BartDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer
is a :class:`DecoderLayer`.
Args:
config: BartConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BartConfig, embed_tokens: nn.Embedding):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm # layernorm variant
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
if config.static_position_embeddings:
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings, config.d_model, config.pad_token_id
)
else:
self.embed_positions = LearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
config.extra_pos_embeddings
)
self.layers = nn.ModuleList(
[DecoderLayer(config) for _ in range(config.decoder_layers)]
) # type: List[DecoderLayer]
self.layernorm_embedding = LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()
self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None
self.config = config
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
past_key_values=None,
use_cache=False,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
**unused,
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
input_ids (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
past_key_values (dict or None): dictionary used for storing state during generation
Returns:
BaseModelOutputWithPast or tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- the cache
- hidden states
- attentions
"""
if "decoder_cached_states" in unused:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_cached_states")
if "decoder_past_key_values" in unused:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_past_key_values")
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
# embed positions
positions = self.embed_positions(input_ids, use_cache=use_cache)
if use_cache:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:]
x = self.embed_tokens(input_ids) * self.embed_scale
if self.do_blenderbot_90_layernorm:
x = self.layernorm_embedding(x)
x += positions
else:
x += positions
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
        # Convert from batch-first to time-first for the decoder layers: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = []
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (x,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_state = past_key_values[idx] if past_key_values is not None else None
x, layer_self_attn, layer_past = decoder_layer(
x,
encoder_hidden_states,
encoder_attn_mask=encoder_padding_mask,
decoder_padding_mask=decoder_padding_mask,
layer_state=layer_state,
causal_mask=decoder_causal_mask,
output_attentions=output_attentions,
)
if use_cache:
next_decoder_cache.append(layer_past.copy())
if output_attentions:
all_self_attns += (layer_self_attn,)
if self.layer_norm: # if config.add_final_layer_norm (mBART)
x = self.layer_norm(x)
# Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
if output_hidden_states:
all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns
)
def _reorder_buffer(attn_cache, new_order):
for k, input_buffer_k in attn_cache.items():
if input_buffer_k is not None:
attn_cache[k] = input_buffer_k.index_select(0, new_order)
return attn_cache
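# Note: during beam search the cached "prev_key"/"prev_value" tensors have a leading dimension of
# batch_size * num_beams. index_select(0, new_order) re-aligns each cache entry with the beams
# that survived the latest step, e.g. new_order = tensor([0, 0, 3]) keeps two copies of beam 0
# and one copy of beam 3.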
class Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
encoder_decoder_attention=False, # otherwise self_attention
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.encoder_decoder_attention = encoder_decoder_attention
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
def _shape(self, tensor, seq_len, bsz):
return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
def forward(
self,
query,
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
attn_mask: Optional[Tensor] = None,
output_attentions=False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time(SeqLen) x Batch x Channel"""
static_kv: bool = self.encoder_decoder_attention
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # for encoder-decoder attention the cached key/value are static across decoding steps (static_kv)
if layer_state is not None: # reuse k,v and encoder_padding_mask
saved_state = layer_state.get(self.cache_key, {})
if "prev_key" in saved_state and static_kv:
# previous time steps are cached - no need to recompute key and value if they are static
key = None
else:
saved_state = None
layer_state = {}
q = self.q_proj(query) * self.scaling
if static_kv:
if key is None:
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
k = self.k_proj(query)
v = self.v_proj(query)
q = self._shape(q, tgt_len, bsz)
if k is not None:
k = self._shape(k, -1, bsz)
if v is not None:
v = self._shape(v, -1, bsz)
if saved_state is not None:
k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)
# Update cache
layer_state[self.cache_key] = {
"prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
"prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
"prev_key_padding_mask": key_padding_mask if not static_kv else None,
}
assert k is not None
src_len = k.size(1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
if attn_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# This is part of a workaround to get around fork/join parallelism not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
assert key_padding_mask is None or key_padding_mask.size()[:2] == (
bsz,
src_len,
)
if key_padding_mask is not None: # don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
attn_weights = attn_weights.masked_fill(reshaped, float("-inf"))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_probs = F.dropout(
attn_weights,
p=self.dropout,
training=self.training,
)
assert v is not None
attn_output = torch.bmm(attn_probs, v)
assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = self.out_proj(attn_output)
if output_attentions:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
else:
attn_weights = None
return attn_output, attn_weights
def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
assert k is not None and v is not None
prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
if prev_key_padding_mask is not None:
if static_kv:
new_key_padding_mask = prev_key_padding_mask
else:
new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
else:
new_key_padding_mask = key_padding_mask
return k, v, new_key_padding_mask
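# Note: after a decoding step, `layer_state` for each DecoderLayer looks like
#   {"self":            {"prev_key": (bsz, num_heads, tgt_len_so_far, head_dim),
#                        "prev_value": ..., "prev_key_padding_mask": ...},
#    "encoder_decoder": {"prev_key": (bsz, num_heads, src_len, head_dim), ...}}
# The "encoder_decoder" entry is static (computed once from the encoder output), while the
# "self" entry grows by one time step per generated token.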
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
# This can trivially be shared with RobertaClassificationHead
def __init__(
self,
input_dim,
inner_dim,
num_classes,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, x):
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset):
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack.
self.offset = offset
assert padding_idx is not None
num_embeddings += offset
super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)
def forward(self, input_ids, use_cache=False):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_ids.shape[:2]
if use_cache:
positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing
else:
            # starts at 0, ends at seq_len - 1
positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(positions + self.offset)
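# Example: BART uses offset = 2 (config.extra_pos_embeddings), so for a 5-token input the
# embedding rows looked up are [2, 3, 4, 5, 6]; roughly, the first `offset` rows are kept only
# for compatibility with the fairseq checkpoint layout, where they were reserved for padding.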
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):
if torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a input_ids with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
# Public API
def _get_shape(t):
return getattr(t, "shape", None)
@add_start_docstrings(
"The bare BART Model outputting raw hidden-states without any specific head on top.",
BART_START_DOCSTRING,
)
class BartModel(PretrainedBartModel):
def __init__(self, config: BartConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = BartEncoder(config, self.shared)
self.decoder = BartDecoder(config, self.shared)
self.init_weights()
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/bart-large",
output_type=Seq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs: Optional[Tuple] = None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
if decoder_input_ids is None:
use_cache = False
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# make masks if user doesn't supply
if not use_cache:
decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs(
self.config,
input_ids,
decoder_input_ids=decoder_input_ids,
decoder_padding_mask=decoder_attention_mask,
causal_mask_dtype=self.shared.weight.dtype,
)
else:
decoder_padding_mask, causal_mask = None, None
assert decoder_input_ids is not None
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
        # If the user passed a tuple for encoder_outputs, wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
decoder_input_ids,
encoder_outputs[0],
attention_mask,
decoder_padding_mask,
decoder_causal_mask=causal_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_output_embeddings(self):
return _make_linear_from_emb(self.shared) # make it on the fly
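# Usage sketch (illustrative only; the checkpoint name is the usual public one):
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
#     model = BartModel.from_pretrained("facebook/bart-large")
#     inputs = tokenizer(["Hello world"], return_tensors="pt")
#     out = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"],
#                 return_dict=True)
#     # out.last_hidden_state: (batch_size, target_sequence_length, d_model); with no explicit
#     # decoder_input_ids they are derived from input_ids via shift_tokens_right above.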
@add_start_docstrings(
"The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING
)
class BartForConditionalGeneration(PretrainedBartModel):
base_model_prefix = "model"
authorized_missing_keys = [r"final_logits_bias", r"encoder\.version", r"decoder\.version"]
def __init__(self, config: BartConfig):
super().__init__(config)
base_model = BartModel(config)
self.model = base_model
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
old_num_tokens = self.model.shared.num_embeddings
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self.model.shared = new_embeddings
self._resize_final_logits_bias(new_num_tokens, old_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(BART_GENERATION_EXAMPLE)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**unused,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels in ``[0, ..., config.vocab_size]``.
Returns:
Conditional generation example::
>>> # Mask filling only works for bart-large
>>> from transformers import BartTokenizer, BartForConditionalGeneration
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')
>>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
>>> # ['good', 'great', 'all', 'really', 'very']
"""
if "lm_labels" in unused:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = unused.pop("lm_labels")
if "decoder_cached_states" in unused:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_cached_states")
if "decoder_past_key_values" in unused:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = unused.pop("decoder_past_key_values")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# TODO(SS): do we need to ignore pad tokens in labels?
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs
):
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def adjust_logits_during_generation(self, logits, cur_len, max_length):
if cur_len == 1 and self.config.force_bos_token_to_be_generated:
self._force_token_ids_generation(logits, self.config.bos_token_id)
elif cur_len == max_length - 1 and self.config.eos_token_id is not None:
self._force_token_ids_generation(logits, self.config.eos_token_id)
return logits
def _force_token_ids_generation(self, scores, token_id) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float("inf"))"""
scores[:, [x for x in range(self.config.vocab_size) if x != token_id]] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = []
for layer_past in past:
# get the correct batch idx from decoder layer's batch dim for cross and self-attn
layer_past_new = {
attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
}
reordered_past.append(layer_past_new)
return reordered_past
def get_encoder(self):
return self.model.encoder
def get_output_embeddings(self):
return _make_linear_from_emb(self.model.shared) # make it on the fly
@add_start_docstrings(
"""Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """,
BART_START_DOCSTRING,
)
class BartForSequenceClassification(PretrainedBartModel):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = BartModel(config)
self.classification_head = BartClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
self.model._init_weights(self.classification_head.dense)
self.model._init_weights(self.classification_head.out_proj)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/bart-large",
output_type=Seq2SeqSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
x = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id)
if len(torch.unique(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"""BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BART_START_DOCSTRING,
)
class BartForQuestionAnswering(PretrainedBartModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.model = BartModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.model._init_weights(self.qa_outputs)
@add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="facebook/bart-large",
output_type=Seq2SeqQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
start_positions=None,
end_positions=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (
start_logits,
end_logits,
) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return Seq2SeqQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
class SinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions, embedding_dim, padding_idx=None):
super().__init__(num_positions, embedding_dim)
if embedding_dim % 2 != 0:
raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter):
"""Identical to the XLM create_sinusoidal_embeddings except features are not interleaved.
The cos features are in the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out[:, 0: dim // 2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) # This line breaks for odd n_pos
out[:, dim // 2:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
return out
@torch.no_grad()
def forward(self, input_ids, use_cache=False):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_ids.shape[:2]
if use_cache:
positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing
else:
            # starts at 0, ends at seq_len - 1
positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(positions)
| 58,282 | 41.234058 | 213 | py |
DMASTE | DMASTE-main/BARTABSA/peng/model/generator.py | r"""Modify from fastNLP"""
import torch
from torch import nn
from fastNLP.models.seq2seq_model import Seq2SeqModel
from fastNLP.modules.decoder.seq2seq_decoder import Seq2SeqDecoder, State
import torch.nn.functional as F
from fastNLP.core.utils import _get_model_device
from functools import partial
class SequenceGeneratorModel(nn.Module):
"""
用于封装Seq2SeqModel使其可以做生成任务
"""
def __init__(self, seq2seq_model: Seq2SeqModel, bos_token_id, eos_token_id=None, max_length=30, max_len_a=0.0,
num_beams=1, do_sample=True,
repetition_penalty=1, length_penalty=1.0, pad_token_id=0,
restricter=None):
"""
:param Seq2SeqModel seq2seq_model: 序列到序列模型. 会使用seq2seq_model的decoder进行生成
:param int,None bos_token_id: 句子开头的token id
:param int,None eos_token_id: 句子结束的token id
:param int max_length: 生成句子的最大长度, 每句话的decode长度为max_length + max_len_a*src_len
:param float max_len_a: 每句话的decode长度为max_length + max_len_a*src_len。 如果不为0,需要保证State中包含encoder_mask
:param int num_beams: beam search的大小
:param bool do_sample: 是否通过采样的方式生成
:param float temperature: 只有在do_sample为True才有意义
:param int top_k: 只从top_k中采样
:param float top_p: 只从top_p的token中采样,nucles sample
:param float repetition_penalty: 多大程度上惩罚重复的token
:param float length_penalty: 对长度的惩罚,小于1鼓励长句,大于1鼓励短剧
:param int pad_token_id: 当某句话生成结束之后,之后生成的内容用pad_token_id补充
"""
super().__init__()
self.seq2seq_model = seq2seq_model
self.restricter = restricter
self.generator = SequenceGenerator(seq2seq_model.decoder, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
do_sample=do_sample,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty, length_penalty=length_penalty,
pad_token_id=pad_token_id,
restricter=restricter)
def forward(self, src_tokens, tgt_tokens, src_seq_len=None, tgt_seq_len=None, first=None):
"""
        Pass-through call to seq2seq_model's forward.
:param torch.LongTensor src_tokens: bsz x max_len
:param torch.LongTensor tgt_tokens: bsz x max_len'
:param torch.LongTensor src_seq_len: bsz
:param torch.LongTensor tgt_seq_len: bsz
:return:
"""
return self.seq2seq_model(src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first)
def predict(self, src_tokens, src_seq_len=None, first=None):
"""
        Given the source tokens, return the generated output.
:param torch.LongTensor src_tokens: bsz x max_len
:param torch.LongTensor src_seq_len: bsz
:return:
"""
state = self.seq2seq_model.prepare_state(src_tokens, src_seq_len, first)
result = self.generator.generate(state)
return {'pred': result}
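# Usage sketch (illustrative; `bart_seq2seq` stands for whatever fastNLP Seq2SeqModel is built
# elsewhere in this project -- the name is hypothetical, not an actual import):
#
#     model = SequenceGeneratorModel(bart_seq2seq, bos_token_id=0, eos_token_id=1,
#                                    max_length=30, num_beams=4, do_sample=False,
#                                    repetition_penalty=1.0, length_penalty=1.0, pad_token_id=1)
#     output = model.predict(src_tokens, src_seq_len)   # {'pred': LongTensor of generated ids}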
r"""
"""
__all__ = [
'SequenceGenerator'
]
class SequenceGenerator:
"""
给定一个Seq2SeqDecoder,decode出句子
"""
def __init__(self, decoder: Seq2SeqDecoder, max_length=20, max_len_a=0.0, num_beams=1,
do_sample=False, bos_token_id=None, eos_token_id=None,
repetition_penalty=1, length_penalty=1.0, pad_token_id=0, restricter=None):
"""
:param Seq2SeqDecoder decoder: Decoder对象
:param int max_length: 生成句子的最大长度, 每句话的decode长度为max_length + max_len_a*src_len
:param float max_len_a: 每句话的decode长度为max_length + max_len_a*src_len。 如果不为0,需要保证State中包含encoder_mask
:param int num_beams: beam search的大小
:param bool do_sample: 是否通过采样的方式生成
:param float temperature: 只有在do_sample为True才有意义
:param int top_k: 只从top_k中采样
:param float top_p: 只从top_p的token中采样,nucles sample
:param int,None bos_token_id: 句子开头的token id
:param int,None eos_token_id: 句子结束的token id
:param float repetition_penalty: 多大程度上惩罚重复的token
:param float length_penalty: 对长度的惩罚,小于1鼓励长句,大于1鼓励短剧
:param int pad_token_id: 当某句话生成结束之后,之后生成的内容用pad_token_id补充
"""
self.generate_func = partial(greedy_generate, decoder=decoder, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
bos_token_id=bos_token_id, eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty, pad_token_id=pad_token_id,
restricter=restricter)
self.do_sample = do_sample
self.max_length = max_length
self.num_beams = num_beams
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.repetition_penalty = repetition_penalty
self.length_penalty = length_penalty
self.decoder = decoder
self.pad_token_id = pad_token_id
self.restricter = restricter
self.max_len_a = max_len_a
def set_new_generator(self, max_length=-1, max_len_a=-1, num_beams=-1,
repetition_penalty=-1, length_penalty=-1, restricter=-1):
if max_length == -1:
max_length = self.max_length
if max_len_a == -1:
max_len_a = self.max_len_a
if num_beams == -1:
num_beams = self.num_beams
if repetition_penalty == -1:
repetition_penalty = self.repetition_penalty
if length_penalty == -1:
length_penalty = self.length_penalty
if restricter == -1:
restricter = self.restricter
self.generate_func = partial(greedy_generate, decoder=self.decoder, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty, pad_token_id=self.pad_token_id,
restricter=restricter)
@torch.no_grad()
def generate(self, state, tokens=None):
"""
:param State state: encoder结果的State, 是与Decoder配套是用的
:param torch.LongTensor,None tokens: batch_size x length, 开始的token
:return: bsz x max_length' 生成的token序列。如果eos_token_id不为None, 每个sequence的结尾一定是eos_token_id
"""
return self.generate_func(tokens=tokens, state=state)
@torch.no_grad()
def greedy_generate(decoder, tokens=None, state=None, max_length=20, max_len_a=0.0, num_beams=1,
bos_token_id=None, eos_token_id=None, pad_token_id=0,
repetition_penalty=1, length_penalty=1.0, restricter=None):
"""
贪婪地搜索句子
:param Decoder decoder: Decoder对象
:param torch.LongTensor tokens: batch_size x len, decode的输入值,如果为None,则自动从bos_token_id开始生成
:param State state: 应该包含encoder的一些输出。
:param int max_length: 生成句子的最大长度, 每句话的decode长度为max_length + max_len_a*src_len
:param float max_len_a: 每句话的decode长度为max_length + max_len_a*src_len。 如果不为0,需要保证State中包含encoder_mask
:param int num_beams: 使用多大的beam进行解码。
:param int bos_token_id: 如果tokens传入为None,则使用bos_token_id开始往后解码。
:param int eos_token_id: 结束的token,如果为None,则一定会解码到max_length这么长。
:param int pad_token_id: pad的token id
:param float repetition_penalty: 对重复出现的token多大的惩罚。
:param float length_penalty: 对每个token(除了eos)按照长度进行一定的惩罚。
:return:
"""
if num_beams == 1:
token_ids = _no_beam_search_generate(decoder, tokens=tokens, state=state, max_length=max_length, max_len_a=max_len_a,
bos_token_id=bos_token_id, eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty, length_penalty=length_penalty,
pad_token_id=pad_token_id, restricter=restricter)
else:
token_ids = _beam_search_generate(decoder, tokens=tokens, state=state, max_length=max_length, max_len_a=max_len_a,
num_beams=num_beams,
bos_token_id=bos_token_id, eos_token_id=eos_token_id, do_sample=False,
repetition_penalty=repetition_penalty, length_penalty=length_penalty,
pad_token_id=pad_token_id, restricter=restricter)
return token_ids
def _no_beam_search_generate(decoder: Seq2SeqDecoder, state, tokens=None, max_length=20, max_len_a=0.0, bos_token_id=None,
eos_token_id=None,
repetition_penalty=1.0, length_penalty=1.0, pad_token_id=0,
restricter=None):
device = _get_model_device(decoder)
if tokens is None:
if bos_token_id is None:
raise RuntimeError("You have to specify either `tokens` or `bos_token_id`.")
batch_size = state.num_samples
if batch_size is None:
raise RuntimeError("Cannot infer the number of samples from `state`.")
tokens = torch.full([batch_size, 1], fill_value=bos_token_id, dtype=torch.long).to(device)
batch_size = tokens.size(0)
if state.num_samples:
assert state.num_samples == batch_size, "The number of samples in `tokens` and `state` should match."
if eos_token_id is None:
_eos_token_id = -1
else:
_eos_token_id = eos_token_id
    scores = decoder.decode(tokens=tokens, state=state)  # mainly to update the state
    # here we would need to handle sequences that end at the very first position
# if _eos_token_id!=-1:
# scores[:, _eos_token_id] = -1e12
if restricter is not None:
_, next_tokens = restricter(state, tokens, scores, num_beams=1)
else:
next_tokens = scores.argmax(dim=-1, keepdim=True)
token_ids = torch.cat([tokens, next_tokens], dim=1)
cur_len = token_ids.size(1)
dones = token_ids.new_zeros(batch_size).eq(1).__or__(next_tokens.squeeze(1).eq(eos_token_id))
# tokens = tokens[:, -1:]
if max_len_a!=0:
# (bsz x num_beams, )
if state.encoder_mask is not None:
max_lengths = (state.encoder_mask.sum(dim=1).float()*max_len_a).long() + max_length
else:
max_lengths = tokens.new_full((tokens.size(0), ), fill_value=max_length, dtype=torch.long)
real_max_length = max_lengths.max().item()
else:
real_max_length = max_length
if state.encoder_mask is not None:
max_lengths = state.encoder_mask.new_ones(state.encoder_mask.size(0)).long()*max_length
else:
max_lengths = tokens.new_full((tokens.size(0),), fill_value=max_length, dtype=torch.long)
while cur_len < real_max_length:
scores = decoder.decode(tokens=token_ids, state=state) # batch_size x vocab_size
if repetition_penalty != 1.0:
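            # CTRL-style repetition penalty: scores of tokens already generated are made less
            # attractive -- negative scores are multiplied by the penalty (pushed further down),
            # positive scores are divided by it.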
token_scores = scores.gather(dim=1, index=token_ids)
lt_zero_mask = token_scores.lt(0).float()
ge_zero_mask = lt_zero_mask.eq(0).float()
token_scores = lt_zero_mask * repetition_penalty * token_scores + ge_zero_mask / repetition_penalty * token_scores
scores.scatter_(dim=1, index=token_ids, src=token_scores)
if eos_token_id is not None and length_penalty != 1.0:
token_scores = scores / cur_len ** length_penalty # batch_size x vocab_size
eos_mask = scores.new_ones(scores.size(1))
eos_mask[eos_token_id] = 0
eos_mask = eos_mask.unsqueeze(0).eq(1)
            scores = scores.masked_scatter(eos_mask, token_scores)  # i.e. every token except eos has its score scaled up/down
if restricter is not None:
_, next_tokens = restricter(state, token_ids, scores, 1)
else:
next_tokens = scores.argmax(dim=-1, keepdim=True)
next_tokens = next_tokens.squeeze(-1)
        # if a sequence has reached its maximum length, fill the next position with eos directly
if _eos_token_id!=-1:
next_tokens = next_tokens.masked_fill(max_lengths.eq(cur_len+1), _eos_token_id)
        next_tokens = next_tokens.masked_fill(dones, pad_token_id)  # pad samples whose search has already finished
tokens = next_tokens.unsqueeze(1)
token_ids = torch.cat([token_ids, tokens], dim=-1) # batch_size x max_len
end_mask = next_tokens.eq(_eos_token_id)
dones = dones.__or__(end_mask)
cur_len += 1
if dones.min() == 1:
break
# if eos_token_id is not None:
    #     tokens.scatter(index=max_lengths[:, None], dim=1, value=eos_token_id)  # set the position at max length to eos
# if cur_len == max_length:
    #     token_ids[:, -1].masked_fill_(~dones, eos_token_id)  # if the max length is reached without EOS, force the last token to be eos
return token_ids
def _beam_search_generate(decoder: Seq2SeqDecoder, tokens=None, state=None, max_length=20, max_len_a=0.0, num_beams=4,
bos_token_id=None, eos_token_id=None, do_sample=True,
repetition_penalty=1.0, length_penalty=None, pad_token_id=0,
restricter=None) -> torch.LongTensor:
assert do_sample is False
    # run beam search
device = _get_model_device(decoder)
if tokens is None:
if bos_token_id is None:
raise RuntimeError("You have to specify either `tokens` or `bos_token_id`.")
batch_size = state.num_samples
if batch_size is None:
raise RuntimeError("Cannot infer the number of samples from `state`.")
tokens = torch.full([batch_size, 1], fill_value=bos_token_id, dtype=torch.long).to(device)
batch_size = tokens.size(0)
if state.num_samples:
assert state.num_samples == batch_size, "The number of samples in `tokens` and `state` should match."
if eos_token_id is None:
_eos_token_id = -1
else:
_eos_token_id = eos_token_id
    scores = decoder.decode(tokens=tokens, state=state)  # the whole sequence so far (not just the last token) is passed in here
    # here we would need to handle sequences that end at the very first position
# if _eos_token_id!=-1:
# scores[:, _eos_token_id] = -1e12
vocab_size = scores.size(1)
    assert vocab_size >= num_beams, "num_beams must not exceed the vocabulary size."
scores = F.log_softmax(scores, dim=-1) # (batch_size, vocab_size)
    # obtain (batch_size, num_beams), (batch_size, num_beams)
    # TODO apply the restricter here; +1 because the very first output may already be eos
if restricter is not None:
_next_scores, _next_tokens = restricter(state, tokens, scores, num_beams+1)
else:
        # this is of size bsz x (num_beams+1)
_next_scores, _next_tokens = torch.topk(scores, num_beams+1, dim=1, largest=True, sorted=True)
    # reorder according to the indices
indices = torch.arange(batch_size, dtype=torch.long).to(device)
indices = indices.repeat_interleave(num_beams)
state.reorder_state(indices)
tokens = tokens.index_select(dim=0, index=indices) # batch_size * num_beams x length
    # if hasattr(state, 'tgt_seq_len'):  # TODO this should probably be removed
# max_lengths = state.tgt_seq_len
# real_max_length = max_lengths.max().item()
if max_len_a!=0:
# (bsz x num_beams, )
if state.encoder_mask is not None:
max_lengths = (state.encoder_mask.sum(dim=1).float()*max_len_a).long() + max_length
else:
max_lengths = tokens.new_full((batch_size*num_beams, ), fill_value=max_length, dtype=torch.long)
real_max_length = max_lengths.max().item()
else:
real_max_length = max_length
if state.encoder_mask is not None:
max_lengths = state.encoder_mask.new_ones(state.encoder_mask.size(0)).long()*max_length
else:
max_lengths = tokens.new_full((batch_size*num_beams,), fill_value=max_length, dtype=torch.long)
hypos = [
BeamHypotheses(num_beams, real_max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
]
    not_eos_mask = _next_tokens.ne(_eos_token_id)  # positions that are 1 are not eos
    keep_mask = not_eos_mask.cumsum(dim=1).le(num_beams)  # positions that are 1 should be kept
    keep_mask = not_eos_mask.__and__(keep_mask)  # positions that are 1 continue into the next search step
    next_tokens = _next_tokens.masked_select(keep_mask).view(batch_size, num_beams)  # the tokens that actually continue
next_scores = _next_scores.masked_select(keep_mask).view(batch_size, num_beams)
rows, cols = not_eos_mask.eq(0)[:, :num_beams].nonzero(as_tuple=True)
    if len(rows)>0:  # some sequences ended right at the start
for row, col in zip(rows.tolist(), cols.tolist()):
_token = torch.cat([tokens[row*num_beams], _next_tokens[row, col:col+1]], dim=0)
hypos[row].add(_token.clone(), _next_scores[row, col].item())
# record the generated tokens, shape (batch_size', cur_len)
token_ids = torch.cat([tokens, next_tokens.view(-1, 1)], dim=-1)
dones = [False] * batch_size
beam_scores = next_scores.view(-1) # batch_size * num_beams
# tracks the length of the tokens generated so far
cur_len = token_ids.size(1)
# 0, num_beams, 2*num_beams, ...
batch_inds_with_numbeams_interval = (torch.arange(batch_size) * num_beams).view(-1, 1).to(token_ids)
while cur_len < real_max_length:
scores = decoder.decode(token_ids, state) # (bsz x num_beams, vocab_size)
if repetition_penalty != 1.0:
token_scores = scores.gather(dim=1, index=token_ids)
lt_zero_mask = token_scores.lt(0).float()
ge_zero_mask = lt_zero_mask.eq(0).float()
token_scores = lt_zero_mask * repetition_penalty * token_scores + ge_zero_mask / repetition_penalty * token_scores
scores.scatter_(dim=1, index=token_ids, src=token_scores)
if _eos_token_id!=-1:
max_len_eos_mask = max_lengths.eq(cur_len+1)
eos_scores = scores[:, _eos_token_id]
# if the maximum length has been reached, boost the eos score
scores[:, _eos_token_id] = torch.where(max_len_eos_mask, eos_scores+1e32, eos_scores)
scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)
_scores = scores + beam_scores[:, None] # (batch_size * num_beams, vocab_size)
_scores = _scores.view(batch_size, -1) # (batch_size, num_beams*vocab_size)
# TODO 把限制加到这个位置
if restricter is not None:
next_scores, ids = restricter(state, token_ids, _scores, 2 * num_beams)
else:
next_scores, ids = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True) # (bsz, 2*num_beams)
from_which_beam = ids // vocab_size # (batch_size, 2*num_beams)
next_tokens = ids % vocab_size # (batch_size, 2*num_beams)
# next, assemble the candidates for the next step
# and decide which ones to keep
# next_scores, sorted_inds = next_scores.sort(dim=-1, descending=True)
# next_tokens = next_tokens.gather(dim=1, index=sorted_inds)
# from_which_beam = from_which_beam.gather(dim=1, index=sorted_inds)
not_eos_mask = next_tokens.ne(_eos_token_id)  # positions marked 1 are not eos
keep_mask = not_eos_mask.cumsum(dim=1).le(num_beams)  # positions marked 1 should be kept
keep_mask = not_eos_mask.__and__(keep_mask)  # positions marked 1 continue to the next search step
_next_tokens = next_tokens.masked_select(keep_mask).view(-1, 1)
_from_which_beam = from_which_beam.masked_select(keep_mask).view(batch_size, num_beams)  # which beam each of the kept tokens came from
_next_scores = next_scores.masked_select(keep_mask).view(batch_size, num_beams)
beam_scores = _next_scores.view(-1)
flag = True
if cur_len+1 == real_max_length:
eos_batch_idx = torch.arange(batch_size).to(next_tokens).repeat_interleave(repeats=num_beams, dim=0)
eos_beam_ind = torch.arange(num_beams).to(token_ids).repeat(batch_size)  # beam index within each example
eos_beam_idx = from_which_beam[:, :num_beams].reshape(-1)  # which beam each one was taken from
else:
# add sequences (within the top num_beams) that produced eos to the finished set; positions marked 1 are finished
effective_eos_mask = next_tokens[:, :num_beams].eq(_eos_token_id) # batch_size x num_beams
if effective_eos_mask.sum().gt(0):
eos_batch_idx, eos_beam_ind = effective_eos_mask.nonzero(as_tuple=True)
# from_which_beam has shape (batch_size, 2*num_beams), hence the factor num_beams * 2
eos_beam_idx = eos_batch_idx * num_beams * 2 + eos_beam_ind
eos_beam_idx = from_which_beam.view(-1)[eos_beam_idx]  # recover which beam the eos actually came from
else:
flag = False
if flag:
_token_ids = torch.cat([token_ids, _next_tokens], dim=-1)
for batch_idx, beam_ind, beam_idx in zip(eos_batch_idx.tolist(), eos_beam_ind.tolist(),
eos_beam_idx.tolist()):
if not dones[batch_idx]:
score = next_scores[batch_idx, beam_ind].item()
# an eos will be appended at the end later
if _eos_token_id!=-1:
hypos[batch_idx].add(_token_ids[batch_idx * num_beams + beam_idx, :cur_len].clone(), score)
else:
hypos[batch_idx].add(_token_ids[batch_idx * num_beams + beam_idx].clone(), score)
# update the state and regroup token_ids
reorder_inds = (batch_inds_with_numbeams_interval + _from_which_beam).view(-1)  # flatten to 1-D
state.reorder_state(reorder_inds)
# reorganize token_ids accordingly
token_ids = torch.cat([token_ids.index_select(index=reorder_inds, dim=0), _next_tokens], dim=-1)
for batch_idx in range(batch_size):
dones[batch_idx] = dones[batch_idx] or hypos[batch_idx].is_done(next_scores[batch_idx, 0].item()) or \
max_lengths[batch_idx*num_beams]==cur_len+1
cur_len += 1
if all(dones):
break
# select the best hypotheses
tgt_len = token_ids.new_zeros(batch_size)
best = []
for i, hypotheses in enumerate(hypos):
best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
# append eos back, since the stored hypotheses above end in a non-eos token
if _eos_token_id!=-1:
best_hyp = torch.cat([best_hyp, best_hyp.new_ones(1)*_eos_token_id])
tgt_len[i] = len(best_hyp)
best.append(best_hyp)
# generate target batch
decoded = token_ids.new_zeros(batch_size, tgt_len.max().item()).fill_(pad_token_id)
for i, hypo in enumerate(best):
decoded[i, :tgt_len[i]] = hypo
return decoded
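# Illustrative usage (a sketch, not part of the original file; `decoder` is assumed to be a
# fastNLP-style Seq2SeqDecoder and `state` the State built by the matching encoder):
#     token_ids = _beam_search_generate(decoder, tokens=None, state=state, max_length=30,
#                                       num_beams=4, bos_token_id=0, eos_token_id=2,
#                                       do_sample=False, length_penalty=1.0, pad_token_id=0)
# The result has shape (batch_size, tgt_len) and is right-padded with pad_token_id.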
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.hyp = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
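# illustrative example: with length_penalty=1.0, a 10-token hypothesis whose total
# log-prob is -5.0 gets score -0.5; larger length_penalty values favour longer hypotheses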
if len(self) < self.num_beams or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
del self.hyp[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs):
"""
If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty
| 23,989 | 44.435606 | 126 | py |
DMASTE | DMASTE-main/mySpanASTE/main.py | import os
import random
import argparse
import torch
from transformers import BertTokenizer, BertModel
from torch.utils.data import DataLoader
from torch.optim import AdamW
from tqdm import tqdm
from transformers.optimization import get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
import random, os
import numpy as np
from utils.collate import collate_fn
from utils.data_utils import ABSADataset, ABSAProcessor, convert_pad_tensor_to_list, convert_predictions_to_triples
from models.span_aste import SpanModel
from utils.metric import Metric
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='../dataset',
help="the dataset for train")
parser.add_argument("--unlabeled_data_dir", type=str, default='../amazon')
parser.add_argument("--source", type=str)
parser.add_argument("--target", type=str)
parser.add_argument("--model_dir", type=str, default="save_models",
help="the model.pkl save path")
parser.add_argument('--log_dir', type=str, default='log')
parser.add_argument('--model_name', type=str, default='model')
parser.add_argument("--batch_size", type=int, default=8, help="number of batch_size")
parser.add_argument("--encoder_lr", type=float, default=5e-5, help="learning rate of adam")
parser.add_argument('--cls_lr', type=float, default=1e-3)
parser.add_argument("--mode", type=str, choices=['train', 'test'])
parser.add_argument("--n_epochs", type=int, default=10)
parser.add_argument('--reduction', type=str, default='sum', choices=['mean', 'sum'])
parser.add_argument('--seed', type=int)
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
print(args)
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
(if installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if args.seed is not None:
print('set seed', args.seed)
set_seed(args.seed)
def get_dataset(dataset, mode, tokenizer):
data_dir = os.path.join(args.data_dir, dataset)
processor = ABSAProcessor(tokenizer)
examples = processor.get_examples(data_dir, mode)
features = processor.convert_examples_to_features(examples)
dataset = ABSADataset(features)
return examples, dataset
def evaluate(dataloader, model, examples):
model.eval()
all_predictions = []
metric = Metric()
for batch_i, batch in enumerate(dataloader):
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
output = model(**input_dict)
batch_example = examples[batch_i * args.batch_size: (batch_i + 1) * args.batch_size]
all_predictions.extend(metric.compute(batch_example, output, batch))
model.train()
return metric.get_metric(), all_predictions
def test(test_dataloader, model, test_examples, mode):
metric, predictions = evaluate(test_dataloader, model, test_examples)
print('test metric', metric)
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
metric_file = os.path.join(args.log_dir, args.model_name, 'metric.txt')
with open(metric_file, 'w') as f:
f.write(str(metric) + '\n')
predict_file = os.path.join(args.log_dir, args.model_name, 'pred.txt')
with open(predict_file, 'w') as f:
for p in predictions:
f.write(str(p) + '\n')
def main():
metric_file = os.path.join(args.log_dir, args.model_name, 'metric.txt')
if os.path.exists(metric_file):
print('------------------------------ file exists, return ---------------------------')
return
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'additional_special_tokens': ['<null-aspect>']})
tb = SummaryWriter('tb_' + args.log_dir)
if args.mode == 'train':
os.makedirs(args.model_dir, exist_ok=True)
_, train_dataset = get_dataset(args.source, 'train.txt', tokenizer)
dev_examples, dev_dataset = get_dataset(args.source, 'dev.txt', tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=True)
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=False)
print('num train data', len(train_dataset), 'num dev data', len(dev_dataset))
bert = BertModel.from_pretrained('bert-base-uncased')
bert.resize_token_embeddings(len(tokenizer))
model = SpanModel(bert).to(device)
optimizer = AdamW([{'params': model.encoder.parameters(), 'lr': args.encoder_lr, 'weight_decay': 1e-2},
{'params': list(set(model.parameters()) - set(model.encoder.parameters())), 'lr': args.cls_lr, 'weight_decay': 0}])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=int(args.n_epochs * len(train_dataloader) * 0.1),
num_training_steps=args.n_epochs * len(train_dataloader))
total_steps = args.n_epochs * len(train_dataloader)
best_metric = None
num_steps = 0
with tqdm(total=len(train_dataloader)) as pbar:
for epoch in range(args.n_epochs):
model.train()
pbar.reset()
for batch in train_dataloader:
pass
num_steps += 1
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
output = model(**input_dict)
loss = output['loss']
if num_steps % int(total_steps / 300) == 0:
tb.add_scalar('loss', loss.item(), global_step=num_steps)
tb.add_scalar('ner loss', output['ner_loss'].item(), global_step=num_steps)
tb.add_scalar('relation loss', output['relation_loss'].item(), global_step=num_steps)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
pbar.update(1)
pbar.set_postfix(epoch=f'{epoch + 1}/{args.n_epochs}', loss=loss.item(), best_f1=f"{round(best_metric['triplet']['f1'] * 100, 2)}" if best_metric is not None else 'none')
metric, _ = evaluate(dev_dataloader, model, dev_examples)
for name in metric:
for k in metric[name]:
tb.add_scalar(f'{name}_{k}', metric[name][k], global_step=num_steps)
if best_metric is None or best_metric['triplet']['f1'] < metric['triplet']['f1']:
best_metric = metric
torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
tb.add_hparams(hparam_dict=vars(args), metric_dict=best_metric['triplet'])
# torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
else:
model = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
test_examples, test_dataset = get_dataset(args.target, 'test.txt', tokenizer)
print('num test data', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
test(test_dataloader, model, test_examples, 'test')
# dev_examples, dev_dataset = get_dataset(args.target, 'dev.txt', tokenizer)
# print('num dev data', len(dev_dataset))
# dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
# test(dev_dataloader, model, dev_examples, 'dev')
os.makedirs(args.log_dir, exist_ok=True)
param_file = os.path.join(args.log_dir, args.model_name + '_params.txt')
with open(param_file, 'w') as f:
f.write(str(args) + '\n')
if __name__ == '__main__':
main()
| 8,336 | 46.369318 | 190 | py |
DMASTE | DMASTE-main/mySpanASTE/DANN_main.py | import os
import random
import argparse
import torch
from transformers import BertTokenizer, BertModel
from torch.utils.data import DataLoader
from torch.optim import AdamW
from tqdm import tqdm
from transformers.optimization import get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
import random, os
import numpy as np
from utils.collate import collate_fn
from utils.collate_unlabeled import collate_fn_target
from utils.data_utils_unlabeled import UnlabeledDataset, UnlabeledProcessor
from utils.data_utils import ABSADataset, ABSAProcessor, convert_pad_tensor_to_list, convert_predictions_to_triples
from models.DANN_span_aste import SpanModel
from utils.metric import Metric
class Domain:
Source = 0
Target = 1
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default='../dataset',
help="the dataset for train")
parser.add_argument("--unlabeled_data_dir", type=str, default='../amazon')
parser.add_argument("--source", type=str)
parser.add_argument("--target", type=str)
parser.add_argument("--model_dir", type=str, default="save_models",
help="the model.pkl save path")
parser.add_argument('--log_dir', type=str, default='log')
parser.add_argument('--model_name', type=str, default='model')
parser.add_argument("--batch_size", type=int, default=8, help="number of batch_size")
parser.add_argument("--encoder_lr", type=float, default=5e-5, help="learning rate of adam")
parser.add_argument('--cls_lr', type=float, default=1e-3)
parser.add_argument("--mode", type=str, choices=['train', 'test'])
parser.add_argument("--n_epochs", type=int, default=10)
parser.add_argument('--reduction', type=str, default='sum', choices=['mean', 'sum'])
parser.add_argument('--seed', type=int)
parser.add_argument('--ad_steps', type=int)
args = parser.parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
print(args)
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
(if installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if args.seed is not None:
print('set seed', args.seed)
set_seed(args.seed)
def get_dataset(dataset, mode, tokenizer):
data_dir = os.path.join(args.data_dir, dataset)
processor = ABSAProcessor(tokenizer)
examples = processor.get_examples(data_dir, mode)
features = processor.convert_examples_to_features(examples)
dataset = ABSADataset(features)
return examples, dataset
def get_unlabeled_dataset(dataset, tokenizer):
processor = UnlabeledProcessor(tokenizer)
examples = processor.get_examples(args.unlabeled_data_dir, dataset + '.txt')
features = processor.convert_examples_to_features(examples)
dataset = UnlabeledDataset(features)
return dataset
def evaluate(dataloader, model, examples):
model.eval()
all_predictions = []
metric = Metric()
for batch_i, batch in enumerate(dataloader):
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
output = model(**input_dict)
batch_example = examples[batch_i * args.batch_size: (batch_i + 1) * args.batch_size]
all_predictions.extend(metric.compute(batch_example, output, batch))
model.train()
return metric.get_metric(), all_predictions
def test(test_dataloader, model, test_examples, mode):
metric, predictions = evaluate(test_dataloader, model, test_examples)
print('test metric', metric)
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
metric_file = os.path.join(args.log_dir, args.model_name, f'{mode}_metric.txt')
with open(metric_file, 'w') as f:
f.write(str(metric) + '\n')
predict_file = os.path.join(args.log_dir, args.model_name, f'{mode}_pred.txt')
with open(predict_file, 'w') as f:
for p in predictions:
f.write(str(p) + '\n')
def main():
metric_file = os.path.join(args.log_dir, args.model_name, 'test_metric.txt')
if os.path.exists(metric_file):
print('------------------------------ file exists, return ---------------------------')
return
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'additional_special_tokens': ['<null-aspect>']})
tb = SummaryWriter('tb_' + args.log_dir)
if args.mode == 'train':
os.makedirs(args.model_dir, exist_ok=True)
_, train_dataset = get_dataset(args.source, 'train.txt', tokenizer)
dev_examples, dev_dataset = get_dataset(args.source, 'dev.txt', tokenizer)
unlabeled_dataset = get_unlabeled_dataset(args.target, tokenizer)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=True)
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn, shuffle=False)
unlabeled_dataloader = DataLoader(unlabeled_dataset, batch_size=args.batch_size, collate_fn=collate_fn_target, shuffle=True)
print('num train data', len(train_dataset), 'num dev data', len(dev_dataset), 'num unlabeled data', len(unlabeled_dataset))
bert = BertModel.from_pretrained('bert-base-uncased')
bert.resize_token_embeddings(len(tokenizer))
model = SpanModel(bert).to(device)
optimizer = AdamW([{'params': model.encoder.parameters(), 'lr': args.encoder_lr, 'weight_decay': 1e-2},
{'params': list(set(model.parameters()) - set(model.encoder.parameters())), 'lr': args.cls_lr, 'weight_decay': 0}])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=int(args.n_epochs * len(train_dataloader) * 0.1),
num_training_steps=args.n_epochs * len(train_dataloader))
total_steps = args.n_epochs * len(train_dataloader)
best_metric = None
num_steps = 0
it = iter(unlabeled_dataloader)
with tqdm(total=len(train_dataloader)) as pbar:
for epoch in range(args.n_epochs):
model.train()
pbar.reset()
for batch in train_dataloader:
pass
num_steps += 1
p = num_steps / total_steps
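# gradient-reversal coefficient follows the standard DANN schedule
# alpha = 2 / (1 + exp(-10 * p)) - 1, ramping smoothly from 0 to 1 over training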
alpha = 2. / (1. + np.exp(-10 * p)) - 1
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_labels', 'span_mask', 'relation_labels', 'seq_length']:
input_dict[k] = batch[k].to(device)
input_dict['domain'] = Domain.Source
input_dict['alpha'] = alpha
output = model(**input_dict)
loss = output['loss']
if num_steps % int(total_steps / 300) == 0:
tb.add_scalar('loss', loss.item(), global_step=num_steps)
tb.add_scalar('ner loss', output['ner_loss'].item(), global_step=num_steps)
tb.add_scalar('relation loss', output['relation_loss'].item(), global_step=num_steps)
domain_loss = torch.tensor([0.]).to(device)
if num_steps % args.ad_steps == 0:
domain_loss = output['domain_loss']
try:
unlabeled = next(it)
except StopIteration:
it = iter(unlabeled_dataloader)
unlabeled = next(it)
input_dict = dict()
for k in ['input_ids', 'attention_mask', 'spans', 'span_mask', 'seq_length']:
input_dict[k] = unlabeled[k].to(device)
input_dict['domain'] = Domain.Target
input_dict['alpha'] = alpha
target_output = model(**input_dict)
domain_loss += target_output['domain_loss']
loss += domain_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
pbar.update(1)
pbar.set_postfix(epoch=f'{epoch + 1}/{args.n_epochs}', loss=output['loss'].item(), domain_loss=domain_loss.item(), best_f1=f"{round(best_metric['triplet']['f1'] * 100, 2)}" if best_metric is not None else 'none')
metric, _ = evaluate(dev_dataloader, model, dev_examples)
for name in metric:
for k in metric[name]:
tb.add_scalar(f'{name}_{k}', metric[name][k], global_step=num_steps)
if best_metric is None or best_metric['triplet']['f1'] < metric['triplet']['f1']:
best_metric = metric
torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
tb.add_hparams(hparam_dict=vars(args), metric_dict=best_metric['triplet'])
# torch.save(model, os.path.join(args.model_dir, args.model_name + '.pt'))
else:
model = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
test_examples, test_dataset = get_dataset(args.target, 'test.txt', tokenizer)
print('num test data', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
test(test_dataloader, model, test_examples, 'test')
dev_examples, dev_dataset = get_dataset(args.target, 'dev.txt', tokenizer)
print('num dev data', len(dev_dataset))
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, collate_fn=collate_fn)
test(dev_dataloader, model, dev_examples, 'dev')
os.makedirs(args.log_dir, exist_ok=True)
param_file = os.path.join(args.log_dir, args.model_name + '_params.txt')
with open(param_file, 'w') as f:
f.write(str(args) + '\n')
if __name__ == '__main__':
main()
| 10,335 | 47.754717 | 232 | py |
DMASTE | DMASTE-main/mySpanASTE/models/relation.py | from os import read
import torch
import math
from utils.data_utils import RelationLabel, SpanLabel
from utils.index_select import batched_index_select
from models.feedForward import FeedForward
def bucket_values(
distances: torch.Tensor, num_identity_buckets: int = 4, num_total_buckets: int = 10
) -> torch.Tensor:
"""
Places the given values (designed for distances) into `num_total_buckets` semi-logscale
buckets, with `num_identity_buckets` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
# Parameters
distances : `torch.Tensor`, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: `int`, optional (default = `4`).
The number of identity buckets (those only holding a single value).
num_total_buckets : `int`, (default = `10`)
The total number of buckets to bucket values into.
# Returns
`torch.Tensor`
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (
num_identity_buckets - 1
)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
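# Illustrative check (not part of the original file): with the defaults
# (num_identity_buckets=4, num_total_buckets=10), distances [1, 4, 6, 20, 100]
# are mapped to buckets [1, 4, 5, 7, 9], matching the ranges
# [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+] listed in the docstring above.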
class RelationModel(torch.nn.Module):
def __init__(self, pair_embed_dim, spans_per_word=0.5, distance_embed_dim=128, hidden_dim=150, num_layers=2, activation=torch.nn.ReLU(), dropout=0.4, n_labels=4):
super(RelationModel, self).__init__()
self.pair_embed_dim = pair_embed_dim
self.n_labels = n_labels
self.spans_per_word = spans_per_word
self.distance_embedding = torch.nn.Embedding(512, embedding_dim=distance_embed_dim)
torch.nn.init.xavier_normal_(self.distance_embedding.weight)
self.ffnn = FeedForward(input_dim=pair_embed_dim + distance_embed_dim, hidden_dim=hidden_dim, num_layers=num_layers, activation=activation, dropout=dropout)
self.classifier = torch.nn.Linear(in_features=hidden_dim, out_features=n_labels)
torch.nn.init.xavier_normal_(self.classifier.weight)
self._loss = torch.nn.CrossEntropyLoss(reduction='sum')
def forward(
self, # type: ignore
spans,
ner_scores,
span_embeddings,
span_mask,
seq_length,
relation_labels = None
):
pruned_a = self._prune_spans(ner_scores[..., SpanLabel.ASPECT], span_mask, seq_length)
pruned_o = self._prune_spans(ner_scores[..., SpanLabel.OPINION], span_mask, seq_length)
spans_a = batched_index_select(spans, pruned_a['indices'])
spans_o = batched_index_select(spans, pruned_o['indices'])
relation_scores, relation_mask, relation_embeddings = self.predict_relation(spans, pruned_a['indices'], pruned_a['mask'], pruned_o['indices'], pruned_o['mask'], span_embeddings)
pruned_relation_labels = None
loss = torch.tensor(0, dtype=torch.float).to(spans_a.device)
if relation_labels is not None:
pruned_relation_labels = self.get_pruned_gold_relations(relation_labels, pruned_a, pruned_o)
flatten_relation_scores = relation_scores.reshape([-1, self.n_labels])
flatten_labels = pruned_relation_labels.view(-1)
flatten_score_mask = relation_mask.unsqueeze(-1).expand_as(relation_scores).view(flatten_relation_scores.shape)
flatten_relation_scores = flatten_relation_scores[flatten_score_mask]
flatten_labels = flatten_labels[relation_mask.view(-1)]
loss = self._loss(input=flatten_relation_scores.reshape([-1, self.n_labels]), target=flatten_labels)
return {'relation_scores': torch.softmax(relation_scores, dim=-1),
'relation_mask': relation_mask,
'relation_embeddings': relation_embeddings,
'pruned_relation_labels': pruned_relation_labels,
'loss': loss,
'pruned_a': pruned_a,
'pruned_o': pruned_o,
'spans_a': spans_a,
'spans_a_mask': pruned_a['mask'],
'spans_o': spans_o,
'spans_o_mask': pruned_o['mask']}
def get_pruned_gold_relations(self, relation_labels, pruned_a, pruned_o):
indices_a = pruned_a['indices']
indices_o = pruned_o['indices']
new_relation_labels = []
for i in range(relation_labels.shape[0]):
entry = relation_labels[i]
width = indices_a[i].shape[0]
assert indices_a[i].shape[0] == indices_o[i].shape[0]
idx_a = indices_a[i].unsqueeze(-1).expand([width, width])
idx_o = indices_o[i].unsqueeze(0).expand([width, width])
# print(entry.shape, idx_a.shape, idx_o.shape)
labels = entry[idx_a.reshape(-1), idx_o.reshape(-1)]
new_relation_labels.append(labels.reshape(width, width))
new_relation_labels = torch.stack(new_relation_labels, dim=0)
return new_relation_labels
def predict_relation(self, spans, a_indices, a_mask, o_indices, o_mask, span_embeddings):
bsz, seq_a = a_indices.shape
_, seq_o = o_indices.shape
mask = a_mask.unsqueeze(-1) * o_mask.unsqueeze(1)
# print('mask', mask.shape)
new_shape = (bsz, seq_a, seq_o)
a_indices = a_indices.unsqueeze(2).expand(new_shape)
o_indices = o_indices.unsqueeze(1).expand(new_shape)
a_embeddings = batched_index_select(span_embeddings, a_indices)
o_embeddings = batched_index_select(span_embeddings, o_indices)
spans_a = batched_index_select(spans, a_indices)
spans_o = batched_index_select(spans, o_indices)
dis1 = spans_a[..., 0] - spans_o[..., 1]
dis2 = spans_a[..., 1] - spans_o[..., 0]
dis, _ = torch.min(torch.cat([torch.absolute(dis1).unsqueeze(-1), torch.absolute(dis2).unsqueeze(-1)], dim=-1), dim=-1)
dis = bucket_values(dis)
distance_embeddings = self.distance_embedding(dis)
pair_embeddings = torch.cat([a_embeddings, o_embeddings, distance_embeddings], dim=-1)
pair_scores = self.classifier(self.ffnn(pair_embeddings))
return pair_scores, mask, pair_embeddings
def _prune_spans(self, scores, mask, seq_length):
num_spans_to_keep = torch.ceil(
seq_length.float() * self.spans_per_word
).long()
num_spans = scores.shape[1]
num_items_to_keep = torch.clamp(num_spans_to_keep, max=num_spans).to(scores.device)
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
scores = torch.where(mask.bool(), scores, torch.zeros_like(scores) + -1e20)
_, top_indices = scores.topk(max_items_to_keep, dim=1)
top_indices_mask = torch.arange(start=0, end=max_items_to_keep).to(scores.device).reshape([1, -1]).expand_as(top_indices)
top_indices_mask = top_indices_mask < num_items_to_keep.reshape(-1, 1)
return {'indices': top_indices, 'mask': top_indices_mask} | 8,024 | 51.796053 | 186 | py |
DMASTE | DMASTE-main/mySpanASTE/models/feedForward.py | import torch
class FeedForward(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, num_layers, activation, dropout):
super(FeedForward, self).__init__()
hidden_dims = [hidden_dim] * num_layers # type: ignore
activations = [activation] * num_layers # type: ignore
dropout = [dropout] * num_layers # type: ignore
self._activations = torch.nn.ModuleList(activations)
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
a = torch.nn.Linear(layer_input_dim, layer_output_dim)
torch.nn.init.xavier_normal_(a.weight)
linear_layers.append(a)
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self.input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, activation, dropout in zip(
self._linear_layers, self._activations, self._dropout
):
output = dropout(activation(layer(output)))
return output | 1,421 | 39.628571 | 79 | py |
DMASTE | DMASTE-main/mySpanASTE/models/DANN_span_aste.py | import torch
from torch.nn import functional as F
from utils.index_select import batched_index_select
from models.ner import NERModel
from models.relation import RelationModel
from models.functions import ReverseLayerF
class SpanModel(torch.nn.Module):
def __init__(self, encoder, width_embedding_dim=20, max_width=512, spans_per_word=0.5):
super(SpanModel, self).__init__()
self.encoder = encoder
self.max_width = max_width
self.width_embedding = torch.nn.Embedding(max_width, width_embedding_dim)
torch.nn.init.xavier_normal_(self.width_embedding.weight)
self.span_embed_dim = 768 * 2 + width_embedding_dim
self.ner = NERModel(span_embed_dim=self.span_embed_dim)
self.relation = RelationModel(pair_embed_dim=self.span_embed_dim * 2, spans_per_word=spans_per_word)
self.domain_cls = torch.nn.Linear(768, 2)
def forward(self, input_ids, attention_mask, spans, span_mask, seq_length, span_labels=None, relation_labels=None, alpha=None, domain=None):
text_embeddings = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
span_embeddings = self.text_to_span_embeds(text_embeddings, spans)
ner_output = self.ner(span_embeddings, span_mask, span_labels)
relation_output = self.relation(spans, ner_output['ner_scores'], span_embeddings, span_mask, seq_length=seq_length, relation_labels=relation_labels)
loss = ner_output['loss'] + relation_output['loss']
num_spans = span_mask.sum()
num_relations = relation_output['relation_mask'].sum()
loss = ner_output['loss'] + relation_output['loss']
domain_loss = torch.tensor([0.], device=input_ids.device)
if domain is not None:
reverse_embed = ReverseLayerF.apply(text_embeddings, alpha)
domain_scores = self.domain_cls(reverse_embed)
domain_label = torch.where(attention_mask.bool(), torch.zeros_like(attention_mask).long() + domain, torch.zeros_like(attention_mask).long() -1 )
# reverse_rel_embed = ReverseLayerF.apply(relation_output['relation_embeddings'], alpha)
# rel_domain_scores = self.relation_domain_cls(reverse_rel_embed)
# zero = torch.zeros_like(relation_output['relation_mask'])
# rel_domain_label = torch.where(relation_output['relation_mask'].bool(), zero.long() + domain, zero.long() - 1)
domain_loss = F.cross_entropy(domain_scores.view(-1, 2), domain_label.view(-1).long(), reduction='sum', ignore_index=-1)
# rel_domain_loss = F.cross_entropy(rel_domain_scores.view(-1, 2), rel_domain_label.view(-1).long(), reduction='sum', ignore_index=-1)
return {'loss': loss,
'ner_loss': ner_output['loss'] / (num_spans + num_relations),
'relation_loss': relation_output['loss'] / (num_spans + num_relations),
'ner_output': ner_output,
'relation_output': relation_output,
'domain_loss': domain_loss}
def text_to_span_embeds(self, text_embeddings, spans):
# batch index select
span_starts, span_ends = [index.squeeze(-1) for index in spans.split(1, dim=-1)]
start_embeddings = batched_index_select(text_embeddings, span_starts)
end_embeddings = batched_index_select(text_embeddings, span_ends)
width = span_ends - span_starts
width_embedding = self.width_embedding(width)
span_embedding = torch.cat([start_embeddings, end_embeddings, width_embedding], dim=-1)
return span_embedding
| 3,605 | 59.1 | 157 | py |
DMASTE | DMASTE-main/mySpanASTE/models/ner.py | import torch
from torch.nn.modules import dropout
import torch.nn.functional as F
from utils.data_utils import SpanLabel
from models.feedForward import FeedForward
class NERModel(torch.nn.Module):
def __init__(self, span_embed_dim, hidden_dim=150, num_layers=2, activation=torch.nn.ReLU(), dropout=0.4, n_labels=3):
super(NERModel, self).__init__()
self.span_embed_dim = span_embed_dim
self.n_labels = n_labels
self.ffnn = FeedForward(input_dim=span_embed_dim, hidden_dim=hidden_dim, num_layers=num_layers, activation=activation, dropout=dropout)
self.classifier = torch.nn.Linear(in_features=hidden_dim, out_features=n_labels)
torch.nn.init.xavier_normal_(self.classifier.weight)
self._loss = torch.nn.CrossEntropyLoss(reduction='sum')
def forward(self, span_embeddings, span_mask, span_labels=None):
# shape: bsz, span_length, n_labels
ner_scores = self.classifier(self.ffnn(span_embeddings))
masked_scores = torch.zeros_like(span_mask, dtype=torch.long) + 1e20
ner_scores[..., SpanLabel.INVALID] = torch.where(span_mask.bool(), ner_scores[..., SpanLabel.INVALID], masked_scores)
softmax_ner_scores = ner_scores.softmax(dim=-1)
output_dict = dict()
output_dict.update(ner_scores=softmax_ner_scores)
output_dict.update(opinion_scores=ner_scores.softmax(dim=-1)[..., SpanLabel.OPINION])
output_dict.update(target_scores=ner_scores.softmax(dim=-1)[..., SpanLabel.ASPECT])
loss = torch.tensor(0,dtype=torch.float).to(span_mask.device)
if span_labels is not None:
# test
# predicts = torch.argmax(softmax_ner_scores, dim=-1)
# from sklearn.metrics import precision_score, recall_score, f1_score
# valid_mask = span_labels != SpanLabel.INVALID
# predicts = predicts[valid_mask]
# new_labels = span_labels[valid_mask]
# p, r = precision_score(new_labels.cpu().tolist(), predicts.cpu().tolist(), average='macro'), recall_score(new_labels.cpu().tolist(), predicts.cpu().tolist(), average='macro')
# f1 = f1_score(new_labels.cpu().tolist(), predicts.cpu().tolist(), average='macro')
# print(f'ner p: {p}, r: {r}, f1: {f1}')
# end
ner_scores_flat = ner_scores.view(
-1, self.n_labels
)
ner_labels_flat = span_labels.view(-1)
mask_flat = span_mask.view(-1).bool()
loss = self._loss(ner_scores_flat[mask_flat], ner_labels_flat[mask_flat])
output_dict["loss"] = loss
return output_dict
| 2,651 | 48.111111 | 188 | py |
DMASTE | DMASTE-main/mySpanASTE/models/functions.py | from torch.autograd import Function
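# Gradient reversal layer used for DANN-style adversarial domain adaptation:
# the forward pass is the identity, while the backward pass multiplies the incoming
# gradient by -alpha, so the shared encoder is pushed to confuse the domain classifier.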
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None | 305 | 18.125 | 46 | py |
DMASTE | DMASTE-main/mySpanASTE/models/span_aste.py | import torch
from utils.index_select import batched_index_select
from models.ner import NERModel
from models.relation import RelationModel
class SpanModel(torch.nn.Module):
def __init__(self, encoder, width_embedding_dim=20, max_width=512, spans_per_word=0.5):
super(SpanModel, self).__init__()
self.encoder = encoder
self.max_width = max_width
self.width_embedding = torch.nn.Embedding(max_width, width_embedding_dim)
torch.nn.init.xavier_normal_(self.width_embedding.weight)
self.span_embed_dim = 768 * 2 + width_embedding_dim
self.ner = NERModel(span_embed_dim=self.span_embed_dim)
self.relation = RelationModel(pair_embed_dim=self.span_embed_dim * 2, spans_per_word=spans_per_word)
def forward(self, input_ids, attention_mask, spans, span_mask, seq_length, span_labels=None, relation_labels=None):
text_embeddings = self.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
span_embeddings = self.text_to_span_embeds(text_embeddings, spans)
ner_output = self.ner(span_embeddings, span_mask, span_labels)
relation_output = self.relation(spans, ner_output['ner_scores'], span_embeddings, span_mask, seq_length=seq_length, relation_labels=relation_labels)
loss = ner_output['loss'] + relation_output['loss']
num_spans = span_mask.sum()
num_relations = relation_output['relation_mask'].sum()
loss = ner_output['loss'] + relation_output['loss']
return {'loss': loss,
'ner_loss': ner_output['loss'] / (num_spans + num_relations),
'relation_loss': relation_output['loss'] / (num_spans + num_relations),
'ner_output': ner_output,
'relation_output': relation_output}
def text_to_span_embeds(self, text_embeddings, spans):
# batch index select
span_starts, span_ends = [index.squeeze(-1) for index in spans.split(1, dim=-1)]
start_embeddings = batched_index_select(text_embeddings, span_starts)
end_embeddings = batched_index_select(text_embeddings, span_ends)
width = span_ends - span_starts
width_embedding = self.width_embedding(width)
span_embedding = torch.cat([start_embeddings, end_embeddings, width_embedding], dim=-1)
return span_embedding
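# note: each span embedding is the concatenation [start token; end token; width embedding],
# i.e. 768 * 2 + width_embedding_dim dimensions (1556 with the default width_embedding_dim=20)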
| 2,370 | 52.886364 | 157 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/data_utils_unlabeled.py | import os
from enum import IntEnum
from torch.utils.data import Dataset
class DomainLabel(IntEnum):
Source = 0
Target = 1
class UnlabeledDataset(Dataset):
def __init__(self, features):
self.features = features
def __getitem__(self, index):
return self.features[index]
def __len__(self):
return len(self.features)
class UnlabeledFeature:
def __init__(self, input_ids, spans, token_range, seq_length) -> None:
self.input_ids = input_ids
self.spans = spans
self.seq_length = seq_length
self.token_range = token_range
class UnlabeledProcessor:
def __init__(self, tokenizer, min_span_width=1, max_span_width=10, max_seq_length=512):
self.tokenizer = tokenizer
self.null_aspect_id = self.tokenizer.convert_tokens_to_ids(['[ia]'])
self.min_span_width = min_span_width
self.max_span_width = max_span_width
self.max_seq_length = max_seq_length
def get_examples(self, data_dir, mode):
file_name = os.path.join(data_dir, mode)
lines = []
with open(file_name) as f:
counter = 0
for line in f:
lines.append('[ia] ' + line.split(' #### ')[-1])
return lines
def convert_examples_to_features(self, examples):
features = []
for sent in examples:
input_ids, token_range = self._tokenize(sent)
seq_length = len(sent.split())
spans = self._enumerate_spans(token_range)
features.append(UnlabeledFeature(input_ids=input_ids,
spans=spans,
seq_length=seq_length,
token_range=token_range))
return features
def _enumerate_spans(self, token_range):
word_length = len(token_range)
spans = [(1, 1)]
for i in range(word_length):
for j in range(self.min_span_width - 1, self.max_span_width):
if i + j < word_length:
start = token_range[i][0]
end = token_range[i + j][1]
spans.append((start, end))
return spans
def _tokenize(self, sentence):
words = sentence.split()
input_ids = [self.tokenizer.cls_token_id]
token_range = []
start_ids = 1
for word in words:
word_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(word))
if len(input_ids) + len(word_ids) + 1 > self.max_seq_length:
break
input_ids.extend(word_ids)
token_range.append([start_ids, start_ids + len(word_ids) - 1])
start_ids += len(word_ids)
input_ids.append(self.tokenizer.sep_token_id)
return input_ids, token_range
if __name__ == '__main__':
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'additional_special_tokens': ['<null-aspect>']})
processor = UnlabeledProcessor(tokenizer=tokenizer)
root = '../../../dataset/amazon'
for domain in os.listdir(root):
examples = processor.get_examples('../../../dataset/amazon/', domain, num_data=1000)
features = processor.convert_examples_to_features(examples)
for example, feature in zip(examples, features):
print(example)
print(tokenizer.convert_ids_to_tokens(feature.input_ids))
print(feature.token_range)
print()
| 3,572 | 33.68932 | 92 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/collate_unlabeled.py | import torch
from utils.data_utils import RelationLabel
from utils.data_utils_unlabeled import DomainLabel
def collate_fn_target(data):
"""批处理,填充同一batch中句子最大的长度"""
def pad_and_tensor(data, pad_value=0):
max_len = max([len(x) for x in data])
new_data = []
mask = []
for x in data:
tmp_data = torch.tensor(x)
size = tmp_data.shape
pad_data = torch.zeros((max_len - size[0], *size[1:]))
new_data.append(torch.cat([tmp_data, pad_data], dim=0))
mask.append(torch.cat([torch.ones_like(tmp_data), torch.zeros_like(pad_data)], dim=0))
return torch.stack(new_data, dim=0).to(torch.long), torch.stack(mask, dim=0).to(torch.long)
input_ids = [f.input_ids for f in data]
bsz = len(data)
input_ids, attention_mask = pad_and_tensor(input_ids)
spans = [f.spans for f in data]
spans, span_mask = pad_and_tensor(spans)
span_mask = span_mask[...,0]
seq_length = [f.seq_length for f in data]
seq_length = torch.tensor(seq_length).to(torch.long)
token_range = [f.token_range for f in data]
token_range, token_range_mask = pad_and_tensor(token_range)
token_range_mask = token_range_mask[..., 0]
batch = {'input_ids': input_ids,
'attention_mask': attention_mask,
'spans': spans,
'span_mask': span_mask,
'seq_length': seq_length,
'token_range': token_range,
'token_range_mask': token_range_mask}
return batch
| 1,535 | 36.463415 | 99 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/data_utils.py | import os
from enum import IntEnum
from pydantic import BaseModel
from typing import List
from torch.utils.data import Dataset
import torch
class SpanLabel(IntEnum):
INVALID = 0
ASPECT = 1
OPINION = 2
class RelationLabel(IntEnum):
INVALID = 0
POS = 1
NEG = 2
NEU = 3
class ABSADataset(Dataset):
def __init__(self, features):
self.features = features
def __getitem__(self, index):
return self.features[index]
def __len__(self):
return len(self.features)
class SentimentTriple(BaseModel):
aspects: List
opinions: List
triples: List
@classmethod
def from_sentiment_triple(cls, triples, token_range):
"""read from sentiment triple"""
sentiment_map = {'POS': RelationLabel.POS, 'NEG': RelationLabel.NEG, 'NEU': RelationLabel.NEU}
aspects, opinions, new_triples = [], [], []
for a, o, s in triples:
new_a, new_o = None, None
if a[1] < len(token_range):
if -1 in a:
new_a = (1, 1)
else:
new_a = (token_range[a[0]][0], token_range[a[1]][1])
aspects.append(new_a)
if o[1] < len(token_range):
assert -1 not in o
new_o = (token_range[o[0]][0], token_range[o[1]][1])
opinions.append(new_o)
if new_a is not None and new_o is not None:
new_triples.append((new_a, new_o, sentiment_map[s]))
return cls(
aspects=aspects,
opinions=opinions,
triples=new_triples,
)
class ABSAFeature:
def __init__(self, input_ids, spans, span_labels, triples, token_range, seq_length) -> None:
self.input_ids = input_ids
self.spans = spans
self.span_labels = span_labels
# self.relation_labels = relation_labels
self.seq_length = seq_length
self.token_range = token_range
self.triples = triples
class ABSAProcessor:
def __init__(self, tokenizer, min_span_width=1, max_span_width=10, max_seq_length=512):
self.tokenizer = tokenizer
self.null_aspect_id = self.tokenizer.convert_tokens_to_ids('<null-aspect>')
self.min_span_width = min_span_width
self.max_span_width = max_span_width
self.max_seq_length = max_seq_length
def get_features(self, data_dir, mode):
examples = self.get_examples(data_dir, mode)
features = self.convert_examples_to_features(examples)
return features
def get_examples(self, data_dir, mode):
file_name = os.path.join(data_dir, mode)
instances = []
lines = []
with open(file_name) as f:
lines = f.readlines()
lines = [x.split('####') for x in lines]
for line in lines:
sentence, triples, = line[:2]
triples = eval(triples)
new_triples = []
for t in triples:
a, o, s = t
a = [a[0], a[-1]]
o = [o[0], o[-1]]
assert len(a) == 2 and len(o) == 2 and s in ('POS', 'NEG', 'NEU')
assert a[0] <= a[1]
assert o[0] <= o[1]
new_triples.append((a, o, s))
instances.append((sentence, new_triples))
return instances
def convert_examples_to_features(self, examples):
features = []
for sent, triples in examples:
input_ids, token_range = self._tokenize(sent)
seq_length = len(sent.split())
triples = SentimentTriple.from_sentiment_triple(triples, token_range)
spans = self._enumerate_spans(token_range)
span_labels = [SpanLabel.INVALID] * len(spans)
for a in triples.aspects:
# print(a)
if a[-1] - a[0] > self.max_span_width:
continue
idx = spans.index(a)
span_labels[idx] = SpanLabel.ASPECT
for o in triples.opinions:
if o[-1] - o[0] > self.max_span_width:
continue
idx = spans.index(o)
span_labels[idx] = SpanLabel.OPINION
# for a, o, s in triples.triples:
# idx_a, idx_o = spans.index(a), spans.index(o)
# relation_labels[idx_a][idx_o] = s
features.append(ABSAFeature(input_ids=input_ids,
spans=spans,
span_labels=span_labels,
triples = triples.triples,
# relation_labels=relation_labels,
seq_length=seq_length,
token_range=token_range))
return features
def _enumerate_spans(self, token_range):
word_length = len(token_range)
spans = [(1, 1)]
for i in range(word_length):
for j in range(self.min_span_width - 1, self.max_span_width):
if i + j < word_length:
start = token_range[i][0]
end = token_range[i + j][1]
spans.append((start, end))
return spans
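# Illustrative example (hypothetical numbers): for a 3-word sentence with
# token_range [[2, 2], [3, 3], [4, 5]] and max_span_width >= 3, the enumerated spans are
# [(1, 1), (2, 2), (2, 3), (2, 5), (3, 3), (3, 5), (4, 5)], where (1, 1) is the
# <null-aspect> placeholder that _tokenize inserts right after [CLS].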
def _tokenize(self, sentence):
words = sentence.split()
input_ids = [self.tokenizer.cls_token_id, self.null_aspect_id]
token_range = []
start_ids = 2
for word in words:
word_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(word))
if len(input_ids) + len(word_ids) + 1 > self.max_seq_length:
break
input_ids.extend(word_ids)
token_range.append([start_ids, start_ids + len(word_ids) - 1])
start_ids += len(word_ids)
input_ids.append(self.tokenizer.sep_token_id)
return input_ids, token_range
def convert_predictions_to_triples(spans_a, spans_o, relation_labels, token_range):
# relation_idx = [i for i, label in enumerate(relations_labels) if label != RelationLabel.INVALID]
# relations_labels = [relations_labels[i] for i in relation_idx]
relation_indices = [(i, j) for i in range(len(relation_labels)) for j in range(len(relation_labels)) if relation_labels[i][j] != RelationLabel.INVALID]
# print('relation indices', relation_indices)
def subword_span2_word_span(subword_span, token_range):
if 1 in subword_span:
return [-1, -1]
start, end = -1, -1
for i, ran in enumerate(token_range):
if ran[0] <= subword_span[0] <= ran[1]:
assert start == -1
start = i
if ran[0] <= subword_span[1] <= ran[1]:
assert end == -1
end = i
return [start, end]
triples = []
int2sentiment = {RelationLabel.POS: 'POS', RelationLabel.NEG: 'NEG', RelationLabel.NEU: 'NEU'}
for i, (a_idx, o_idx) in enumerate(relation_indices):
# assert span_labels[a_idx] == SpanLabel.ASPECT, span_labels[a_idx]
# assert span_labels[o_idx] == SpanLabel.OPINION, span_labels[o_idx]
a_subword_span, o_subword_span = spans_a[a_idx], spans_o[o_idx]
a_word_span = subword_span2_word_span(a_subword_span, token_range)
o_word_span = subword_span2_word_span(o_subword_span, token_range)
# print('idx', a_idx, o_idx)
triples.append((a_word_span, o_word_span, int2sentiment[relation_labels[a_idx][o_idx]]))
return triples
def convert_pad_tensor_to_list(batch_data, mask):
assert len(mask.shape) == 2
batch_data = batch_data.detach().cpu().tolist()
len_list = torch.sum(mask, dim=-1).detach().cpu().tolist()
ret = []
for length, data in zip(len_list, batch_data):
ret.append(data[: length])
return ret
if __name__ == '__main__':
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
processor = ABSAProcessor(tokenizer=tokenizer)
root = '../../../dataset/del/CDASTE-Data'
for domain in os.listdir(root):
if '.' in domain:
continue
examples = processor.get_examples(f'../../../dataset/del/CDASTE-Data/{domain}', 'train.txt')
features = processor.convert_examples_to_features(examples)
for example, feature in zip(examples, features):
triples1 = example[1]
# print(domain, example)
triples2 = convert_predictions_to_triples(feature.spans, feature.relation_labels, feature.token_range)
assert len(feature.input_ids) == feature.token_range[-1][1] + 2
if str(sorted(triples1)) != str(sorted(triples2)):
print(example, len(feature.token_range))
print(triples2)
print()
| 8,811 | 38.339286 | 155 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/collate.py | import torch
from utils.data_utils import RelationLabel
def collate_fn(data):
"""批处理,填充同一batch中句子最大的长度"""
def pad_and_tensor(data, pad_value=0):
max_len = max([len(x) for x in data])
new_data = []
mask = []
for x in data:
tmp_data = torch.tensor(x)
size = tmp_data.shape
pad_data = torch.zeros((max_len - size[0], *size[1:]))
new_data.append(torch.cat([tmp_data, pad_data], dim=0))
mask.append(torch.cat([torch.ones_like(tmp_data), torch.zeros_like(pad_data)], dim=0))
return torch.stack(new_data, dim=0).to(torch.long), torch.stack(mask, dim=0).to(torch.long)
input_ids = [f.input_ids for f in data]
input_ids, attention_mask = pad_and_tensor(input_ids)
spans = [f.spans for f in data]
max_span_length = max([len(x) for x in spans])
triples = [f.triples for f in data]
relation_labels = []
relation_mask = []
for i, ins_triple in enumerate(triples):
labels = torch.zeros([max_span_length, max_span_length], dtype=torch.long) + RelationLabel.INVALID
for triple in ins_triple:
a, o, s = triple
try:
a_idx, o_idx = spans[i].index(a), spans[i].index(o)
labels[a_idx, o_idx] = s
except:
pass
mask = torch.zeros([max_span_length, max_span_length], dtype=torch.long)
mask[: len(spans[i]), : len(spans[i])] = 1
relation_labels.append(labels)
relation_mask.append(mask)
relation_labels = torch.stack(relation_labels, dim=0)
relation_mask = torch.stack(relation_mask, dim=0)
spans, _ = pad_and_tensor(spans)
span_labels = [f.span_labels for f in data]
span_labels, span_mask = pad_and_tensor(span_labels)
seq_length = [f.seq_length for f in data]
seq_length = torch.tensor(seq_length).to(torch.long)
token_range = [f.token_range for f in data]
token_range, token_range_mask = pad_and_tensor(token_range)
token_range_mask = token_range_mask[..., 0]
batch = {'input_ids': input_ids,
'attention_mask': attention_mask,
'spans': spans,
'span_labels': span_labels,
'span_mask': span_mask,
'relation_labels': relation_labels,
'relation_mask': relation_mask,
'seq_length': seq_length,
'token_range': token_range,
'token_range_mask': token_range_mask}
return batch
| 2,502 | 39.370968 | 106 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/index_select.py | import torch
def batched_index_select(target, indices):
"""
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
"""
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
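# Illustrative shapes (not part of the original file): if target is (2, 5, 8) and
# indices is (2, 3), the result is (2, 3, 8) with result[b, i] = target[b, indices[b, i]]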
def flatten_and_batch_shift_indices(indices, sequence_length):
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
print(
f"All elements in indices should be in range (0, {sequence_length - 1})"
)
exit()
offsets = torch.arange(start=0, end=indices.size(0), dtype=torch.long).to(indices.device) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices | 1,622 | 42.864865 | 111 | py |
DMASTE | DMASTE-main/mySpanASTE/utils/metric.py | import torch
from utils.data_utils import convert_pad_tensor_to_list, convert_predictions_to_triples, SpanLabel, RelationLabel
from sklearn.metrics import precision_score, recall_score, f1_score
def convert_relations_to_list(relations, mask):
ret = []
for i in range(relations.shape[0]):
r, m = relations[i], mask[i]
width = torch.sum(m, dim=0)
height = torch.sum(m, dim=1)
assert torch.sum(torch.eq(width, height)) == width.shape[0]
ret.append(r[: width[0], :width[0]].detach().tolist())
return ret
class Metric:
def __init__(self):
self.triplet = {'pred': 0, 'golden': 0, 'tp': 0}
self.ner = {'p': 0, 'r': 0, 'f1': 0}
self.relation = {'p': 0, 'r': 0, 'f1': 0}
self.aspect = {'pred': 0, 'golden': 0, 'tp': 0}
self.opinion = {'pred': 0, 'golden': 0, 'tp': 0}
self.pos_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.neg_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.neu_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.inv_relation = {'pred': 0, 'golden': 0, 'tp': 0}
self.num_ins = 0
def get_metric(self):
ret = dict()
mean_metric = {'ner': self.ner, 'relation': self.relation}
for type_ in mean_metric:
type_metric = dict()
for metric_name in ['p', 'r', 'f1']:
type_metric[metric_name] = mean_metric[type_][metric_name] / self.num_ins
ret[type_] = type_metric
num_metric = {'triplet': self.triplet, 'aspect': self.aspect, 'opinion': self.opinion, 'pos_rel': self.pos_relation,
'neg_rel': self.neg_relation, 'neu_rel': self.neu_relation, 'inv_rel': self.inv_relation}
for type_ in num_metric:
num = num_metric[type_]
tp, golden, pred = num['tp'], num['golden'], num['pred']
p = tp / pred if pred != 0 else 0
r = tp / golden if golden != 0 else 0
f1 = 2 * p * r / (p + r) if (p + r) != 0 else 0
ret[type_] = {'p': p, 'r': r, 'f1': f1}
return ret
def get_span_labels(self, batch, output):
span_labels = batch['span_labels']
span_mask = batch['span_mask']
span_labels = convert_pad_tensor_to_list(span_labels, span_mask)
span_predictions = output['ner_output']['ner_scores']
span_predictions = torch.argmax(span_predictions, dim=-1)
span_predictions = convert_pad_tensor_to_list(span_predictions, span_mask)
return span_labels, span_predictions
def cal_num(self, ins_pred, ins_label, ins_type, metric):
golden = set([i for i, x in enumerate(ins_label) if x == ins_type])
pred = set([i for i, x in enumerate(ins_pred) if x == ins_type])
tp = golden & pred
ins_metric = {'golden': len(golden), 'pred': len(pred), 'tp': len(tp)}
for k in ins_metric:
metric[k] += ins_metric[k]
def cal_span_metric(self, span_labels, span_predictions):
for ins_label, ins_pred in zip(span_labels, span_predictions):
assert len(ins_label) == len(ins_pred)
self.num_ins += 1
self.ner['p'] += precision_score(ins_label, ins_pred, average='macro', zero_division=1)
self.ner['r'] += recall_score(ins_label, ins_pred, average='macro', zero_division=1)
self.ner['f1'] += f1_score(ins_label, ins_pred, average='macro', zero_division=1)
self.cal_num(ins_pred, ins_label, SpanLabel.ASPECT, self.aspect)
self.cal_num(ins_pred, ins_label, SpanLabel.OPINION, self.opinion)
def cal_relation_metric(self, output):
relation_labels = output['relation_output']['pruned_relation_labels']
relation_mask = output['relation_output']['relation_mask']
relation_predictions = output['relation_output']['relation_scores']
relation_predictions = torch.argmax(relation_predictions, dim=-1)
assert relation_labels.shape == relation_predictions.shape
relation_labels = convert_relations_to_list(relation_labels, relation_mask)
relation_predictions = convert_relations_to_list(relation_predictions, relation_mask)
for ins_label, ins_pred in zip(relation_labels, relation_predictions):
ins_label = [x for row in ins_label for x in row]
ins_pred = [x for row in ins_pred for x in row]
assert len(ins_label) == len(ins_pred)
self.relation['p'] += precision_score(ins_label, ins_pred, average='macro', zero_division=1)
self.relation['r'] += recall_score(ins_label, ins_pred, average='macro', zero_division=1)
self.relation['f1'] += f1_score(ins_label, ins_pred, average='macro', zero_division=1)
self.cal_num(ins_pred, ins_label, RelationLabel.NEG, self.neg_relation)
self.cal_num(ins_pred, ins_label, RelationLabel.NEU, self.neu_relation)
self.cal_num(ins_pred, ins_label, RelationLabel.POS, self.pos_relation)
            self.cal_num(ins_pred, ins_label, RelationLabel.INVALID, self.inv_relation)
def compute(self, examples, output, batch):
# ner
span_labels, span_predictions = self.get_span_labels(batch, output)
self.cal_span_metric(span_labels, span_predictions)
# relation
self.cal_relation_metric(output)
# triples
spans_a = output['relation_output']['spans_a']
spans_a_mask = output['relation_output']['spans_a_mask']
spans_a = convert_pad_tensor_to_list(spans_a, spans_a_mask)
spans_o = output['relation_output']['spans_o']
spans_o_mask = output['relation_output']['spans_o_mask']
spans_o = convert_pad_tensor_to_list(spans_o, spans_o_mask)
relation_scores = output['relation_output']['relation_scores']
relation_mask = output['relation_output']['relation_mask']
predict_relations = torch.argmax(relation_scores, dim=-1)
# print('relation', predict_relations.shape, batch['relation_labels'].shape)
predict_relations = convert_relations_to_list(predict_relations, relation_mask)
# print(predict_relations)
token_range, token_range_mask = batch['token_range'], batch['token_range_mask']
token_range = convert_pad_tensor_to_list(token_range, token_range_mask)
predict_triples = []
for i in range(len(examples)):
triples1 = examples[i][1]
triples2 = convert_predictions_to_triples(spans_a=spans_a[i], spans_o=spans_o[i], relation_labels=predict_relations[i], token_range=token_range[i])
predict_triples.append(triples2)
self.triplet['pred'] += len(triples2)
self.triplet['golden'] += len(triples1)
for t1 in triples1:
for t2 in triples2:
if str(t1) == str(t2):
self.triplet['tp'] += 1
return predict_triples
| 6,970 | 51.022388 | 159 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/test.py | import numpy as np
import torch
import random
from learner import Learner
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)')
# training
parser.add_argument("--batch_size", help="batch_size", default=256, type=int)
parser.add_argument("--lr",help='learning rate',default=1e-3, type=float)
parser.add_argument("--weight_decay",help='weight_decay',default=0.0, type=float)
parser.add_argument("--momentum",help='momentum',default=0.9, type=float)
parser.add_argument("--num_workers", help="workers number", default=16, type=int)
parser.add_argument("--exp", help='experiment name', default='Test', type=str)
parser.add_argument("--device", help="cuda or cpu", default='cuda', type=str)
parser.add_argument("--num_steps", help="# of iterations", default= 500 * 100, type=int)
parser.add_argument("--target_attr_idx", help="target_attr_idx", default= 0, type=int)
parser.add_argument("--bias_attr_idx", help="bias_attr_idx", default= 1, type=int)
parser.add_argument("--dataset", help="data to train, [cmnist, cifar10, bffhq]", default= 'cmnist', type=str)
parser.add_argument("--percent", help="percentage of conflict", default= "1pct", type=str)
parser.add_argument("--use_lr_decay", action='store_true', help="whether to use learning rate decay")
parser.add_argument("--lr_decay_step", help="learning rate decay steps", type=int, default=10000)
parser.add_argument("--q", help="GCE parameter q", type=float, default=0.7)
parser.add_argument("--lr_gamma", help="lr gamma", type=float, default=0.1)
parser.add_argument("--lambda_dis_align", help="lambda_dis in Eq.2", type=float, default=1.0)
parser.add_argument("--lambda_swap_align", help="lambda_swap_b in Eq.3", type=float, default=1.0)
parser.add_argument("--lambda_swap", help="lambda swap (lambda_swap in Eq.4)", type=float, default=1.0)
parser.add_argument("--ema_alpha", help="use weight mul", type=float, default=0.7)
parser.add_argument("--curr_step", help="curriculum steps", type=int, default= 0)
parser.add_argument("--use_type0", action='store_true', help="whether to use type 0 CIFAR10C")
parser.add_argument("--use_type1", action='store_true', help="whether to use type 1 CIFAR10C")
parser.add_argument("--use_resnet20", help="Use Resnet20", action="store_true") # ResNet 20 was used in Learning From Failure CifarC10 (We used ResNet18 in our paper)
parser.add_argument("--model", help="which network, [MLP, ResNet18, ResNet20, ResNet50]", default= 'MLP', type=str)
# logging
parser.add_argument("--log_dir", help='path for loading data', default='./log', type=str)
parser.add_argument("--data_dir", help='path for saving models & logs', default='dataset', type=str)
parser.add_argument("--valid_freq", help='frequency to evaluate on valid/test set', default=500, type=int)
parser.add_argument("--log_freq", help='frequency to log on tensorboard', default=500, type=int)
parser.add_argument("--save_freq", help='frequency to save model checkpoint', default=1000, type=int)
parser.add_argument("--wandb", action="store_true", help="whether to use wandb")
parser.add_argument("--tensorboard", action="store_true", help="whether to use tensorboard")
# experiment
parser.add_argument("--pretrained_path", help="path for pretrained model", type=str)
args = parser.parse_args()
# init learner
learner = Learner(args)
# actual training
print('Official Pytorch Code of "Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)"')
print('Test starts ...')
learner.test_ours(args)
| 3,784 | 63.152542 | 170 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/learner.py | from tqdm import tqdm
import wandb
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import os
import torch.optim as optim
from data.util import get_dataset, IdxDataset
from module.loss import GeneralizedCELoss
from module.util import get_model
from util import EMA
class Learner(object):
def __init__(self, args):
data2model = {'cmnist': "MLP",
'cifar10c': "ResNet18",
'bffhq': "ResNet18"}
data2batch_size = {'cmnist': 256,
'cifar10c': 256,
'bffhq': 64}
data2preprocess = {'cmnist': None,
'cifar10c': True,
'bffhq': True}
if args.wandb:
import wandb
wandb.init(project='Learning-Debiased-Disetangled')
wandb.run.name = args.exp
run_name = args.exp
if args.tensorboard:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(f'result/summary/{run_name}')
self.model = data2model[args.dataset]
self.batch_size = data2batch_size[args.dataset]
print(f'model: {self.model} || dataset: {args.dataset}')
print(f'working with experiment: {args.exp}...')
        os.makedirs(os.path.join(args.log_dir, args.dataset, args.exp), exist_ok=True)  # self.log_dir is assigned below
self.device = torch.device(args.device)
self.args = args
print(self.args)
# logging directories
self.log_dir = os.path.join(args.log_dir, args.dataset, args.exp)
self.summary_dir = os.path.join(args.log_dir, args.dataset, "summary", args.exp)
self.summary_gradient_dir = os.path.join(self.log_dir, "gradient")
self.result_dir = os.path.join(self.log_dir, "result")
os.makedirs(self.summary_dir, exist_ok=True)
os.makedirs(self.result_dir, exist_ok=True)
self.train_dataset = get_dataset(
args.dataset,
data_dir=args.data_dir,
dataset_split="train",
transform_split="train",
percent=args.percent,
use_preprocess=data2preprocess[args.dataset],
use_type0=args.use_type0,
use_type1=args.use_type1
)
self.valid_dataset = get_dataset(
args.dataset,
data_dir=args.data_dir,
dataset_split="valid",
transform_split="valid",
percent=args.percent,
use_preprocess=data2preprocess[args.dataset],
use_type0=args.use_type0,
use_type1=args.use_type1
)
self.test_dataset = get_dataset(
args.dataset,
data_dir=args.data_dir,
dataset_split="test",
transform_split="valid",
percent=args.percent,
use_preprocess=data2preprocess[args.dataset],
use_type0=args.use_type0,
use_type1=args.use_type1
)
train_target_attr = []
for data in self.train_dataset.data:
train_target_attr.append(int(data.split('_')[-2]))
train_target_attr = torch.LongTensor(train_target_attr)
attr_dims = []
attr_dims.append(torch.max(train_target_attr).item() + 1)
self.num_classes = attr_dims[0]
self.train_dataset = IdxDataset(self.train_dataset)
# make loader
self.train_loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True
)
self.valid_loader = DataLoader(
self.valid_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
self.test_loader = DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
# define model and optimizer
self.model_b = get_model(self.model, attr_dims[0]).to(self.device)
self.model_d = get_model(self.model, attr_dims[0]).to(self.device)
self.optimizer_b = torch.optim.Adam(
self.model_b.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
self.optimizer_d = torch.optim.Adam(
self.model_d.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
# define loss
self.criterion = nn.CrossEntropyLoss(reduction='none')
self.bias_criterion = nn.CrossEntropyLoss(reduction='none')
print(f'self.criterion: {self.criterion}')
print(f'self.bias_criterion: {self.bias_criterion}')
self.sample_loss_ema_b = EMA(torch.LongTensor(train_target_attr), num_classes=self.num_classes, alpha=args.ema_alpha)
self.sample_loss_ema_d = EMA(torch.LongTensor(train_target_attr), num_classes=self.num_classes, alpha=args.ema_alpha)
print(f'alpha : {self.sample_loss_ema_d.alpha}')
self.best_valid_acc_b, self.best_test_acc_b = 0., 0.
self.best_valid_acc_d, self.best_test_acc_d = 0., 0.
print('finished model initialization....')
# evaluation code for vanilla
def evaluate(self, model, data_loader):
model.eval()
total_correct, total_num = 0, 0
for data, attr, index in tqdm(data_loader, leave=False):
label = attr[:, 0]
data = data.to(self.device)
label = label.to(self.device)
with torch.no_grad():
logit = model(data)
pred = logit.data.max(1, keepdim=True)[1].squeeze(1)
correct = (pred == label).long()
total_correct += correct.sum()
total_num += correct.shape[0]
accs = total_correct/float(total_num)
model.train()
return accs
# evaluation code for ours
    def evaluate_ours(self, model_b, model_l, data_loader, model='label'):
model_b.eval()
model_l.eval()
total_correct, total_num = 0, 0
for data, attr, index in tqdm(data_loader, leave=False):
label = attr[:, 0]
# label = attr
data = data.to(self.device)
label = label.to(self.device)
with torch.no_grad():
if self.args.dataset == 'cmnist':
z_l = model_l.extract(data)
z_b = model_b.extract(data)
else:
z_l, z_b = [], []
                    hook_fn = model_l.avgpool.register_forward_hook(self.concat_dummy(z_l))
                    _ = model_l(data)
                    hook_fn.remove()
                    z_l = z_l[0]
                    hook_fn = model_b.avgpool.register_forward_hook(self.concat_dummy(z_b))
                    _ = model_b(data)
hook_fn.remove()
z_b = z_b[0]
z_origin = torch.cat((z_l, z_b), dim=1)
if model == 'bias':
pred_label = model_b.fc(z_origin)
else:
pred_label = model_l.fc(z_origin)
pred = pred_label.data.max(1, keepdim=True)[1].squeeze(1)
correct = (pred == label).long()
total_correct += correct.sum()
total_num += correct.shape[0]
accs = total_correct/float(total_num)
model_b.train()
model_l.train()
return accs
def save_vanilla(self, step, best=None):
if best:
model_path = os.path.join(self.result_dir, "best_model.th")
else:
model_path = os.path.join(self.result_dir, "model_{}.th".format(step))
state_dict = {
'steps': step,
'state_dict': self.model_b.state_dict(),
'optimizer': self.optimizer_b.state_dict(),
}
with open(model_path, "wb") as f:
torch.save(state_dict, f)
print(f'{step} model saved ...')
def save_ours(self, step, best=None):
if best:
model_path = os.path.join(self.result_dir, "best_model_l.th")
else:
model_path = os.path.join(self.result_dir, "model_l_{}.th".format(step))
state_dict = {
'steps': step,
'state_dict': self.model_l.state_dict(),
'optimizer': self.optimizer_l.state_dict(),
}
with open(model_path, "wb") as f:
torch.save(state_dict, f)
if best:
model_path = os.path.join(self.result_dir, "best_model_b.th")
else:
model_path = os.path.join(self.result_dir, "model_b_{}.th".format(step))
state_dict = {
'steps': step,
'state_dict': self.model_b.state_dict(),
'optimizer': self.optimizer_b.state_dict(),
}
with open(model_path, "wb") as f:
torch.save(state_dict, f)
print(f'{step} model saved ...')
def board_vanilla_loss(self, step, loss_b):
if self.args.wandb:
wandb.log({
"loss_b_train": loss_b,
}, step=step,)
if self.args.tensorboard:
self.writer.add_scalar(f"loss/loss_b_train", loss_b, step)
def board_ours_loss(self, step, loss_dis_conflict, loss_dis_align, loss_swap_conflict, loss_swap_align, lambda_swap):
if self.args.wandb:
wandb.log({
"loss_dis_conflict": loss_dis_conflict,
"loss_dis_align": loss_dis_align,
"loss_swap_conflict": loss_swap_conflict,
"loss_swap_align": loss_swap_align,
"loss": (loss_dis_conflict + loss_dis_align) + lambda_swap * (loss_swap_conflict + loss_swap_align)
}, step=step,)
if self.args.tensorboard:
self.writer.add_scalar(f"loss/loss_dis_conflict", loss_dis_conflict, step)
self.writer.add_scalar(f"loss/loss_dis_align", loss_dis_align, step)
self.writer.add_scalar(f"loss/loss_swap_conflict", loss_swap_conflict, step)
self.writer.add_scalar(f"loss/loss_swap_align", loss_swap_align, step)
self.writer.add_scalar(f"loss/loss", (loss_dis_conflict + loss_dis_align) + lambda_swap * (loss_swap_conflict + loss_swap_align), step)
def board_vanilla_acc(self, step, epoch, inference=None):
valid_accs_b = self.evaluate(self.model_b, self.valid_loader)
test_accs_b = self.evaluate(self.model_b, self.test_loader)
print(f'epoch: {epoch}')
if valid_accs_b >= self.best_valid_acc_b:
self.best_valid_acc_b = valid_accs_b
if test_accs_b >= self.best_test_acc_b:
self.best_test_acc_b = test_accs_b
self.save_vanilla(step, best=True)
if self.args.wandb:
wandb.log({
"acc_b_valid": valid_accs_b,
"acc_b_test": test_accs_b,
},
step=step,)
wandb.log({
"best_acc_b_valid": self.best_valid_acc_b,
"best_acc_b_test": self.best_test_acc_b,
},
step=step, )
print(f'valid_b: {valid_accs_b} || test_b: {test_accs_b}')
if self.args.tensorboard:
self.writer.add_scalar(f"acc/acc_b_valid", valid_accs_b, step)
self.writer.add_scalar(f"acc/acc_b_test", test_accs_b, step)
self.writer.add_scalar(f"acc/best_acc_b_valid", self.best_valid_acc_b, step)
self.writer.add_scalar(f"acc/best_acc_b_test", self.best_test_acc_b, step)
def board_ours_acc(self, step, inference=None):
# check label network
valid_accs_d = self.evaluate_ours(self.model_b, self.model_l, self.valid_loader, model='label')
test_accs_d = self.evaluate_ours(self.model_b, self.model_l, self.test_loader, model='label')
if inference:
print(f'test acc: {test_accs_d.item()}')
import sys
sys.exit(0)
if valid_accs_d >= self.best_valid_acc_d:
self.best_valid_acc_d = valid_accs_d
if test_accs_d >= self.best_test_acc_d:
self.best_test_acc_d = test_accs_d
self.save_ours(step, best=True)
if self.args.wandb:
wandb.log({
"acc_d_valid": valid_accs_d,
"acc_d_test": test_accs_d,
},
step=step, )
wandb.log({
"best_acc_d_valid": self.best_valid_acc_d,
"best_acc_d_test": self.best_test_acc_d,
},
step=step, )
if self.args.tensorboard:
self.writer.add_scalar(f"acc/acc_d_valid", valid_accs_d, step)
self.writer.add_scalar(f"acc/acc_d_test", test_accs_d, step)
self.writer.add_scalar(f"acc/best_acc_d_valid", self.best_valid_acc_d, step)
self.writer.add_scalar(f"acc/best_acc_d_test", self.best_test_acc_d, step)
print(f'valid_d: {valid_accs_d} || test_d: {test_accs_d} ')
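    # concat_dummy is registered as a forward hook on an intermediate layer (avgpool / layer3):
    # it captures the pooled feature into `z` and pads the layer output with zeros so that the
    # final fc layer, which expects the concatenated [z_l, z_b] feature of twice the width,
    # can still run during a single-branch forward pass.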
def concat_dummy(self, z):
def hook(model, input, output):
z.append(output.squeeze())
return torch.cat((output, torch.zeros_like(output)), dim=1)
return hook
def train_vanilla(self, args):
# training vanilla ...
train_iter = iter(self.train_loader)
train_num = len(self.train_dataset.dataset)
epoch, cnt = 0, 0
for step in tqdm(range(args.num_steps)):
try:
index, data, attr, _ = next(train_iter)
except:
train_iter = iter(self.train_loader)
index, data, attr, _ = next(train_iter)
data = data.to(self.device)
attr = attr.to(self.device)
label = attr[:, args.target_attr_idx]
logit_b = self.model_b(data)
loss_b_update = self.criterion(logit_b, label)
loss = loss_b_update.mean()
self.optimizer_b.zero_grad()
loss.backward()
self.optimizer_b.step()
##################################################
#################### LOGGING #####################
##################################################
if step % args.save_freq == 0:
self.save_vanilla(step)
if step % args.log_freq == 0:
self.board_vanilla_loss(step, loss_b=loss)
if step % args.valid_freq == 0:
self.board_vanilla_acc(step, epoch)
cnt += len(index)
if cnt == train_num:
print(f'finished epoch: {epoch}')
epoch += 1
cnt = 0
def train_ours(self, args):
epoch, cnt = 0, 0
print('************** main training starts... ************** ')
train_num = len(self.train_dataset)
# self.model_l : model for predicting intrinsic attributes ((E_i,C_i) in the main paper)
# self.model_l.fc: fc layer for predicting intrinsic attributes (C_i in the main paper)
# self.model_b : model for predicting bias attributes ((E_b, C_b) in the main paper)
# self.model_b.fc: fc layer for predicting bias attributes (C_b in the main paper)
if args.dataset == 'cmnist':
self.model_l = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
else:
if self.args.use_resnet20: # Use this option only for comparing with LfF
self.model_l = get_model('ResNet20_OURS', self.num_classes).to(self.device)
self.model_b = get_model('ResNet20_OURS', self.num_classes).to(self.device)
print('our resnet20....')
else:
self.model_l = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.optimizer_l = torch.optim.Adam(
self.model_l.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
self.optimizer_b = torch.optim.Adam(
self.model_b.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
if args.use_lr_decay:
self.scheduler_b = optim.lr_scheduler.StepLR(self.optimizer_b, step_size=args.lr_decay_step, gamma=args.lr_gamma)
self.scheduler_l = optim.lr_scheduler.StepLR(self.optimizer_l, step_size=args.lr_decay_step, gamma=args.lr_gamma)
        self.bias_criterion = GeneralizedCELoss(q=args.q)
print(f'criterion: {self.criterion}')
print(f'bias criterion: {self.bias_criterion}')
train_iter = iter(self.train_loader)
for step in tqdm(range(args.num_steps)):
try:
index, data, attr, image_path = next(train_iter)
except:
train_iter = iter(self.train_loader)
index, data, attr, image_path = next(train_iter)
data = data.to(self.device)
attr = attr.to(self.device)
label = attr[:, args.target_attr_idx].to(self.device)
# Feature extraction
# Prediction by concatenating zero vectors (dummy vectors).
# We do not use the prediction here.
if args.dataset == 'cmnist':
z_l = self.model_l.extract(data)
z_b = self.model_b.extract(data)
else:
z_b = []
# Use this only for reproducing CIFARC10 of LfF
if self.args.use_resnet20:
hook_fn = self.model_b.layer3.register_forward_hook(self.concat_dummy(z_b))
_ = self.model_b(data)
hook_fn.remove()
z_b = z_b[0]
z_l = []
hook_fn = self.model_l.layer3.register_forward_hook(self.concat_dummy(z_l))
_ = self.model_l(data)
hook_fn.remove()
z_l = z_l[0]
else:
hook_fn = self.model_b.avgpool.register_forward_hook(self.concat_dummy(z_b))
_ = self.model_b(data)
hook_fn.remove()
z_b = z_b[0]
z_l = []
hook_fn = self.model_l.avgpool.register_forward_hook(self.concat_dummy(z_l))
_ = self.model_l(data)
hook_fn.remove()
z_l = z_l[0]
# z=[z_l, z_b]
# Gradients of z_b are not backpropagated to z_l (and vice versa) in order to guarantee disentanglement of representation.
z_conflict = torch.cat((z_l, z_b.detach()), dim=1)
z_align = torch.cat((z_l.detach(), z_b), dim=1)
# Prediction using z=[z_l, z_b]
pred_conflict = self.model_l.fc(z_conflict)
pred_align = self.model_b.fc(z_align)
loss_dis_conflict = self.criterion(pred_conflict, label).detach()
loss_dis_align = self.criterion(pred_align, label).detach()
# EMA sample loss
self.sample_loss_ema_d.update(loss_dis_conflict, index)
self.sample_loss_ema_b.update(loss_dis_align, index)
# class-wise normalize
loss_dis_conflict = self.sample_loss_ema_d.parameter[index].clone().detach()
loss_dis_align = self.sample_loss_ema_b.parameter[index].clone().detach()
loss_dis_conflict = loss_dis_conflict.to(self.device)
loss_dis_align = loss_dis_align.to(self.device)
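            # Scale each sample's EMA loss by the current maximum EMA loss of its class so that
            # relative difficulty is comparable across classes before computing the reweighting below.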
for c in range(self.num_classes):
class_index = torch.where(label == c)[0].to(self.device)
max_loss_conflict = self.sample_loss_ema_d.max_loss(c)
max_loss_align = self.sample_loss_ema_b.max_loss(c)
loss_dis_conflict[class_index] /= max_loss_conflict
loss_dis_align[class_index] /= max_loss_align
loss_weight = loss_dis_align / (loss_dis_align + loss_dis_conflict + 1e-8) # Eq.1 (reweighting module) in the main paper
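            # Samples the bias branch fits easily (low loss_dis_align) receive a weight near 0
            # (bias-aligned), while samples it struggles with receive a weight near 1
            # (bias-conflicting), so the intrinsic branch focuses on bias-conflicting samples.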
loss_dis_conflict = self.criterion(pred_conflict, label) * loss_weight.to(self.device) # Eq.2 W(z)CE(C_i(z),y)
loss_dis_align = self.bias_criterion(pred_align, label) # Eq.2 GCE(C_b(z),y)
# feature-level augmentation : augmentation after certain iteration (after representation is disentangled at a certain level)
if step > args.curr_step:
indices = np.random.permutation(z_b.size(0))
z_b_swap = z_b[indices] # z tilde
label_swap = label[indices] # y tilde
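                # Randomly permuting the bias features pairs each intrinsic feature z_l with the
                # bias feature z_b of another sample (and that sample's label for the bias branch),
                # synthesizing diverse bias-conflicting combinations for feature-level augmentation.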
# Prediction using z_swap=[z_l, z_b tilde]
# Again, gradients of z_b tilde are not backpropagated to z_l (and vice versa) in order to guarantee disentanglement of representation.
z_mix_conflict = torch.cat((z_l, z_b_swap.detach()), dim=1)
z_mix_align = torch.cat((z_l.detach(), z_b_swap), dim=1)
# Prediction using z_swap
pred_mix_conflict = self.model_l.fc(z_mix_conflict)
pred_mix_align = self.model_b.fc(z_mix_align)
loss_swap_conflict = self.criterion(pred_mix_conflict, label) * loss_weight.to(self.device) # Eq.3 W(z)CE(C_i(z_swap),y)
loss_swap_align = self.bias_criterion(pred_mix_align, label_swap) # Eq.3 GCE(C_b(z_swap),y tilde)
lambda_swap = self.args.lambda_swap # Eq.3 lambda_swap_b
else:
# before feature-level augmentation
loss_swap_conflict = torch.tensor([0]).float()
loss_swap_align = torch.tensor([0]).float()
lambda_swap = 0
loss_dis = loss_dis_conflict.mean() + args.lambda_dis_align * loss_dis_align.mean() # Eq.2 L_dis
loss_swap = loss_swap_conflict.mean() + args.lambda_swap_align * loss_swap_align.mean() # Eq.3 L_swap
loss = loss_dis + lambda_swap * loss_swap # Eq.4 Total objective
self.optimizer_l.zero_grad()
self.optimizer_b.zero_grad()
loss.backward()
self.optimizer_l.step()
self.optimizer_b.step()
if step >= args.curr_step and args.use_lr_decay:
self.scheduler_b.step()
self.scheduler_l.step()
if args.use_lr_decay and step % args.lr_decay_step == 0:
print('******* learning rate decay .... ********')
print(f"self.optimizer_b lr: { self.optimizer_b.param_groups[-1]['lr']}")
print(f"self.optimizer_l lr: { self.optimizer_l.param_groups[-1]['lr']}")
if step % args.save_freq == 0:
self.save_ours(step)
if step % args.log_freq == 0:
bias_label = attr[:, 1]
align_flag = torch.where(label == bias_label)[0]
self.board_ours_loss(
step=step,
loss_dis_conflict=loss_dis_conflict.mean(),
loss_dis_align=args.lambda_dis_align * loss_dis_align.mean(),
loss_swap_conflict=loss_swap_conflict.mean(),
loss_swap_align=args.lambda_swap_align * loss_swap_align.mean(),
lambda_swap=lambda_swap
)
if step % args.valid_freq == 0:
self.board_ours_acc(step)
cnt += data.shape[0]
if cnt == train_num:
print(f'finished epoch: {epoch}')
epoch += 1
cnt = 0
def test_ours(self, args):
if args.dataset == 'cmnist':
self.model_l = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
else:
self.model_l = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.model_l.load_state_dict(torch.load(os.path.join(args.pretrained_path, 'best_model_l.th'))['state_dict'])
self.model_b.load_state_dict(torch.load(os.path.join(args.pretrained_path, 'best_model_b.th'))['state_dict'])
self.board_ours_acc(step=0, inference=True)
| 25,007 | 39.400646 | 161 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/util.py | '''Modified from https://github.com/alinlab/LfF/blob/master/util.py'''
import io
import torch
import numpy as np
import torch.nn as nn
class EMA:
def __init__(self, label, num_classes=None, alpha=0.9):
self.label = label.cuda()
self.alpha = alpha
self.parameter = torch.zeros(label.size(0))
self.updated = torch.zeros(label.size(0))
self.num_classes = num_classes
self.max = torch.zeros(self.num_classes).cuda()
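    # update() keeps an exponential moving average of each sample's loss:
    # parameter[i] <- alpha * parameter[i] + (1 - alpha * updated[i]) * loss_i,
    # so the first observation (updated == 0) is stored as-is and later ones are smoothed by alpha.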
def update(self, data, index, curve=None, iter_range=None, step=None):
self.parameter = self.parameter.to(data.device)
self.updated = self.updated.to(data.device)
index = index.to(data.device)
if curve is None:
self.parameter[index] = self.alpha * self.parameter[index] + (1 - self.alpha * self.updated[index]) * data
else:
alpha = curve ** -(step / iter_range)
self.parameter[index] = alpha * self.parameter[index] + (1 - alpha * self.updated[index]) * data
self.updated[index] = 1
def max_loss(self, label):
label_index = torch.where(self.label == label)[0]
return self.parameter[label_index].max()
| 1,178 | 35.84375 | 118 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/train.py | import numpy as np
import torch
import random
from learner import Learner
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)')
# training
parser.add_argument("--batch_size", help="batch_size", default=256, type=int)
parser.add_argument("--lr",help='learning rate',default=1e-3, type=float)
parser.add_argument("--weight_decay",help='weight_decay',default=0.0, type=float)
parser.add_argument("--momentum",help='momentum',default=0.9, type=float)
parser.add_argument("--num_workers", help="workers number", default=16, type=int)
parser.add_argument("--exp", help='experiment name', default='debugging', type=str)
parser.add_argument("--device", help="cuda or cpu", default='cuda', type=str)
parser.add_argument("--num_steps", help="# of iterations", default= 500 * 100, type=int)
parser.add_argument("--target_attr_idx", help="target_attr_idx", default= 0, type=int)
parser.add_argument("--bias_attr_idx", help="bias_attr_idx", default= 1, type=int)
parser.add_argument("--dataset", help="data to train, [cmnist, cifar10, bffhq]", default= 'cmnist', type=str)
parser.add_argument("--percent", help="percentage of conflict", default= "1pct", type=str)
parser.add_argument("--use_lr_decay", action='store_true', help="whether to use learning rate decay")
parser.add_argument("--lr_decay_step", help="learning rate decay steps", type=int, default=10000)
parser.add_argument("--q", help="GCE parameter q", type=float, default=0.7)
parser.add_argument("--lr_gamma", help="lr gamma", type=float, default=0.1)
parser.add_argument("--lambda_dis_align", help="lambda_dis in Eq.2", type=float, default=1.0)
parser.add_argument("--lambda_swap_align", help="lambda_swap_b in Eq.3", type=float, default=1.0)
parser.add_argument("--lambda_swap", help="lambda swap (lambda_swap in Eq.4)", type=float, default=1.0)
parser.add_argument("--ema_alpha", help="use weight mul", type=float, default=0.7)
parser.add_argument("--curr_step", help="curriculum steps", type=int, default= 0)
parser.add_argument("--use_type0", action='store_true', help="whether to use type 0 CIFAR10C")
parser.add_argument("--use_type1", action='store_true', help="whether to use type 1 CIFAR10C")
parser.add_argument("--use_resnet20", help="Use Resnet20", action="store_true") # ResNet 20 was used in Learning From Failure CifarC10 (We used ResNet18 in our paper)
parser.add_argument("--model", help="which network, [MLP, ResNet18, ResNet20, ResNet50]", default= 'MLP', type=str)
# logging
parser.add_argument("--log_dir", help='path for saving model', default='./log', type=str)
parser.add_argument("--data_dir", help='path for loading data', default='dataset', type=str)
parser.add_argument("--valid_freq", help='frequency to evaluate on valid/test set', default=500, type=int)
parser.add_argument("--log_freq", help='frequency to log on tensorboard', default=500, type=int)
parser.add_argument("--save_freq", help='frequency to save model checkpoint', default=1000, type=int)
parser.add_argument("--wandb", action="store_true", help="whether to use wandb")
parser.add_argument("--tensorboard", action="store_true", help="whether to use tensorboard")
# experiment
parser.add_argument("--train_ours", action="store_true", help="whether to train our method")
parser.add_argument("--train_vanilla", action="store_true", help="whether to train vanilla")
args = parser.parse_args()
# init learner
learner = Learner(args)
# actual training
print('Official Pytorch Code of "Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)"')
print('Training starts ...')
if args.train_ours:
learner.train_ours(args)
elif args.train_vanilla:
learner.train_vanilla(args)
else:
print('choose one of the two options ...')
import sys
sys.exit(0)
| 4,084 | 59.970149 | 170 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/resnet.py | ''' From https://github.com/alinlab/LfF/blob/master/module/resnet.py '''
"""
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
numbers of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = [
"ResNet",
"resnet20",
"resnet32",
"resnet44",
"resnet56",
"resnet110",
"resnet1202",
]
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option="A"):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == "A":
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(
x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant",
0,
)
)
elif option == "B":
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(
3, 16, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.fc = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
feat = out.view(out.size(0), -1)
return feat
def predict(self, x):
prediction = self.fc(x)
return prediction
def forward(self, x, mode=None):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# out = F.avg_pool2d(out, out.size()[3])
# out = out.view(out.size(0), -1)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
final_out = self.fc(out)
if mode == 'tsne' or mode == 'mixup':
return out, final_out
else:
return final_out
def resnet20(num_classes=10):
    return ResNet(BasicBlock, [3, 3, 3], num_classes)
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(
lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters(),
)
)
),
)
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith("resnet"):
print(net_name)
test(globals()[net_name]())
print()
| 6,270 | 27.375566 | 78 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/mlp.py | ''' Modified from https://github.com/alinlab/LfF/blob/master/module/mlp.py'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_DISENTANGLE(nn.Module):
def __init__(self, num_classes = 10):
super(MLP_DISENTANGLE, self).__init__()
self.feature = nn.Sequential(
nn.Linear(3*28*28, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 16),
nn.ReLU()
)
self.fc = nn.Linear(32, num_classes)
def extract(self, x):
x = x.view(x.size(0), -1) / 255
feat = self.feature(x)
return feat
    def predict(self, x):
        # Assumes x is already the concatenated [z_l, z_b] feature (dim 32) expected by self.fc.
        prediction = self.fc(x)
        return prediction
    def forward(self, x, mode=None, return_feat=False):
        x = x.view(x.size(0), -1) / 255
        feat = x = self.feature(x)
        # self.fc expects the concatenated [z_l, z_b] feature (dim 32); pad with zeros for a
        # single-branch forward pass (assumption: the disentangled training loop only calls
        # extract()/fc() directly, so this replaces the undefined `self.classifier`).
        final_x = self.fc(torch.cat((x, torch.zeros_like(x)), dim=1))
        if mode == 'tsne' or mode == 'mixup':
            return x, final_x
        else:
            if return_feat:
                return final_x, feat
            else:
                return final_x
class MLP(nn.Module):
def __init__(self, num_classes = 10):
super(MLP, self).__init__()
self.feature = nn.Sequential(
nn.Linear(3*28*28, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 16),
nn.ReLU()
)
self.classifier = nn.Linear(16, num_classes)
def forward(self, x, mode=None, return_feat=False):
x = x.view(x.size(0), -1) / 255
feat = x = self.feature(x)
final_x = self.classifier(x)
if mode == 'tsne' or mode == 'mixup':
return x, final_x
else:
if return_feat:
return final_x, feat
else:
return final_x
class Noise_MLP(nn.Module):
def __init__(self, n_dim=16, n_layer=3):
super(Noise_MLP, self).__init__()
layers = []
for i in range(n_layer):
layers.append(nn.Linear(n_dim, n_dim))
layers.append(nn.LeakyReLU(0.2))
self.style = nn.Sequential(*layers)
def forward(self, z):
x = self.style(z)
return x
| 2,245 | 26.728395 | 77 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/loss.py | '''From https://github.com/alinlab/LfF/blob/master/module/loss.py'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class GeneralizedCELoss(nn.Module):
def __init__(self, q=0.7):
super(GeneralizedCELoss, self).__init__()
self.q = q
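    # The detached weight q * p_y^q rescales the standard cross-entropy gradient so that samples the
    # model already classifies confidently dominate the update; up to a constant factor this matches
    # the gradient of the generalized CE loss (1 - p_y^q) / q used to amplify bias in LfF-style training.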
def forward(self, logits, targets):
p = F.softmax(logits, dim=1)
if np.isnan(p.mean().item()):
raise NameError('GCE_p')
Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))
# modify gradient of cross entropy
loss_weight = (Yg.squeeze().detach()**self.q)*self.q
if np.isnan(Yg.mean().item()):
raise NameError('GCE_Yg')
loss = F.cross_entropy(logits, targets, reduction='none') * loss_weight
return loss
| 813 | 28.071429 | 79 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/util.py | ''' Modified from https://github.com/alinlab/LfF/blob/master/module/util.py '''
import torch.nn as nn
from module.resnet import resnet20
from module.mlp import *
from torchvision.models import resnet18, resnet50
def get_model(model_tag, num_classes):
if model_tag == "ResNet20":
return resnet20(num_classes)
elif model_tag == "ResNet20_OURS":
model = resnet20(num_classes)
model.fc = nn.Linear(128, num_classes)
return model
elif model_tag == "ResNet18":
print('bringing no pretrained resnet18 ...')
model = resnet18(pretrained=False)
model.fc = nn.Linear(512, num_classes)
return model
elif model_tag == "MLP":
return MLP(num_classes=num_classes)
elif model_tag == "mlp_DISENTANGLE":
return MLP_DISENTANGLE(num_classes=num_classes)
elif model_tag == 'resnet_DISENTANGLE':
print('bringing no pretrained resnet18 disentangle...')
model = resnet18(pretrained=False)
model.fc = nn.Linear(1024, num_classes)
return model
else:
raise NotImplementedError
| 1,099 | 34.483871 | 79 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/data/util.py | '''Modified from https://github.com/alinlab/LfF/blob/master/data/util.py'''
import os
import torch
from torch.utils.data.dataset import Dataset, Subset
from torchvision import transforms as T
from glob import glob
from PIL import Image
class IdxDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return (idx, *self.dataset[idx])
class ZippedDataset(Dataset):
def __init__(self, datasets):
super(ZippedDataset, self).__init__()
self.dataset_sizes = [len(d) for d in datasets]
self.datasets = datasets
def __len__(self):
return max(self.dataset_sizes)
def __getitem__(self, idx):
items = []
for dataset_idx, dataset_size in enumerate(self.dataset_sizes):
items.append(self.datasets[dataset_idx][idx % dataset_size])
item = [torch.stack(tensors, dim=0) for tensors in zip(*items)]
return item
class CMNISTDataset(Dataset):
def __init__(self,root,split,transform=None, image_path_list=None):
super(CMNISTDataset, self).__init__()
self.transform = transform
self.root = root
self.image2pseudo = {}
self.image_path_list = image_path_list
if split=='train':
self.align = glob(os.path.join(root, 'align',"*","*"))
self.conflict = glob(os.path.join(root, 'conflict',"*","*"))
self.data = self.align + self.conflict
elif split=='valid':
self.data = glob(os.path.join(root,split,"*"))
elif split=='test':
self.data = glob(os.path.join(root, '../test',"*","*"))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
attr = torch.LongTensor([int(self.data[index].split('_')[-2]),int(self.data[index].split('_')[-1].split('.')[0])])
image = Image.open(self.data[index]).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, attr, self.data[index]
class CIFAR10Dataset(Dataset):
def __init__(self, root, split, transform=None, image_path_list=None, use_type0=None, use_type1=None):
super(CIFAR10Dataset, self).__init__()
self.transform = transform
self.root = root
self.image2pseudo = {}
self.image_path_list = image_path_list
if split=='train':
self.align = glob(os.path.join(root, 'align',"*","*"))
self.conflict = glob(os.path.join(root, 'conflict',"*","*"))
self.data = self.align + self.conflict
elif split=='valid':
self.data = glob(os.path.join(root,split,"*", "*"))
elif split=='test':
self.data = glob(os.path.join(root, '../test',"*","*"))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
attr = torch.LongTensor(
[int(self.data[index].split('_')[-2]), int(self.data[index].split('_')[-1].split('.')[0])])
image = Image.open(self.data[index]).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, attr, self.data[index]
class bFFHQDataset(Dataset):
def __init__(self, root, split, transform=None, image_path_list=None):
super(bFFHQDataset, self).__init__()
self.transform = transform
self.root = root
self.image2pseudo = {}
self.image_path_list = image_path_list
if split=='train':
self.align = glob(os.path.join(root, 'align',"*","*"))
self.conflict = glob(os.path.join(root, 'conflict',"*","*"))
self.data = self.align + self.conflict
elif split=='valid':
self.data = glob(os.path.join(os.path.dirname(root), split, "*"))
elif split=='test':
self.data = glob(os.path.join(os.path.dirname(root), split, "*"))
data_conflict = []
for path in self.data:
target_label = path.split('/')[-1].split('.')[0].split('_')[1]
bias_label = path.split('/')[-1].split('.')[0].split('_')[2]
if target_label != bias_label:
data_conflict.append(path)
self.data = data_conflict
def __len__(self):
return len(self.data)
def __getitem__(self, index):
attr = torch.LongTensor(
[int(self.data[index].split('_')[-2]), int(self.data[index].split('_')[-1].split('.')[0])])
image = Image.open(self.data[index]).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, attr, self.data[index]
transforms = {
"cmnist": {
"train": T.Compose([T.ToTensor()]),
"valid": T.Compose([T.ToTensor()]),
"test": T.Compose([T.ToTensor()])
},
"bffhq": {
"train": T.Compose([T.Resize((224,224)), T.ToTensor()]),
"valid": T.Compose([T.Resize((224,224)), T.ToTensor()]),
"test": T.Compose([T.Resize((224,224)), T.ToTensor()])
},
"cifar10c": {
"train": T.Compose([T.ToTensor(),]),
"valid": T.Compose([T.ToTensor(),]),
"test": T.Compose([T.ToTensor(),]),
},
}
transforms_preprcs = {
"cmnist": {
"train": T.Compose([T.ToTensor()]),
"valid": T.Compose([T.ToTensor()]),
"test": T.Compose([T.ToTensor()])
},
"bffhq": {
"train": T.Compose([
T.Resize((224,224)),
T.RandomCrop(224, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
},
"cifar10c": {
"train": T.Compose(
[
T.RandomCrop(32, padding=4),
# T.RandomResizedCrop(32),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
},
}
transforms_preprcs_ae = {
"cmnist": {
"train": T.Compose([T.ToTensor()]),
"valid": T.Compose([T.ToTensor()]),
"test": T.Compose([T.ToTensor()])
},
"bffhq": {
"train": T.Compose([
T.Resize((224,224)),
T.RandomCrop(224, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
},
"cifar10c": {
"train": T.Compose(
[
# T.RandomCrop(32, padding=4),
T.RandomResizedCrop(32),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
},
}
def get_dataset(dataset, data_dir, dataset_split, transform_split, percent, use_preprocess=None, image_path_list=None, use_type0=None, use_type1=None):
dataset_category = dataset.split("-")[0]
if use_preprocess:
transform = transforms_preprcs[dataset_category][transform_split]
else:
transform = transforms[dataset_category][transform_split]
dataset_split = "valid" if (dataset_split == "eval") else dataset_split
if dataset == 'cmnist':
root = data_dir + f"/cmnist/{percent}"
dataset = CMNISTDataset(root=root,split=dataset_split,transform=transform, image_path_list=image_path_list)
elif 'cifar10c' in dataset:
# if use_type0:
# root = data_dir + f"/cifar10c_0805_type0/{percent}"
# elif use_type1:
# root = data_dir + f"/cifar10c_0805_type1/{percent}"
# else:
root = data_dir + f"/cifar10c/{percent}"
dataset = CIFAR10Dataset(root=root, split=dataset_split, transform=transform, image_path_list=image_path_list, use_type0=use_type0, use_type1=use_type1)
elif dataset == "bffhq":
root = data_dir + f"/bffhq/{percent}"
dataset = bFFHQDataset(root=root, split=dataset_split, transform=transform, image_path_list=image_path_list)
else:
print('wrong dataset ...')
import sys
sys.exit(0)
return dataset
| 9,788 | 31.73913 | 160 | py |
fast-dpsgd | fast-dpsgd-main/opacusdp.py | '''
Opacus experiments for all the models
'''
import time
import torch
import torch.nn.functional as F
from opacus import PrivacyEngine
from opacus.layers import DPLSTM
from torch import nn, optim
import data
import utils
from pytorch import get_data, model_dict
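# Opacus cannot compute per-sample gradients through the fused cuDNN nn.LSTM, so this script
# swaps in an LSTM model built on opacus.layers.DPLSTM (mdict['lstm'] is overridden in main()).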
class LSTMNet(nn.Module):
def __init__(self, vocab_size: int, batch_size):
super().__init__()
# Embedding dimension: vocab_size + <unk>, <pad>, <eos>, <sos>
self.emb = nn.Embedding(vocab_size + 4, 100)
self.h_init = torch.randn(1, batch_size, 100).cuda()
self.c_init = torch.randn(1, batch_size, 100).cuda()
self.hidden = (self.h_init, self.c_init)
self.lstm = DPLSTM(100, 100, batch_first=True)
self.fc1 = nn.Linear(100, 2)
def forward(self, x):
x = self.emb(x) # batch_size, seq_len, embed_dim
# x has to be of shape [batch_size, seq_len, input_dim]
x, _ = self.lstm(x, self.hidden) # batch_size, seq_len, lstm_dim
x = x.mean(1) # batch_size, lstm_dim
x = self.fc1(x) # batch_size, fc_dim
return x
def main(args):
print(args)
assert args.dpsgd
torch.backends.cudnn.benchmark = True
mdict = model_dict.copy()
mdict['lstm'] = LSTMNet
train_data, train_labels = get_data(args)
model = mdict[args.experiment](vocab_size=args.max_features, batch_size=args.batch_size).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0)
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
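    # The PrivacyEngine hooks into the optimizer: it clips each per-sample gradient to
    # max_per_sample_grad_norm, adds Gaussian noise scaled by sigma, and tracks the (ε, δ)
    # budget via RDP over the listed alpha orders (queried later with get_privacy_spent).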
privacy_engine = PrivacyEngine(
model,
batch_size=args.batch_size,
sample_size=len(train_data),
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=args.sigma,
max_grad_norm=args.max_per_sample_grad_norm,
)
privacy_engine.attach(optimizer)
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
model.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
torch.cuda.synchronize()
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if args.dpsgd:
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(args.delta)
print(f"Train Epoch: {epoch} \t"
# f"Loss: {np.mean(losses):.6f} "
f"(ε = {epsilon:.2f}, δ = {args.delta}) for α = {best_alpha}")
        else:
            # Unreachable here because args.dpsgd is asserted above; kept as a minimal
            # placeholder since `losses` is never tracked in this script.
            print(f"Train Epoch: {epoch}")
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument(
"--sigma",
type=float,
default=1.0,
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=1.0,
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
help="Target delta (default: 1e-5)",
)
args = parser.parse_args()
main(args)
| 3,656 | 31.078947 | 99 | py |
fast-dpsgd | fast-dpsgd-main/runtime_experiment.py | import argparse
import pprint
import subprocess
from utils import pr_green, pr_red
def launch(expt, batch_size, epochs):
"""Runs expt at batch_size for all the scripts"""
errors = []
# yapf: disable
cmds = [
('jax', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size}'),
('tf2', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('tf1', f'CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('pytorch', f'CUDA_VISIBLE_DEVICES=0 python pytorch.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size}'),
('jaxdp', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('tf2dp', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('tf1dp', f'CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('opacusdp', f'CUDA_VISIBLE_DEVICES=0 python opacusdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('backpackdp', f'CUDA_VISIBLE_DEVICES=0 python backpackdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('pyvacydp', f'CUDA_VISIBLE_DEVICES=0 python pyvacydp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('owkindp', f'CUDA_VISIBLE_DEVICES=0 python owkindp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('tf2xla', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
('tf2dpxla', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
('tf1xla', f'TF_XLA_FLAGS=--tf_xla_auto_jit=2 CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
('tf1dpxla', f'TF_XLA_FLAGS=--tf_xla_auto_jit=2 CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
# For Ablations:
('jaxdp_nv', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_vmap'),
# Outside of JIT compilation, the dynamic_unroll's LSTM (using scan) is faster than the static_unroll'd version.
('jaxdp_nj', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_jit --dynamic_unroll'),
('jaxdp_nvj', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_vmap --no_jit --dynamic_unroll'),
('tf2dp_nv', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla --no_vmap'),
('tf2dp_nvj', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla --no_vmap --no_jit'),
('tf2dpxla_nv', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --xla --no_vmap'),
]
# yapf: enable
for name, cmd in cmds:
if expt == 'lstm':
if 'jax' in name:
# Due to https://github.com/deepmind/dm-haiku/issues/77, we disable
# omnistaging when running the LSTM in JAX (it will fail to compile).
cmd = 'JAX_OMNISTAGING=0 ' + cmd
if name in ('tf1', 'tf2', 'tf1xla', 'tf2dp_nv'):
# The dynamically unrolled LSTM uses the cudNN LSTM implementation
# in the non-vectorized_map case, making it faster.
cmd = cmd + ' --no_unroll'
pr_green(f'Starting {name}: {cmd}')
out = subprocess.run([cmd],
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True)
print(out.stdout)
if out.returncode != 0:
errors.append(name)
pr_red(out.stderr)
print()
pr_red(f'Done {name}: {cmd} \n')
else:
pr_green(f'Done {name}: {cmd} \n')
pr_green(f'Done {expt} at batch size {batch_size}.')
return errors
def main(args):
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(args)
failed = {}
for expt in args.experiments:
for bs in args.batch_sizes:
failed[(expt, bs)] = launch(expt, bs, args.epochs)
pr_red('\nFailed Experiments: \n')
pp.pprint(failed)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Returns Max Batch Size before OOM')
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--experiments',
default=['logreg', 'ffnn', 'mnist', 'embed', 'lstm', 'cifar10'],
nargs='+')
parser.add_argument('--batch_sizes', default=[256, 128, 64, 32, 16], nargs='+', type=int)
args = parser.parse_args()
main(args)
| 5,190 | 59.360465 | 163 | py |
fast-dpsgd | fast-dpsgd-main/pytorch.py | '''
Model file and non-differentially private file
'''
import time
import torch
import torch.nn.functional as F
from torch import nn, optim
import data
import utils
class EmbeddingNet(nn.Module):
def __init__(self, vocab_size: int, **_):
super().__init__()
# Embedding dimension: vocab_size + <unk>, <pad>, <eos>, <sos>
self.emb = nn.Embedding(vocab_size + 4, 16)
self.fc1 = nn.Linear(16, 2)
def forward(self, x):
# x: batch_size, seq_len
x = self.emb(x) # batch_size, seq_len, embed_dim
x = x.mean(1) # batch_size, embed_dim
x = self.fc1(x) # batch_size, fc_dim
return x
class LSTMNet(nn.Module):
def __init__(self, vocab_size: int, **_):
super().__init__()
# Embedding dimension: vocab_size + <unk>, <pad>, <eos>, <sos>
self.emb = nn.Embedding(vocab_size + 4, 100)
self.lstm = nn.LSTM(100, 100)
self.fc1 = nn.Linear(100, 2)
def forward(self, x):
# x: batch_size, seq_len
x = self.emb(x) # batch_size, seq_len, embed_dim
x = x.transpose(0, 1) # seq_len, batch_size, embed_dim
x, _ = self.lstm(x) # seq_len, batch_size, lstm_dim
x = x.mean(0) # batch_size, lstm_dim
x = self.fc1(x) # batch_size, fc_dim
return x
class MNISTNet(nn.Module):
def __init__(self, **_):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
class FFNN(nn.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = nn.Linear(104, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
return out
class Logistic(nn.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = nn.Linear(104, 1)
def forward(self, x):
out = self.fc1(x)
        out = torch.sigmoid(out)  # F.sigmoid is deprecated
return out
class CIFAR10Model(nn.Module):
def __init__(self, **_):
super().__init__()
self.layer_list = nn.ModuleList([
nn.Sequential(nn.Conv2d(3, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(32, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(32, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(64, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(64, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(128, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(128, 256, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Conv2d(256, 10, (3, 3), padding=1, stride=(1, 1)),
])
def forward(self, x):
for layer in self.layer_list:
x = layer(x)
# print(x.shape)
return torch.mean(x, dim=(2, 3))
model_dict = {
'mnist': MNISTNet,
'lstm': LSTMNet,
'embed': EmbeddingNet,
'ffnn': FFNN,
'logreg': Logistic,
'cifar10': CIFAR10Model,
}
def get_data(args):
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NCHW',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
train_data, _ = data_fn(**kwargs)
for d in train_data: # train_data, train_labels
d = torch.from_numpy(d)
if d.dtype == torch.int32:
d = d.long()
if args.experiment == 'logreg' and d.dtype != torch.float32:
d = d.float()
yield d
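# Editorial note: get_data is a two-element generator, so
# `train_data, train_labels = get_data(args)` in main() unpacks the converted
# tensors; int32 arrays are promoted to int64, and for the 'logreg' experiment
# the labels are additionally cast to float32 to match nn.BCELoss.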
def main(args):
print(args)
assert not args.dpsgd
torch.backends.cudnn.benchmark = True
train_data, train_labels = get_data(args)
model = model_dict[args.experiment](vocab_size=args.max_features).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
model.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
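        # Editorial note: unlike the JAX/TF scripts below, no explicit device
        # synchronization (e.g. torch.cuda.synchronize()) happens before the
        # timer is read, so queued CUDA kernels may still be running when the
        # epoch duration is recorded.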
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
args = parser.parse_args()
main(args)
| 5,651 | 30.4 | 92 | py |
fast-dpsgd | fast-dpsgd-main/data.py | import numpy as np
import tensorflow as tf
from keras.preprocessing import sequence
def dataloader(x, y, batch_size):
if batch_size > len(x):
raise ValueError('Batch Size too big.')
num_eg = len(x)
assert num_eg == len(y)
for i in range(0, num_eg, batch_size):
yield x[i:i + batch_size], y[i:i + batch_size]
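# Illustrative usage (editorial addition, not in the original file); assumes
# train_x and train_y are arrays with matching first dimensions:
#
#     for xb, yb in dataloader(train_x, train_y, batch_size=64):
#         ...  # xb/yb hold up to 64 examples; the final batch may be smaller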
def load_cifar10(format='NHWC', **_):
train, test = tf.keras.datasets.cifar10.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.asarray(train_data, dtype=np.float32) / 255.
test_data = np.asarray(test_data, dtype=np.float32) / 255.
if format == 'NHWC':
pass
elif format == 'NCHW':
train_data = train_data.transpose((0, 3, 1, 2))
test_data = test_data.transpose((0, 3, 1, 2))
else:
raise ValueError('Invalid format.')
train_labels = np.asarray(train_labels, dtype=np.int32).squeeze()
test_labels = np.asarray(test_labels, dtype=np.int32).squeeze()
return (train_data, train_labels), (test_data, test_labels)
def load_dummy_cifar10(num_examples, format='NHWC', **_):
train_labels = np.random.randint(0, 10, num_examples).astype(np.int32)
if format == 'NHWC':
train_data = np.random.random((num_examples, 32, 32, 3)).astype(np.float32)
elif format == 'NCHW':
train_data = np.random.random((num_examples, 3, 32, 32)).astype(np.float32)
else:
raise ValueError('Invalid format.')
return (train_data, train_labels), (train_data, train_labels)
def load_mnist(format='NHWC', **_):
"""Loads MNIST and preprocesses to combine training and validation data."""
train, test = tf.keras.datasets.mnist.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.asarray(train_data, dtype=np.float32) / 255.
test_data = np.asarray(test_data, dtype=np.float32) / 255.
if format == 'NHWC':
train_data, test_data = train_data[..., None], test_data[..., None]
elif format == 'NCHW':
train_data, test_data = train_data[:, None], test_data[:, None]
else:
raise ValueError('Invalid format.')
train_labels = np.asarray(train_labels, dtype=np.int32)
test_labels = np.asarray(test_labels, dtype=np.int32)
assert train_data.min() == 0.
assert train_data.max() == 1.
assert test_data.min() == 0.
assert test_data.max() == 1.
assert train_labels.ndim == 1
assert test_labels.ndim == 1
return (train_data, train_labels), (test_data, test_labels)
def load_dummy_mnist(num_examples, format='NHWC', **_):
train_data = np.random.random((num_examples, 28, 28)).astype(np.float32)
train_labels = np.random.randint(0, 10, num_examples).astype(np.int32)
if format == 'NHWC':
train_data = train_data[..., None]
elif format == 'NCHW':
train_data = train_data[:, None]
else:
raise ValueError('Invalid format.')
return (train_data, train_labels), (train_data, train_labels)
def load_imdb(max_features=10_000, max_len=256, **_):
"""Load IMDB movie reviews data."""
train, test = tf.keras.datasets.imdb.load_data(num_words=max_features)
(train_data, train_labels), (test_data, test_labels) = train, test
train_data = sequence.pad_sequences(train_data, maxlen=max_len).astype(np.int32)
test_data = sequence.pad_sequences(test_data, maxlen=max_len).astype(np.int32)
train_labels, test_labels = train_labels.astype(np.int32), test_labels.astype(np.int32)
return (train_data, train_labels), (test_data, test_labels)
def load_dummy_imdb(num_examples, max_features=10_000, max_len=256, **_):
train_data = np.random.randint(0, max_features, (num_examples, max_len)).astype(np.int32)
train_labels = np.random.random(num_examples).round().astype(np.int32)
return (train_data, train_labels), (train_data, train_labels)
def load_adult(**_):
"""Loads ADULT a2a as in LIBSVM and preprocesses to combine training and validation data."""
# https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html
data_x = np.load('adult_processed_x.npy')
data_y = np.load('adult_processed_y.npy')
data_y[data_y == -1] = 0
train_data = data_x.astype(np.float32)
train_labels = data_y.astype(np.int32)
return (train_data, train_labels), None
def load_dummy_adult(num_examples, **_):
train_data = np.random.random((num_examples, 104)).astype(np.float32)
train_labels = np.random.random(num_examples).round().astype(np.int32)
return (train_data, train_labels), None
data_fn_dict = {
'mnist': (load_mnist, load_dummy_mnist),
'lstm': (load_imdb, load_dummy_imdb),
'embed': (load_imdb, load_dummy_imdb),
'ffnn': (load_adult, load_dummy_adult),
'logreg': (load_adult, load_dummy_adult),
'cifar10': (load_cifar10, load_dummy_cifar10),
}
if __name__ == '__main__':
# Test Functionality
names = ['mnist', 'imdb', 'adult', 'cifar10']
data_fns = [load_mnist, load_imdb, load_adult, load_cifar10]
dummy_data_fns = [load_dummy_mnist, load_dummy_imdb, load_dummy_adult, load_dummy_cifar10]
for name, data_fn, dummy_data_fn in zip(names, data_fns, dummy_data_fns):
print(f'Checking {name}')
(x, y), _ = data_fn()
(dx, dy), _ = dummy_data_fn(x.shape[0])
assert x.shape == dx.shape, f'Original: {x.shape}, Dummy: {dx.shape}'
assert y.shape == dy.shape, f'Original: {y.shape}, Dummy: {dy.shape}'
assert x.dtype == dx.dtype, f'Original: {x.dtype}, Dummy: {dx.dtype}'
assert y.dtype == dy.dtype, f'Original: {y.dtype}, Dummy: {dy.dtype}'
| 5,648 | 36.410596 | 96 | py |
fast-dpsgd | fast-dpsgd-main/jaxdp.py | '''
Code for JAX implementations presented in: Enabling Fast
Differentially Private SGD via Just-in-Time Compilation and Vectorization
'''
import itertools
import time
from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, random, vmap
from jax.experimental import optimizers, stax
from jax.lib import pytree
from jax.tree_util import tree_flatten, tree_multimap, tree_unflatten
from keras.utils.np_utils import to_categorical
from tensorflow_privacy.privacy.analysis.rdp_accountant import (compute_rdp, get_privacy_spent)
import data
import utils
def logistic_model(features, **_):
return hk.Sequential([hk.Linear(1), jax.nn.sigmoid])(features)
def ffnn_model(features, **_):
return hk.Sequential([hk.Linear(50), jax.nn.relu, hk.Linear(2)])(features)
def mnist_model(features, **_):
return hk.Sequential([
hk.Conv2D(16, (8, 8), padding='SAME', stride=(2, 2)),
jax.nn.relu,
hk.MaxPool(2, 1, padding='VALID'), # matches stax
hk.Conv2D(32, (4, 4), padding='VALID', stride=(2, 2)),
jax.nn.relu,
hk.MaxPool(2, 1, padding='VALID'), # matches stax
hk.Flatten(),
hk.Linear(32),
jax.nn.relu,
hk.Linear(10),
])(features)
def lstm_model(x, vocab_size=10_000, seq_len=256, args=None, **_):
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(vocab_size + 4, 100, w_init=embed_init)
o2 = token_embedding_map(x)
o2 = jnp.reshape(o2, (o2.shape[1], o2.shape[0], o2.shape[2]))
# LSTM Part of Network
core = hk.LSTM(100)
if args and args.dynamic_unroll:
outs, state = hk.dynamic_unroll(core, o2, core.initial_state(x.shape[0]))
else:
outs, state = hk.static_unroll(core, o2, core.initial_state(x.shape[0]))
outs = outs.reshape(outs.shape[1], outs.shape[0], outs.shape[2])
# Avg Pool -> Linear
red_dim_outs = hk.avg_pool(outs, seq_len, seq_len, "SAME").squeeze()
final_layer = hk.Linear(2)
ret = final_layer(red_dim_outs)
return ret
def embedding_model(arr, vocab_size=10_000, seq_len=256, **_):
# embedding part of network
x = arr
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(vocab_size + 4, 16, w_init=embed_init)
o2 = token_embedding_map(x)
# avg pool -> linear
o3 = hk.avg_pool(o2, seq_len, seq_len, "SAME").squeeze()
fcnn = hk.Sequential([hk.Linear(16), jax.nn.relu, hk.Linear(2)])
return fcnn(o3)
def cifar_model(features, **_):
out = hk.Conv2D(32, (3, 3), padding='SAME', stride=(1, 1))(features)
out = jax.nn.relu(out)
out = hk.Conv2D(32, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(64, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(64, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(128, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(128, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(256, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(10, (3, 3), padding='SAME', stride=(1, 1))(out)
return out.mean((1, 2))
def multiclass_loss(model, params, batch):
inputs, targets = batch
logits = model.apply(params, None, inputs)
    # convert the targets to a one-hot encoding with the same shape as the
    # logits, for a vectorized dot product
one_hot = jax.nn.one_hot(targets, logits.shape[-1])
logits = stax.logsoftmax(logits) # log normalize
return -jnp.mean(jnp.sum(logits * one_hot, axis=-1)) # cross entropy loss
def logistic_loss(model, params, batch):
inputs, targets = batch[0], batch[1]
# have to always supply the RNG field
logits = model.apply(params, None, inputs)
logits = jnp.reshape(logits, -1) # needs to be only scalar per index
# max_val is required for numerical stability
max_val = jnp.clip(logits, 0, None)
loss = jnp.mean(logits - logits * targets + max_val +
jnp.log(jnp.exp(-max_val) + jnp.exp((-logits - max_val))))
return loss
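# Editorial note: with logits z and targets y the expression above equals the
# standard sigmoid cross-entropy, mean(z - z*y + log(1 + exp(-z))), because
# max_val + log(exp(-max_val) + exp(-z - max_val)) == log(1 + exp(-z)); shifting
# by max_val = max(z, 0) keeps the exponentials from overflowing for large
# positive logits.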
def accuracy(model, params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=1)
predicted_class = jnp.argmax(model.apply(params, None, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
def clipped_grad(model, loss, params, l2_norm_clip, single_example_batch):
"""Evaluate gradient for a single-example batch and clip its grad norm."""
grads = grad(partial(loss, model))(params, single_example_batch)
nonempty_grads, tree_def = tree_flatten(grads)
total_grad_norm = jnp.linalg.norm([jnp.linalg.norm(neg.ravel()) for neg in nonempty_grads])
divisor = jnp.maximum(total_grad_norm / l2_norm_clip, 1.)
normalized_nonempty_grads = [g / divisor for g in nonempty_grads]
return tree_unflatten(tree_def, normalized_nonempty_grads)
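# Editorial sketch: the function above implements the standard DP-SGD clipping
# rule g <- g / max(1, ||g||_2 / C), where ||g||_2 is the l2 norm of the whole
# per-example gradient (all parameter tensors flattened together) and
# C = l2_norm_clip, so every gradient returned here has norm at most C.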
def private_grad(model, loss, params, batch, rng, l2_norm_clip, noise_multiplier, batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
clipped_grads = vmap(partial(clipped_grad, model, loss), (None, None, 0))(params, l2_norm_clip,
batch)
clipped_grads_flat, grads_treedef = tree_flatten(clipped_grads)
aggregated_clipped_grads = [g.sum(0) for g in clipped_grads_flat]
rngs = random.split(rng, len(aggregated_clipped_grads))
noised_aggregated_clipped_grads = [
g + l2_norm_clip * noise_multiplier * random.normal(r, g.shape)
for r, g in zip(rngs, aggregated_clipped_grads)
]
normalized_noised_aggregated_clipped_grads = [
g / batch_size for g in noised_aggregated_clipped_grads
]
return tree_unflatten(grads_treedef, normalized_noised_aggregated_clipped_grads)
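# Editorial sketch: together with clipped_grad this is the Gaussian mechanism of
# DP-SGD -- per-example gradients are clipped to norm C, summed over the batch,
# perturbed coordinate-wise with N(0, (C * noise_multiplier)**2) noise, and then
# divided by batch_size before the optimizer update.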
def private_grad_no_vmap(model, loss, params, batch, rng, l2_norm_clip, noise_multiplier,
batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
clipped_grads = tree_multimap(
lambda *xs: jnp.stack(xs),
*(clipped_grad(model, loss, params, l2_norm_clip, eg) for eg in zip(*batch)))
clipped_grads_flat, grads_treedef = tree_flatten(clipped_grads)
aggregated_clipped_grads = [g.sum(0) for g in clipped_grads_flat]
rngs = random.split(rng, len(aggregated_clipped_grads))
noised_aggregated_clipped_grads = [
g + l2_norm_clip * noise_multiplier * random.normal(r, g.shape)
for r, g in zip(rngs, aggregated_clipped_grads)
]
normalized_noised_aggregated_clipped_grads = [
g / batch_size for g in noised_aggregated_clipped_grads
]
return tree_unflatten(grads_treedef, normalized_noised_aggregated_clipped_grads)
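# Editorial note: private_grad_no_vmap is the loop-based counterpart of
# private_grad, selected by the --no_vmap flag in main(); it produces the same
# clipped, noised, averaged gradient but iterates over examples in Python, which
# is exactly the vectorization ablation the benchmark measures.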
model_dict = {
'mnist': mnist_model,
'lstm': lstm_model,
'embed': embedding_model,
'ffnn': ffnn_model,
'logreg': logistic_model,
'cifar10': cifar_model,
}
def main(args):
print(args)
if args.microbatches:
raise NotImplementedError('Microbatches < batch size not currently supported')
if args.experiment == 'lstm' and args.no_jit:
raise ValueError('LSTM with no JIT will fail.')
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NHWC',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
(train_data, train_labels), _ = data_fn(**kwargs)
# train_labels, test_labels = to_categorical(train_labels), to_categorical(
# test_labels)
num_train = train_data.shape[0]
num_complete_batches, leftover = divmod(num_train, args.batch_size)
num_batches = num_complete_batches + bool(leftover)
key = random.PRNGKey(args.seed)
model = hk.transform(
partial(model_dict[args.experiment],
args=args,
vocab_size=args.max_features,
seq_len=args.max_len))
rng = jax.random.PRNGKey(42)
init_params = model.init(key, train_data[:args.batch_size])
opt_init, opt_update, get_params = optimizers.sgd(args.learning_rate)
loss = logistic_loss if args.experiment == 'logreg' else multiclass_loss
if args.dpsgd:
train_data, train_labels = train_data[:, None], train_labels[:, None]
# regular update -- non-private
def update(_, i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(partial(loss, model))(params, batch), opt_state)
grad_fn = private_grad_no_vmap if args.no_vmap else private_grad
# differentially private update
def private_update(rng, i, opt_state, batch):
params = get_params(opt_state)
rng = random.fold_in(rng, i) # get new key for new random numbers
return opt_update(
i,
grad_fn(model, loss, params, batch, rng, args.l2_norm_clip, args.noise_multiplier,
args.batch_size), opt_state)
opt_state = opt_init(init_params)
itercount = itertools.count()
train_fn = private_update if args.dpsgd else update
if args.no_vmap:
print('No vmap for dpsgd!')
if not args.no_jit:
train_fn = jit(train_fn)
else:
print('No jit!')
dummy = jnp.array(1.)
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
for i, batch in enumerate(data.dataloader(train_data, train_labels, args.batch_size)):
opt_state = train_fn(
key,
next(itercount),
opt_state,
batch,
)
(dummy * dummy).block_until_ready() # synchronize CUDA.
duration = time.perf_counter() - start
print("Time Taken: ", duration)
timings.append(duration)
if args.dpsgd:
print('Trained with DP SGD optimizer')
else:
print('Trained with vanilla non-private SGD optimizer')
if not args.no_save:
append_to_name = ''
if args.no_jit: append_to_name += '_nojit'
if args.no_vmap: append_to_name += '_novmap'
utils.save_runtimes(__file__.split('.')[0], args, timings, append_to_name)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument('--no_vmap', dest='no_vmap', action='store_true')
parser.add_argument('--no_jit', dest='no_jit', action='store_true')
parser.add_argument('--dynamic_unroll', dest='dynamic_unroll', action='store_true')
args = parser.parse_args()
main(args)
| 11,056 | 35.734219 | 99 | py |
fast-dpsgd | fast-dpsgd-main/tf2dp.py | import time
from functools import partial
import tensorflow as tf
from tensorflow_privacy.privacy.analysis.gdp_accountant import (compute_eps_poisson,
compute_mu_poisson)
from jax.tree_util import tree_multimap
import data
import utils
def get_logreg_model(features, batch_size=None, **_):
return tf.keras.Sequential(
[tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Dense(1)])
def get_ffnn_model(features, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Dense(50, activation='relu'),
tf.keras.layers.Dense(2, activation='relu')
])
def get_mnist_model(features, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Conv2D(16, 8, strides=2, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Conv2D(32, 4, strides=2, padding='valid', activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(10)
])
def get_imdb_model(features, max_features, args, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Embedding(max_features + 4, 100),
tf.keras.layers.LSTM(100, return_sequences=True, unroll=(not args.no_unroll)),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(2)
])
def get_embedding_model(features, max_features, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Embedding(max_features + 4, 16),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(2)
])
class CIFAR10Model(tf.keras.Model):
def __init__(self, features, batch_size=None, **_):
super().__init__()
layers = tf.keras.layers
self.layer_list = [
layers.Conv2D(32, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(32, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.AveragePooling2D(2, strides=2, padding='VALID'),
layers.Conv2D(64, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(64, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.AveragePooling2D(2, strides=2, padding='VALID'),
layers.Conv2D(128, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(128, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.AveragePooling2D(2, strides=2, padding='VALID'),
layers.Conv2D(256, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(10, (3, 3), padding='SAME', strides=(1, 1)),
]
def call(self, x):
for layer in self.layer_list:
x = layer(x)
# print(x.shape)
return tf.reduce_mean(x, axis=(1, 2))
def reduce_noise_normalize_batch(args, stacked_grads):
summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
noise_stddev = args.l2_norm_clip * args.noise_multiplier
noise = tf.random.normal(tf.shape(input=summed_grads), stddev=noise_stddev)
noised_grads = summed_grads + noise
return noised_grads / tf.cast(args.microbatches, tf.float32)
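# Editorial sketch: this mirrors the Gaussian mechanism on the JAX side -- the
# already-clipped per-example gradients are summed over the batch axis, Gaussian
# noise with stddev l2_norm_clip * noise_multiplier is added coordinate-wise,
# and the result is averaged over args.microbatches (set equal to the batch size
# in main()).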
def compute_per_eg_grad(model, optimizer, loss_fn, args, data):
features, labels = data
with tf.GradientTape() as tape:
# We need to add the extra dimension to features because model
# expects batched input.
logits = model(features[None])
loss = loss_fn(labels=labels, logits=tf.squeeze(logits))
grads_list = tape.gradient(
loss,
model.trainable_variables,
# This argument should not be necessary, but we include it in case:
unconnected_gradients=tf.UnconnectedGradients.ZERO)
# We expect grads_list to be flat already, but we use this structure to mirror TFP.
grads_flat = tf.nest.flatten(grads_list)
squared_l2_norms = [tf.reduce_sum(input_tensor=tf.square(g)) for g in grads_flat]
global_norm = tf.sqrt(tf.add_n(squared_l2_norms))
div = tf.maximum(global_norm / args.l2_norm_clip, 1.)
clipped_flat = [g / div for g in grads_flat]
clipped_grads = tf.nest.pack_sequence_as(grads_list, clipped_flat)
return loss, clipped_grads
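# Editorial note: private_train_step maps compute_per_eg_grad over the batch
# (with tf.vectorized_map, or a Python loop under --no_vmap), so each example
# contributes a gradient clipped to l2 norm at most args.l2_norm_clip before
# noise is added and the average is taken.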
def private_train_step(model, optimizer, loss_fn, args, data):
if args.no_vmap:
x, y = data
# Manually compute per-example gradients via a loop, then stack the results.
loss, clipped_grads = tree_multimap(
lambda *xs: tf.stack(xs),
*(compute_per_eg_grad(model, optimizer, loss_fn, args, (x[i], y[i]))
for i in range(x.shape[0])))
else:
loss, clipped_grads = tf.vectorized_map(
partial(compute_per_eg_grad, model, optimizer, loss_fn, args),
data) # , fallback_to_while_loop=False)
final_grads = tf.nest.map_structure(partial(reduce_noise_normalize_batch, args), clipped_grads)
optimizer.apply_gradients(zip(final_grads, model.trainable_variables))
return loss
def train_step(model, optimizer, loss_fn, args, data):
features, labels = data
with tf.GradientTape() as tape:
logits = model(features)
loss = tf.reduce_mean(loss_fn(labels=labels, logits=logits))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
def evaluate(model, test_data, test_labels):
# This function is unused.
loss_mean = tf.keras.metrics.Mean()
acc_mean = tf.keras.metrics.SparseCategoricalAccuracy()
for features, labels in zip(batch_gen(test_data), batch_gen(test_labels)):
loss, logits = compute_scalar_loss(model, features, labels)
loss_mean.update_state(loss)
acc_mean.update_state(labels, logits)
return {'loss': loss_mean.result(), 'accuracy': acc_mean.result()}
model_dict = {
'mnist': get_mnist_model,
'lstm': get_imdb_model,
'embed': get_embedding_model,
'ffnn': get_ffnn_model,
'logreg': get_logreg_model,
'cifar10': CIFAR10Model,
}
def main(args):
print(args)
if args.memory_limit: # Option to limit GPU memory.
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=args.memory_limit)])
assert args.microbatches is None # Only support microbatches=1.
args.microbatches = args.batch_size
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NHWC',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
(train_data, train_labels), _ = data_fn(**kwargs)
train_data, train_labels = tf.constant(train_data), tf.constant(train_labels)
    num_train_eg = train_data.shape[0]  # number of training examples
loss_fn = tf.nn.sparse_softmax_cross_entropy_with_logits
if args.experiment == 'logreg':
loss_fn = lambda labels, logits: tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=tf.squeeze(logits))
train_labels = tf.cast(train_labels, tf.float32)
model_bs = 1 if args.dpsgd else args.batch_size
model = model_dict[args.experiment](
train_data,
max_features=args.max_features,
# batch_size=model_bs,
args=args)
optimizer = tf.keras.optimizers.SGD(learning_rate=args.learning_rate)
train_fn = private_train_step if args.dpsgd else train_step
train_fn = partial(train_fn, model, optimizer, loss_fn, args)
if args.no_vmap:
print('No vmap for dpsgd!')
if args.no_jit:
print('No jit!')
else:
train_fn = tf.function(experimental_compile=args.use_xla)(train_fn)
with tf.device('GPU'):
        dummy = tf.convert_to_tensor(1.)  # we use this to force CUDA synchronization
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
for i, batch in enumerate(data.dataloader(train_data, train_labels, args.batch_size)):
train_fn(batch)
            _ = dummy.numpy()  # forces a device->host transfer, synchronizing CUDA.
duration = time.perf_counter() - start
print("Time Taken: ", duration)
timings.append(duration)
if args.dpsgd:
# eps = compute_eps_poisson(epoch, args.noise_multiplier, num_train_eg, args.batch_size,
# 1e-5)
# mu = compute_mu_poisson(epoch, args.noise_multiplier, num_train_eg, args.batch_size)
# print('For delta=1e-5, the current epsilon is: %.2f' % eps)
# print('For delta=1e-5, the current mu is: %.2f' % mu)
print('Trained with DPSGD optimizer')
else:
print('Trained with vanilla non-private SGD optimizer')
if not args.no_save:
append_to_name = ''
if args.no_jit: append_to_name += '_nojit'
if args.no_vmap: append_to_name += '_novmap'
utils.save_runtimes(__file__.split('.')[0], args, timings, append_to_name)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument('--xla', dest='use_xla', action='store_true')
parser.add_argument('--no_xla', dest='use_xla', action='store_false')
parser.add_argument('--memory_limit', default=None, type=int)
parser.add_argument('--no_unroll', dest='no_unroll', action='store_true')
parser.add_argument('--no_vmap', dest='no_vmap', action='store_true')
parser.add_argument('--no_jit', dest='no_jit', action='store_true')
args = parser.parse_args()
main(args)
| 10,368 | 39.346304 | 100 | py |