UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/preprocessor.py

from __future__ import absolute_import
import os
import os.path as osp
from torch.utils.data import DataLoader, Dataset
import numpy as np
import random
import math
import torch
from PIL import Image
class Preprocessor(Dataset):
def __init__(self, dataset, root=None, transform=None, mutual=False):
super(Preprocessor, self).__init__()
        self.dataset = []  # filled below: each dataset item is extended with its running index
for inds, item in enumerate(dataset):
self.dataset.append(item+(inds,))
self.root = root
self.transform = transform
self.mutual = mutual
def __len__(self):
return len(self.dataset)
def __getitem__(self, indices):
if self.mutual:
return self._get_mutual_item(indices)
else:
return self._get_single_item(indices)
def _get_single_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
        fname, camid, inds = items[0], items[-2], items[-1]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
fpath = fname
if self.root is not None:
fpath = osp.join(self.root, fname)
img = Image.open(fpath).convert('RGB')
if self.transform is not None:
img = self.transform(img)
        return [img, fname] + pids + [camid, inds]
def _get_mutual_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
fname, camid, inds = items[0], items[-2], items[-1]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
fpath = fname
if self.root is not None:
fpath = osp.join(self.root, fname)
img_1 = Image.open(fpath).convert('RGB')
img_2 = img_1.copy()
if self.transform is not None:
img_1 = self.transform(img_1)
img_2 = self.transform(img_2)
        return [img_1, img_2, fname] + pids + [camid, inds]
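
# Minimal usage sketch for Preprocessor. The tuple layout (fname, pid, camid),
# the image root and the torchvision transform below are illustrative
# assumptions; the image files must exist under `root` for the loader to work.
def _example_preprocessor_usage():
    import torchvision.transforms as T
    dataset = [("0001_c1_f0001.jpg", 0, 1), ("0002_c2_f0003.jpg", 1, 2)]  # (fname, pid, camid)
    transform = T.Compose([T.Resize((256, 128)), T.ToTensor()])
    loader = DataLoader(Preprocessor(dataset, root="/path/to/images", transform=transform),
                        batch_size=2, shuffle=True)
    # each batch unpacks as [img, fname] + pids + [camid, inds]
    for img, fname, pid, camid, inds in loader:
        print(img.shape, fname, pid, camid, inds)
        break
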
class UnsupervisedCamStylePreprocessor(Dataset):
def __init__(self, dataset, root=None, transform=None, num_cam=8, camstyle_dir='', mutual=False):
super(UnsupervisedCamStylePreprocessor, self).__init__()
        self.dataset = []  # filled below: each dataset item is extended with its running index
for inds, item in enumerate(dataset):
self.dataset.append(item+(inds,))
self.root = root
self.transform = transform
self.mutual = mutual
self.num_cam = num_cam
self.camstyle_root = camstyle_dir
def __len__(self):
return len(self.dataset)
def __getitem__(self, indices):
if self.mutual:
return self._get_mutual_item(indices)
else:
return self._get_single_item(indices)
def _get_single_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
        fname, camid, inds = items[0], items[-2], items[-1]
sel_cam = torch.randperm(self.num_cam)[0]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
if sel_cam == camid:
fpath = osp.join(self.root, fname)
img = Image.open(fpath).convert('RGB')
else:
if 'msmt' in self.root:
fname = fname[:-4] + '_fake_' + str(sel_cam.numpy() + 1) + '.jpg'
else:
fname = fname[:-4] + '_fake_' + str(camid + 1) + 'to' + str(sel_cam.numpy() + 1) + '.jpg'
fpath = osp.join(self.camstyle_root, fname)
img = Image.open(fpath).convert('RGB')
if self.transform is not None:
img = self.transform(img)
        return [img, fname] + pids + [camid, inds]
def _get_mutual_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
fname, camid, inds = items[0], items[-2], items[-1]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
fname_im = fname.split('/')[-1]
sel_cam = torch.randperm(self.num_cam)[0]
if sel_cam == camid:
            # note: unlike _get_single_item, the stored file name is used here
            # directly, without joining self.root
            fpath = fname
img_1 = Image.open(fpath).convert('RGB')
else:
if 'msmt' in fname:
fname_im = fname_im[:-4] + '_fake_' + str(sel_cam.numpy() + 1) + '.jpg'
else:
fname_im = fname_im[:-4] + '_fake_' + str(camid + 1) + 'to' + str(sel_cam.numpy() + 1) + '.jpg'
fpath = osp.join(self.camstyle_root, fname_im)
img_1 = Image.open(fpath).convert('RGB')
img_2 = img_1.copy()
if self.transform is not None:
img_1 = self.transform(img_1)
img_2 = self.transform(img_2)
        return [img_1, img_2, fpath] + pids + [camid, inds]
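
# Sketch of the camera-style file-name rewriting performed in _get_mutual_item
# when a different camera is sampled. The concrete file name and camera ids
# below are made up for illustration.
def _example_camstyle_name():
    fname_im = "0001_c1_f0001.jpg"
    camid, sel_cam = 0, torch.randperm(8)[0]  # sel_cam is a 0-dim tensor, as in _get_mutual_item
    fake = fname_im[:-4] + '_fake_' + str(camid + 1) + 'to' + str(sel_cam.numpy() + 1) + '.jpg'
    return fake  # e.g. "0001_c1_f0001_fake_1to4.jpg" when sel_cam == 3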

UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/functional_our.py

# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import numpy as np
import torch
from PIL import Image, ImageOps, ImageEnhance
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if isinstance(pic, np.ndarray):
assert len(pic.shape) in (2, 3)
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
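
# Small sketch of to_tensor's behaviour on a numpy input: an H x W x C uint8
# array becomes a C x H x W float tensor. Unlike torchvision's current
# ToTensor, values are only cast to float here and not divided by 255.
# The random array is illustrative.
def _example_to_tensor():
    arr = np.random.randint(0, 256, size=(4, 3, 3), dtype=np.uint8)  # H x W x C
    t = to_tensor(arr)
    assert tuple(t.shape) == (3, 4, 3) and t.dtype == torch.float32
    return t
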
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, *args):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, *args):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level, *args):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level, *args):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level, *args):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level, image_size):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level, image_size):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level, image_size):
level = int_parameter(sample_level(level), image_size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level, image_size):
level = int_parameter(sample_level(level), image_size[1] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentations_reid = [
autocontrast, equalize, posterize, shear_x, shear_y,
translate_x, translate_y, sharpness
]
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
augmentations_all = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
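
# Sketch of drawing one operation from `augmentations_reid` and applying it.
# The blank image and severity level are illustrative; (128, 256) is the
# (width, height) tuple that the shear/translate ops expect.
def _example_random_op():
    pil_img = Image.new('RGB', (128, 256))
    op = np.random.choice(augmentations_reid)
    return op(pil_img, 3, (128, 256))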

UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/transforms.py

from __future__ import absolute_import
__all__ = ['ToTensor', 'RandomErasing', 'RandomPatch', 'AugMix', 'ColorChange', ]
from torchvision.transforms import *
from PIL import Image
import random
import math
import numpy as np
import cv2
from collections import deque
from .functional_our import to_tensor, augmentations_reid, augmentations_all
class RectScale(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
w, h = img.size
if h == self.height and w == self.width:
return img
return img.resize((self.width, self.height), self.interpolation)
class RandomSizedRectCrop(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.64, 1.0) * area
aspect_ratio = random.uniform(2, 3)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.width, self.height), self.interpolation)
# Fallback
scale = RectScale(self.height, self.width,
interpolation=self.interpolation)
return scale(img)
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
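
# Usage sketch: RandomErasing works on a C x H x W tensor, so it has to come
# after ToTensor() in a pipeline. Compose/Resize/ToTensor come from the
# torchvision star import at the top of this file; the resize target, input
# size and probability are illustrative assumptions, not this repository's
# training settings.
def _example_random_erasing():
    pipeline = Compose([Resize((256, 128)), ToTensor(), RandomErasing(probability=0.5)])
    return pipeline(Image.new('RGB', (64, 128)))
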
class RandomPatch(object):
"""Random patch data augmentation.
    There is a patch pool that stores randomly extracted patches from person images.
For each input image, RandomPatch
1) extracts a random patch and stores the patch in the patch pool;
2) randomly selects a patch from the patch pool and pastes it on the
input (at random position) to simulate occlusion.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. arXiv preprint, 2019.
"""
def __init__(self, prob_happen=0.5, pool_capacity=50000, min_sample_size=100,
patch_min_area=0.01, patch_max_area=0.5, patch_min_ratio=0.1,
prob_rotate=0.5, prob_flip_leftright=0.5,
):
self.prob_happen = prob_happen
self.patch_min_area = patch_min_area
self.patch_max_area = patch_max_area
self.patch_min_ratio = patch_min_ratio
self.prob_rotate = prob_rotate
self.prob_flip_leftright = prob_flip_leftright
self.patchpool = deque(maxlen=pool_capacity)
self.min_sample_size = min_sample_size
def generate_wh(self, W, H):
area = W * H
for attempt in range(100):
target_area = random.uniform(self.patch_min_area, self.patch_max_area) * area
aspect_ratio = random.uniform(self.patch_min_ratio, 1. / self.patch_min_ratio)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < W and h < H:
return w, h
return None, None
def transform_patch(self, patch):
if random.uniform(0, 1) > self.prob_flip_leftright:
patch = patch.transpose(Image.FLIP_LEFT_RIGHT)
if random.uniform(0, 1) > self.prob_rotate:
patch = patch.rotate(random.randint(-10, 10))
return patch
def __call__(self, img):
if isinstance(img, np.ndarray):
img = Image.fromarray(img.astype(np.uint8))
W, H = img.size # original image size
# collect new patch
w, h = self.generate_wh(W, H)
if w is not None and h is not None:
x1 = random.randint(0, W - w)
y1 = random.randint(0, H - h)
new_patch = img.crop((x1, y1, x1 + w, y1 + h))
self.patchpool.append(new_patch)
if len(self.patchpool) < self.min_sample_size:
return img
if random.uniform(0, 1) > self.prob_happen:
return img
# paste a randomly selected patch on a random position
patch = random.sample(self.patchpool, 1)[0]
patchW, patchH = patch.size
x1 = random.randint(0, W - patchW)
y1 = random.randint(0, H - patchH)
patch = self.transform_patch(patch)
img.paste(patch, (x1, y1))
return img
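
# Usage sketch: RandomPatch only starts pasting once its pool holds at least
# `min_sample_size` patches, so the first calls return the image unchanged.
# The loop below just warms the pool; sizes and parameters are illustrative.
def _example_random_patch():
    rp = RandomPatch(prob_happen=1.0, min_sample_size=5)
    img = Image.new('RGB', (128, 256))
    out = img
    for _ in range(10):
        out = rp(img)
    return out
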
class AugMix(object):
""" Perform AugMix augmentation and compute mixture.
Args:
aug_prob_coeff: Probability distribution coefficients.
mixture_width: Number of augmentation chains to mix per augmented example.
mixture_depth: Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]'
severity: Severity of underlying augmentation operators (between 1 to 10).
"""
def __init__(self, aug_prob_coeff=1, mixture_width=3, mixture_depth=-1, severity=1):
self.aug_prob_coeff = aug_prob_coeff
self.mixture_width = mixture_width
self.mixture_depth = mixture_depth
self.severity = severity
self.aug_list = augmentations_reid
def __call__(self, image):
"""Perform AugMix augmentations and compute mixture.
Returns:
mixed: Augmented and mixed image.
"""
ws = np.float32(
np.random.dirichlet([self.aug_prob_coeff] * self.mixture_width))
m = np.float32(np.random.beta(self.aug_prob_coeff, self.aug_prob_coeff))
image = np.asarray(image, dtype=np.float32).copy()
mix = np.zeros_like(image)
for i in range(self.mixture_width):
image_aug = Image.fromarray(image.copy().astype(np.uint8))
depth = self.mixture_depth if self.mixture_depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(self.aug_list)
image_aug = op(image_aug, self.severity, (128, 256))
mix += ws[i] * np.asarray(image_aug, dtype=np.float32)
mixed = (1 - m) * image + m * mix
return mixed/255.0
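
# Usage sketch: AugMix takes a PIL image and returns an H x W x C float numpy
# array scaled to [0, 1], so it is typically followed by the custom to_tensor
# imported above rather than torchvision's ToTensor. The input size matches
# the (128, 256) hard-coded in __call__; the severity value is illustrative.
def _example_augmix():
    mixed = AugMix(severity=3)(Image.new('RGB', (128, 256)))
    return to_tensor(mixed.astype(np.float32))
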
class ColorChange(object):
    """Randomly applies one of several photometric perturbations (brightness shift, gamma, CLAHE, brightness multiply, contrast) with the given probability."""
def __init__(self, probability=0.5):
self.probability = probability
def do_brightness_shift(self, image, alpha=0.125):
image = image.astype(np.float32)
image = image + alpha * 255
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def do_brightness_multiply(self, image, alpha=1):
image = image.astype(np.float32)
image = alpha * image
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def do_contrast(self, image, alpha=1.0):
image = image.astype(np.float32)
gray = image * np.array([[[0.114, 0.587, 0.299]]]) # rgb to gray (YCbCr)
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
image = alpha * image + gray
image = np.clip(image, 0, 255).astype(np.uint8)
return image
# https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/
def do_gamma(self, image, gamma=1.0):
table = np.array([((i / 255.0) ** (1.0 / gamma)) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table) # apply gamma correction using the lookup table
def do_clahe(self, image, clip=2, grid=16):
grid = int(grid)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
gray, a, b = cv2.split(lab)
gray = cv2.createCLAHE(clipLimit=clip, tileGridSize=(grid, grid)).apply(gray)
lab = cv2.merge((gray, a, b))
image = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return image
def __call__(self, image):
if random.uniform(0, 1) > self.probability:
return image
image = np.asarray(image, dtype=np.uint8).copy()
index = random.randint(0, 4)
if index == 0:
image = self.do_brightness_shift(image, 0.1)
elif index == 1:
image = self.do_gamma(image, 1)
elif index == 2:
image = self.do_clahe(image)
elif index == 3:
image = self.do_brightness_multiply(image)
elif index == 4:
image = self.do_contrast(image)
        return image
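
# Usage sketch: ColorChange expects uint8 PIL/numpy data, so it belongs before
# ToTensor() in a pipeline. The ordering and sizes below are an illustrative
# composition, not the configuration used by this repository.
def _example_colorchange_pipeline():
    pipeline = Compose([
        RandomSizedRectCrop(256, 128),
        ColorChange(probability=0.5),
        ToTensor(),
    ])
    return pipeline(Image.new('RGB', (64, 128)))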

UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/evaluation_metrics/classification.py

from __future__ import absolute_import
import torch
from ..utils import to_torch
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
output, target = to_torch(output), to_torch(target)
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
ret = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(dim=0, keepdim=True)
ret.append(correct_k.mul_(1. / batch_size))
return ret
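
# Usage sketch: top-1 / top-5 accuracy on random logits for a batch of
# 8 samples over 10 classes. The shapes are illustrative.
def _example_accuracy():
    logits = torch.randn(8, 10)
    target = torch.randint(0, 10, (8,))
    top1, top5 = accuracy(logits, target, topk=(1, 5))
    return top1.item(), top5.item()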

UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/evaluation_metrics/__init__.py

from __future__ import absolute_import
from .classification import accuracy
from .ranking import cmc, mean_ap
__all__ = [
'accuracy',
'cmc',
'mean_ap'
]

UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/evaluation_metrics/ranking.py

from __future__ import absolute_import
from collections import defaultdict
import numpy as np
from sklearn.metrics import average_precision_score
from ..utils import to_numpy
def _unique_sample(ids_dict, num):
    mask = np.zeros(num, dtype=bool)  # the np.bool alias was removed in newer NumPy
for _, indices in ids_dict.items():
i = np.random.choice(indices)
mask[i] = True
return mask
def cmc(distmat, query_ids=None, gallery_ids=None,
query_cams=None, gallery_cams=None, topk=100,
separate_camera_set=False,
single_gallery_shot=False,
first_match_break=False):
distmat = to_numpy(distmat)
m, n = distmat.shape
# Fill up default values
if query_ids is None:
query_ids = np.arange(m)
if gallery_ids is None:
gallery_ids = np.arange(n)
if query_cams is None:
query_cams = np.zeros(m).astype(np.int32)
if gallery_cams is None:
gallery_cams = np.ones(n).astype(np.int32)
# Ensure numpy array
query_ids = np.asarray(query_ids)
gallery_ids = np.asarray(gallery_ids)
query_cams = np.asarray(query_cams)
gallery_cams = np.asarray(gallery_cams)
# Sort and find correct matches
indices = np.argsort(distmat, axis=1)
matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
# Compute CMC for each query
ret = np.zeros(topk)
num_valid_queries = 0
for i in range(m):
# Filter out the same id and same camera
valid = ((gallery_ids[indices[i]] != query_ids[i]) |
(gallery_cams[indices[i]] != query_cams[i]))
if separate_camera_set:
# Filter out samples from same camera
valid &= (gallery_cams[indices[i]] != query_cams[i])
if not np.any(matches[i, valid]): continue
if single_gallery_shot:
repeat = 10
gids = gallery_ids[indices[i][valid]]
inds = np.where(valid)[0]
ids_dict = defaultdict(list)
for j, x in zip(inds, gids):
ids_dict[x].append(j)
else:
repeat = 1
for _ in range(repeat):
if single_gallery_shot:
# Randomly choose one instance for each id
sampled = (valid & _unique_sample(ids_dict, len(valid)))
index = np.nonzero(matches[i, sampled])[0]
else:
index = np.nonzero(matches[i, valid])[0]
delta = 1. / (len(index) * repeat)
for j, k in enumerate(index):
if k - j >= topk: break
if first_match_break:
ret[k - j] += 1
break
ret[k - j] += delta
num_valid_queries += 1
if num_valid_queries == 0:
raise RuntimeError("No valid query")
return ret.cumsum() / num_valid_queries
def mean_ap(distmat, query_ids=None, gallery_ids=None,
query_cams=None, gallery_cams=None):
distmat = to_numpy(distmat)
m, n = distmat.shape
# Fill up default values
if query_ids is None:
query_ids = np.arange(m)
if gallery_ids is None:
gallery_ids = np.arange(n)
if query_cams is None:
query_cams = np.zeros(m).astype(np.int32)
if gallery_cams is None:
gallery_cams = np.ones(n).astype(np.int32)
# Ensure numpy array
query_ids = np.asarray(query_ids)
gallery_ids = np.asarray(gallery_ids)
query_cams = np.asarray(query_cams)
gallery_cams = np.asarray(gallery_cams)
# Sort and find correct matches
indices = np.argsort(distmat, axis=1)
matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
# Compute AP for each query
aps = []
for i in range(m):
# Filter out the same id and same camera
valid = ((gallery_ids[indices[i]] != query_ids[i]) |
(gallery_cams[indices[i]] != query_cams[i]))
y_true = matches[i, valid]
y_score = -distmat[i][indices[i]][valid]
if not np.any(y_true): continue
aps.append(average_precision_score(y_true, y_score))
if len(aps) == 0:
raise RuntimeError("No valid query")
return np.mean(aps)
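
# Usage sketch: a tiny synthetic ranking problem with 2 queries and 4 gallery
# items. Distances, identities and camera ids are made up for illustration.
def _example_ranking_metrics():
    distmat = np.array([[0.1, 0.9, 0.4, 0.8],
                        [0.7, 0.2, 0.6, 0.3]])
    query_ids, gallery_ids = [0, 1], [0, 1, 0, 1]
    query_cams, gallery_cams = [0, 0], [1, 1, 1, 1]
    cmc_scores = cmc(distmat, query_ids, gallery_ids, query_cams, gallery_cams,
                     topk=4, first_match_break=True)
    return cmc_scores, mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams)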

yago3 | yago3-master/scripts/dumps/docopt.py

"""Pythonic command-line interface parser that will make you smile.
* http://docopt.org
* Repository and issue-tracker: https://github.com/docopt/docopt
* Licensed under terms of MIT license (see LICENSE-MIT)
* Copyright (c) 2013 Vladimir Keleshev, [email protected]
"""
import sys
import re
__all__ = ['docopt']
__version__ = '0.6.2'
class DocoptLanguageError(Exception):
"""Error in construction of usage-message by developer."""
class DocoptExit(SystemExit):
"""Exit in case user invoked program with incorrect arguments."""
usage = ''
def __init__(self, message=''):
SystemExit.__init__(self, (message + '\n' + self.usage).strip())
class Pattern(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def fix(self):
self.fix_identities()
self.fix_repeating_arguments()
return self
def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, c in enumerate(self.children):
if not hasattr(c, 'children'):
assert c in uniq
self.children[i] = uniq[uniq.index(c)]
else:
c.fix_identities(uniq)
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(c.children) for c in self.either.children]
for case in either:
for e in [c for c in case if case.count(c) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self
@property
def either(self):
"""Transform pattern into an equivalent, with only top-level Either."""
# Currently the pattern will not be equivalent, but more "narrow",
# although good enough to reason about list arguments.
ret = []
groups = [[self]]
while groups:
children = groups.pop(0)
types = [type(c) for c in children]
if Either in types:
either = [c for c in children if type(c) is Either][0]
children.pop(children.index(either))
for c in either.children:
groups.append([c] + children)
elif Required in types:
required = [c for c in children if type(c) is Required][0]
children.pop(children.index(required))
groups.append(list(required.children) + children)
elif Optional in types:
optional = [c for c in children if type(c) is Optional][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif AnyOptions in types:
optional = [c for c in children if type(c) is AnyOptions][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif OneOrMore in types:
oneormore = [c for c in children if type(c) is OneOrMore][0]
children.pop(children.index(oneormore))
groups.append(list(oneormore.children) * 2 + children)
else:
ret.append(children)
return Either(*[Required(*e) for e in ret])
class ChildPattern(Pattern):
def __init__(self, name, value=None):
self.name = name
self.value = value
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
def flat(self, *types):
return [self] if not types or type(self) in types else []
def match(self, left, collected=None):
collected = [] if collected is None else collected
pos, match = self.single_match(left)
if match is None:
return False, left, collected
left_ = left[:pos] + left[pos + 1:]
same_name = [a for a in collected if a.name == self.name]
if type(self.value) in (int, list):
if type(self.value) is int:
increment = 1
else:
increment = ([match.value] if type(match.value) is str
else match.value)
if not same_name:
match.value = increment
return True, left_, collected + [match]
same_name[0].value += increment
return True, left_, collected
return True, left_, collected + [match]
class ParentPattern(Pattern):
def __init__(self, *children):
self.children = list(children)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(repr(a) for a in self.children))
def flat(self, *types):
if type(self) in types:
return [self]
return sum([c.flat(*types) for c in self.children], [])
class Argument(ChildPattern):
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
return n, Argument(self.name, p.value)
return None, None
@classmethod
def parse(class_, source):
name = re.findall('(<\S*?>)', source)[0]
value = re.findall('\[default: (.*)\]', source, flags=re.I)
return class_(name, value[0] if value else None)
class Command(Argument):
def __init__(self, name, value=False):
self.name = name
self.value = value
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
if p.value == self.name:
return n, Command(self.name, True)
else:
break
return None, None
class Option(ChildPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
assert argcount in (0, 1)
self.short, self.long = short, long
self.argcount, self.value = argcount, value
self.value = None if value is False and argcount else value
@classmethod
def parse(class_, option_description):
short, long, argcount, value = None, None, 0, False
options, _, description = option_description.strip().partition(' ')
options = options.replace(',', ' ').replace('=', ' ')
for s in options.split():
if s.startswith('--'):
long = s
elif s.startswith('-'):
short = s
else:
argcount = 1
if argcount:
matched = re.findall('\[default: (.*)\]', description, flags=re.I)
value = matched[0] if matched else None
return class_(short, long, argcount, value)
def single_match(self, left):
for n, p in enumerate(left):
if self.name == p.name:
return n, p
return None, None
@property
def name(self):
return self.long or self.short
def __repr__(self):
return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
self.argcount, self.value)
class Required(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
l = left
c = collected
for p in self.children:
matched, l, c = p.match(l, c)
if not matched:
return False, left, collected
return True, l, c
class Optional(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
for p in self.children:
m, left, collected = p.match(left, collected)
return True, left, collected
class AnyOptions(Optional):
"""Marker/placeholder for [options] shortcut."""
class OneOrMore(ParentPattern):
def match(self, left, collected=None):
assert len(self.children) == 1
collected = [] if collected is None else collected
l = left
c = collected
l_ = None
matched = True
times = 0
while matched:
# could it be that something didn't match but changed l or c?
matched, l, c = self.children[0].match(l, c)
times += 1 if matched else 0
if l_ == l:
break
l_ = l
if times >= 1:
return True, l, c
return False, left, collected
class Either(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
outcomes = []
for p in self.children:
matched, _, _ = outcome = p.match(left, collected)
if matched:
outcomes.append(outcome)
if outcomes:
return min(outcomes, key=lambda outcome: len(outcome[1]))
return False, left, collected
class TokenStream(list):
def __init__(self, source, error):
self += source.split() if hasattr(source, 'split') else source
self.error = error
def move(self):
return self.pop(0) if len(self) else None
def current(self):
return self[0] if len(self) else None
def parse_long(tokens, options):
"""long ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
long, eq, value = tokens.move().partition('=')
assert long.startswith('--')
value = None if eq == value == '' else value
similar = [o for o in options if o.long == long]
if tokens.error is DocoptExit and similar == []: # if no exact match
similar = [o for o in options if o.long and o.long.startswith(long)]
if len(similar) > 1: # might be simply specified ambiguously 2+ times?
raise tokens.error('%s is not a unique prefix: %s?' %
(long, ', '.join(o.long for o in similar)))
elif len(similar) < 1:
argcount = 1 if eq == '=' else 0
o = Option(None, long, argcount)
options.append(o)
if tokens.error is DocoptExit:
o = Option(None, long, argcount, value if argcount else True)
else:
o = Option(similar[0].short, similar[0].long,
similar[0].argcount, similar[0].value)
if o.argcount == 0:
if value is not None:
raise tokens.error('%s must not have an argument' % o.long)
else:
if value is None:
if tokens.current() is None:
raise tokens.error('%s requires argument' % o.long)
value = tokens.move()
if tokens.error is DocoptExit:
o.value = value if value is not None else True
return [o]
def parse_shorts(tokens, options):
"""shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
token = tokens.move()
assert token.startswith('-') and not token.startswith('--')
left = token.lstrip('-')
parsed = []
while left != '':
short, left = '-' + left[0], left[1:]
similar = [o for o in options if o.short == short]
if len(similar) > 1:
raise tokens.error('%s is specified ambiguously %d times' %
(short, len(similar)))
elif len(similar) < 1:
o = Option(short, None, 0)
options.append(o)
if tokens.error is DocoptExit:
o = Option(short, None, 0, True)
else: # why copying is necessary here?
o = Option(short, similar[0].long,
similar[0].argcount, similar[0].value)
value = None
if o.argcount != 0:
if left == '':
if tokens.current() is None:
raise tokens.error('%s requires argument' % short)
value = tokens.move()
else:
value = left
left = ''
if tokens.error is DocoptExit:
o.value = value if value is not None else True
parsed.append(o)
return parsed
def parse_pattern(source, options):
tokens = TokenStream(re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source),
DocoptLanguageError)
result = parse_expr(tokens, options)
if tokens.current() is not None:
raise tokens.error('unexpected ending: %r' % ' '.join(tokens))
return Required(*result)
def parse_expr(tokens, options):
"""expr ::= seq ( '|' seq )* ;"""
seq = parse_seq(tokens, options)
if tokens.current() != '|':
return seq
result = [Required(*seq)] if len(seq) > 1 else seq
while tokens.current() == '|':
tokens.move()
seq = parse_seq(tokens, options)
result += [Required(*seq)] if len(seq) > 1 else seq
return [Either(*result)] if len(result) > 1 else result
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result
def parse_atom(tokens, options):
"""atom ::= '(' expr ')' | '[' expr ']' | 'options'
| long | shorts | argument | command ;
"""
token = tokens.current()
result = []
if token in '([':
tokens.move()
matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token]
result = pattern(*parse_expr(tokens, options))
if tokens.move() != matching:
raise tokens.error("unmatched '%s'" % token)
return [result]
elif token == 'options':
tokens.move()
return [AnyOptions()]
elif token.startswith('--') and token != '--':
return parse_long(tokens, options)
elif token.startswith('-') and token not in ('-', '--'):
return parse_shorts(tokens, options)
elif token.startswith('<') and token.endswith('>') or token.isupper():
return [Argument(tokens.move())]
else:
return [Command(tokens.move())]
def parse_argv(tokens, options, options_first=False):
"""Parse command-line argument vector.
If options_first:
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
"""
parsed = []
while tokens.current() is not None:
if tokens.current() == '--':
return parsed + [Argument(None, v) for v in tokens]
elif tokens.current().startswith('--'):
parsed += parse_long(tokens, options)
elif tokens.current().startswith('-') and tokens.current() != '-':
parsed += parse_shorts(tokens, options)
elif options_first:
return parsed + [Argument(None, v) for v in tokens]
else:
parsed.append(Argument(None, tokens.move()))
return parsed
def parse_defaults(doc):
# in python < 2.7 you can't pass flags=re.MULTILINE
split = re.split('\n *(<\S+?>|-\S+?)', doc)[1:]
split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])]
options = [Option.parse(s) for s in split if s.startswith('-')]
#arguments = [Argument.parse(s) for s in split if s.startswith('<')]
#return options, arguments
return options
def printable_usage(doc):
# in python < 2.7 you can't pass flags=re.IGNORECASE
usage_split = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc)
if len(usage_split) < 3:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_split) > 3:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
return re.split(r'\n\s*\n', ''.join(usage_split[1:]))[0].strip()
def formal_usage(printable_usage):
pu = printable_usage.split()[1:] # split and drop "usage:"
return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )'
def extras(help, version, options, doc):
if help and any((o.name in ('-h', '--help')) and o.value for o in options):
print(doc.strip("\n"))
sys.exit()
if version and any(o.name == '--version' and o.value for o in options):
print(version)
sys.exit()
class Dict(dict):
def __repr__(self):
return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items()))
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
        Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
if argv is None:
argv = sys.argv[1:]
DocoptExit.usage = printable_usage(doc)
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(TokenStream(argv, DocoptExit), list(options),
options_first)
pattern_options = set(pattern.flat(Option))
for ao in pattern.flat(AnyOptions):
doc_options = parse_defaults(doc)
ao.children = list(set(doc_options) - pattern_options)
#if any_options:
# ao.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched and left == []: # better error message if left?
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit()
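
# Usage sketch: the typical pattern (used by downloadDumps.py in this folder)
# is to pass a docstring containing a "Usage:" section. The tiny interface
# below is a made-up example, not part of any script in this repository.
def _example_docopt():
    usage = """Naval Fate.

Usage:
  naval_fate ship <name> move <x> <y> [--speed=<kn>]

Options:
  --speed=<kn>  Speed in knots [default: 10].
"""
    return docopt(usage, argv=['ship', 'Guardian', 'move', '10', '20'])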

yago3 | yago3-master/scripts/dumps/downloadDumps.py

#!/usr/bin/env python
# encoding: utf-8
"""
Downloads Wikipedia, Wikidata and commonswiki dumps for the specified languages unless they are explicitly specified in the YAGO configuration file via the properties named "wikipedias", "wikidata" or "commons_wiki". For all dumps, the most recent version is downloaded unless an explicit date is set.
Usage:
downloadDumps.py -y YAGO_CONFIGURATION_FILE [(--date=DATE ...)] [--wikidata-date=WIKIDATA_DATE] [--commonswiki-date=COMMONSWIKI_DATE] [-s START_DATE]
Options:
-y YAGO_CONFIGURATION_FILE --yago-configuration-file=YAGO_CONFIGURATION_FILE the YAGO3 ini file that holds the configuration to be used
--date=DATE Date of the Wikipedia dump
--wikidata-date=WIKIDATA_DATE Date of the Wikidata dump
--commonswiki-date=COMMONSWIKI_DATE Date of the CommonsWiki dump
-s START_DATE --start-date=START_DATE Date from where the search for dumps starts backwards in time (default: today())
"""
from datetime import datetime
import getopt
import os
import re
import sys
import time
import requests
import shutil
import fileinput
import subprocess
from subprocess import PIPE, STDOUT
import inspect
from datetime import date, timedelta
from docopt import docopt
# Constants
DOWNLOAD_WIKIPEDIA_DUMP_SCRIPT = 'downloadWikipediaDump.sh'
DOWNLOAD_WIKIDATA_DUMP_SCRIPT = 'downloadWikidataDump.sh'
DOWNLOAD_COMMONSWIKI_DUMP_SCRIPT = 'downloadCommonsWikiDump.sh'
DOWNLOAD_GEONAMES_DUMP_SCRIPT = 'downloadGeonamesDump.sh'
WIKIPEDIA_DUMP_MAX_AGE_IN_DAYS = 365
YAGO3_ADAPTED_CONFIGURATION_EXTENSION = '.adapted.ini'
YAGO3_DUMPSFOLDER_PROPERTY = 'dumpsFolder'
YAGO3_LANGUAGES_PROPERTY = 'languages'
YAGO3_WIKIPEDIAS_PROPERTY = 'wikipedias'
YAGO3_WIKIDATA_PROPERTY = 'wikidata'
YAGO3_COMMONSWIKI_PROPERTY = "commons_wiki"
YAGO3_GEONAMES_PROPERTY = "geonames"
WIKIPEDIA_DUMPS_PAGE = 'https://dumps.wikimedia.org/'
WIKIDATA_DUMPS_PAGE = 'https://dumps.wikimedia.org/wikidatawiki/entities/'
COMMONSWIKI_DUMP_PAGE = 'https://dumps.wikimedia.org/commonswiki/'
WIKIDATA_DIR = 'wikidatawiki'
COMMONSWIKI_DIR = 'commonswiki'
GEONAMES_DIR = 'geonames'
# Initialize variables
dumpsFolder = None
languages = None
wikipedias = None
wikipediaIds = None
wikidata = None
wikidataUrl = None
commons_wiki = None
commonsWikiUrl = None
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def execute(cmd, customEnv=None):
process = subprocess.Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True, env=customEnv)
for line in iter(process.stdout.readline, ""):
print(line)
process.stdout.close()
return_code = process.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def main(argv=None):
global dumpsFolder, languages, wikipedias, wikidata, wikipediaIds
print("Loading YAGO configuration...")
loadYagoConfiguration()
if wikipedias == None:
print("Downloading Wikipedia dump(s)...")
wikipedias = downloadWikipediaDumps(languages)
else:
print("Wikipedia dump(s) already present.")
wikipediaIds = getWikipediaIdsFromFile(wikipedias)
if (wikidata == None):
print("Downloading Wikidata dump(s)...")
downloadWikidataDumps()
else:
print("Wikidata dump(s) already present.")
if commons_wiki == None:
print("Downloading CommonsWiki dump...")
downloadCommonsWikiDump()
else:
print("CommonsWiki dump already present.")
downloadGeonames()
print("Adapting the YAGO3 configuration...")
adaptYagoConfiguration()
print("Wikipedia, Wikidata and Commonswiki dumps are ready.")
"""
Loads the YAGO configuration file.
"""
def loadYagoConfiguration():
global dumpsFolder, languages, wikipedias, wikidata, commons_wiki, geonames
for line in fileinput.input(yagoConfigurationFile):
if re.match('^' + YAGO3_DUMPSFOLDER_PROPERTY + '\s*=', line):
dumpsFolder = re.sub(r'\s', '', line).split("=")[1]
# Make sure the folder exists
if not os.path.exists(dumpsFolder):
os.makedirs(dumpsFolder)
elif re.match('^' + YAGO3_LANGUAGES_PROPERTY + '\s*=', line):
languages = re.sub(r'\s', '', line).split("=")[1].split(",")
elif re.match('^' + YAGO3_WIKIPEDIAS_PROPERTY + '\s*=', line):
wikipedias = re.sub(r'\s', '', line).split("=")[1].split(",")
elif re.match('^' + YAGO3_COMMONSWIKI_PROPERTY + '\s*=', line):
commons_wiki = re.sub(r'\s', '', line).split("=")[1]
elif re.match('^' + YAGO3_WIKIDATA_PROPERTY + '\s*=', line):
wikidata = re.sub(r'\s', '', line).split("=")[1]
elif re.match('^' + YAGO3_GEONAMES_PROPERTY + '\s*=', line):
geonames = re.sub(r'\s', '', line).split("=")[1]
if languages == None:
print("ERROR: 'languages' is a mandatory property and must be set in the configuration file.")
sys.exit(1)
if (wikipedias == None or wikidata == None or commons_wiki == None) and dumpsFolder == None:
print("ERROR: Some resources require downloading dumps before YAGO can be run. You must set the 'dumpsFolder' property in the configuration file.")
sys.exit(1)
"""
Invokes the external shell script for downloading and extracting the Wikipedia dumps.
"""
def downloadWikipediaDumps(languages):
global dumpsFolder
global wikipediaIds
# Determine the most recent Wikipedia dump versions.
urls = getWikipediaDumpUrls(languages)
execute(
[os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), DOWNLOAD_WIKIPEDIA_DUMP_SCRIPT), dumpsFolder, ' '.join(urls)])
wikipediaIds = getWikipediaIds(urls)
return getWikipedias(urls)
"""
Duplicates the YAGO3 template ini file and adapts the properties as necessary
"""
def adaptYagoConfiguration():
global wikipedias
global wikipediaIds
wikipediasDone = False
wikidataDone = False
commonsWikiDone = False
geonamesDone = False
yagoAdaptedConfigurationFile = os.path.join(
os.path.dirname(yagoConfigurationFile), os.path.basename(yagoConfigurationFile) + YAGO3_ADAPTED_CONFIGURATION_EXTENSION)
shutil.copy(yagoConfigurationFile, yagoAdaptedConfigurationFile)
for line in fileinput.input(yagoAdaptedConfigurationFile, inplace=1):
if re.match('^' + YAGO3_WIKIPEDIAS_PROPERTY + '\s*=', line):
wikipediasDone = True
elif re.match('^' + YAGO3_WIKIDATA_PROPERTY + '\s*=', line):
wikidataDone = True
elif re.match('^' + YAGO3_COMMONSWIKI_PROPERTY + '\s*=', line):
commonsWikiDone = True
elif re.match('^' + YAGO3_GEONAMES_PROPERTY + '\s*=', line):
geonamesDone = True
# Write the (possibly modified) line back to the configuration file
sys.stdout.write(line)
# If the values couldn't be replaced because the property wasn't in the configuration yet, add it.
with open(yagoAdaptedConfigurationFile, "a") as configFile:
# Make sure to start a new line first
configFile.write('\n')
if wikipediasDone == False:
configFile.write(YAGO3_WIKIPEDIAS_PROPERTY + ' = ' + ','.join(wikipedias) + '\n')
if wikidataDone == False:
configFile.write(YAGO3_WIKIDATA_PROPERTY + ' = ' + getWikidata() + '\n')
if commonsWikiDone == False:
configFile.write(YAGO3_COMMONSWIKI_PROPERTY + ' = ' + getCommonsWiki() + '\n')
if geonamesDone == False:
configFile.write(YAGO3_GEONAMES_PROPERTY + ' = ' + getGeonames() + '\n')
"""
Converts from ISO 639-1 into ISO 639-2 format. For creating the mapping, we referred to this website:
https://www.loc.gov/standards/iso639-2/php/code_list.php
"""
def getThreeLetterLanguageCode(twoLetterCode):
codeTable = {
'ar': "ara",
'de': "deu",
'en': "eng",
'fr': "fra",
'it': "ita",
'jp': "jpn",
'es': "spa",
'pt': "por",
'ru': "rus",
'zh': "zho"
}
return codeTable.get(twoLetterCode, "xx")
"""
Convenience method for getting languageId + date identifiers from Wikipedia dump URLs.
"""
def getWikipediaIds(urls):
wpIds = []
for url in urls:
wpIds.append(getLanguage(url) + getFormattedDate(url))
return wpIds
"""
Convenience method for getting languageId + date identifiers from Wikipedia dump file paths.
"""
def getWikipediaIdsFromFile(files):
wpIds = []
for file in files:
wpIds.append(getLanguageFromFile(file) + getFormattedDateFromFile(file))
return wpIds
"""
Constructs the database ID from a set of Wikipedia URLs.
"""
def getDatabaseId(urls):
languages = []
dates = []
for url in urls:
languages.append(getLanguage(url))
dates.append(getFormattedDate(url))
return max(dates) + '_' + '_'.join(sorted(languages))
"""
Convenience method for getting the ISO 639-1 language code out of a Wikipedia dump URL.
"""
def getLanguage(url):
return url[28:30]
"""
Convenience method for getting the date string out of a Wikipedia dump URL.
"""
def getFormattedDate(url):
return url[35:43]
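
"""
Worked example (illustrative date): the two helpers above rely on fixed offsets
into URLs of the form built by getWikipediaDumpUrls(). The prefix
'https://dumps.wikimedia.org/' is 28 characters long, so url[28:30] is the
language code and url[35:43] is the dump date.
"""
def _example_url_offsets():
    url = WIKIPEDIA_DUMPS_PAGE + 'enwiki/20240101/enwiki-20240101-pages-articles.xml.bz2'
    assert getLanguage(url) == 'en'
    assert getFormattedDate(url) == '20240101'
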
"""
Convenience method for getting the ISO 639-1 language code out of a Wikipedia dump file path.
"""
def getLanguageFromFile(filePath):
return filePath[len(filePath)-34:len(filePath)-32]
"""
Convenience method for getting the date string out of a Wikipedia dump file path.
"""
def getFormattedDateFromFile(filePath):
return filePath[len(filePath)-27:len(filePath)-19]
"""
Convenience method for getting the extracted dump file name (without the .bz2 suffix) out of a Wikipedia dump URL.
"""
def getExtractedFilename(url):
return url[44:-4]
"""
Gets a list of URLs that point to the most recent Wikipedia dump versions for the specified list of languages
"""
def getWikipediaDumpUrls(languages):
urls = []
for i in range(0, len(languages)):
language = languages[i]
# If a fixed data is set, use exactly this one.
if len(dates) > i and dates[i]:
dumpDate = datetime.strptime(dates[i], '%Y%m%d')
formattedDumpDate = dumpDate.strftime("%Y%m%d")
url = WIKIPEDIA_DUMPS_PAGE + language + 'wiki/' + formattedDumpDate + '/' + language + 'wiki-' + formattedDumpDate + '-pages-articles.xml.bz2'
r = requests.head(url)
if (r.status_code == 200):
print("Latest Wikipedia dump for " + language + ": " + formattedDumpDate)
urls.append(url)
elif os.path.isfile(getWikipedias([url])[0]):
print("Wikipedia dump for " + language + ": " + formattedDumpDate + " available offline")
urls.append(url)
else:
print("ERROR: No Wikipedia dump found (neither remotely nor in local cache) for language " + language + " and date " + formattedDumpDate + ".")
sys.exit(1)
else:
dumpDate = startDate
while True:
formattedDumpDate = dumpDate.strftime("%Y%m%d")
url = WIKIPEDIA_DUMPS_PAGE + language + 'wiki/' + formattedDumpDate + '/' + language + 'wiki-' + formattedDumpDate + '-pages-articles.xml.bz2'
r = requests.head(url)
if (r.status_code == 200):
print("Latest Wikipedia dump for " + language + ": " + formattedDumpDate)
urls.append(url)
break
elif os.path.isfile(getWikipedias([url])[0]):
print("Wikipedia dump for " + language + ": " + formattedDumpDate + " available offline")
urls.append(url)
break
else:
if (startDate - dumpDate).days <= WIKIPEDIA_DUMP_MAX_AGE_IN_DAYS:
dumpDate -= timedelta(days=1)
else:
print("ERROR: No Wikipedia dump found (neither remotely nor in local cache) for language " + language + " (oldest dump date tried was " + formattedDumpDate + ").")
sys.exit(1)
return urls
"""
Gets a list of Wikipedia dump filenames (*-pages-articles.xml) in the specified YAGO target folder
"""
def getWikipedias(urls):
wps = []
for url in urls:
wps.append(os.path.join(dumpsFolder, getLanguage(url), getFormattedDate(url), getExtractedFilename(url)))
return wps
"""
Get the URL to the most recent wikidata dump
"""
def getWikidataUrl():
resultUrl = None
# Use the given date if it is available.
if wikidataDate:
dumpDate = datetime.strptime(wikidataDate, "%Y%m%d")
formattedDumpDate = dumpDate.strftime("%Y%m%d")
url= WIKIDATA_DUMPS_PAGE + formattedDumpDate + '/wikidata-' + formattedDumpDate + '-all-BETA.ttl.bz2'
r = requests.head(url)
if (r.status_code == requests.codes.ok):
print("Wikidata dump is available for the given date: " + formattedDumpDate)
resultUrl = url
elif os.path.isfile(os.path.join(dumpsFolder, WIKIDATA_DIR, formattedDumpDate, 'wikidata-' + formattedDumpDate + '-all-BETA.ttl')):
print("Wikidata dump exist: " + formattedDumpDate)
resultUrl = url
else:
print("ERROR: No Wikidata dump file found (neither remotely nor in local cache) for date: " + formattedDumpDate)
sys.exit(1)
else:
dumpDate = startDate
while True:
formattedDumpDate = dumpDate.strftime("%Y%m%d")
url= WIKIDATA_DUMPS_PAGE + formattedDumpDate + '/wikidata-' + formattedDumpDate + '-all-BETA.ttl.bz2'
r = requests.head(url)
if (r.status_code == requests.codes.ok):
print("Latest Wikidata dump: " + formattedDumpDate)
resultUrl = url
break
elif os.path.isfile(os.path.join(dumpsFolder, WIKIDATA_DIR, formattedDumpDate, 'wikidata-' + formattedDumpDate + '-all-BETA.ttl')):
print("Wikidata dump exist: " + formattedDumpDate)
resultUrl = url
break
elif (startDate - dumpDate).days <= WIKIPEDIA_DUMP_MAX_AGE_IN_DAYS:
dumpDate -= timedelta(days = 1)
else:
print("ERROR: No Wikidata dump file found (neither remotely nor in local cache), oldest dump date tried was: " + formattedDumpDate)
sys.exit(1)
return resultUrl
"""
Invokes the external shell script for downloading and extracting the Wikidata dump
"""
def downloadWikidataDumps():
global wikidataUrl
# Determine the most recent Wikidata dump version.
wikidataUrl = getWikidataUrl()
execute(
[os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), DOWNLOAD_WIKIDATA_DUMP_SCRIPT),
dumpsFolder, wikidataUrl])
"""
Invokes the external shell script for downloading and extracting the Commonswiki dump
"""
def downloadCommonsWikiDump():
global commonsWikiUrl
# Determine the most recent CommonsWiki dump version
commonsWikiUrl = getCommonsWikiUrl()
subprocess.call(
[os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), DOWNLOAD_COMMONSWIKI_DUMP_SCRIPT),
dumpsFolder, commonsWikiUrl])
"""
Invokes the external shell script for downloading and extracting Geonames dump
"""
def downloadGeonames():
subprocess.call(
[os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), DOWNLOAD_GEONAMES_DUMP_SCRIPT),
dumpsFolder,
startDate.strftime("%Y%m%d")])
"""
Gets the url that point to the most recent Commonswiki dump version
"""
def getCommonsWikiUrl():
resultUrl = None
# Use the given date if it is available.
if commonswikiDate:
dumpDate = datetime.strptime(commonswikiDate, "%Y%m%d")
formattedDumpDate = dumpDate.strftime("%Y%m%d")
url= COMMONSWIKI_DUMP_PAGE + formattedDumpDate + '/commonswiki-' + formattedDumpDate + '-pages-articles.xml.bz2'
r = requests.head(url)
if (r.status_code == requests.codes.ok):
print("Commonswiki dump is available for the given date: " + formattedDumpDate)
resultUrl = url
elif os.path.isfile(os.path.join(dumpsFolder, COMMONSWIKI_DIR, formattedDumpDate, 'commonswiki-' + formattedDumpDate + '-pages-articles.xml')):
print("Commonswiki dump exist: " + formattedDumpDate)
resultUrl = url
else:
print("ERROR: No Commonswiki dump file found (neither remotely nor in local cache) for date: " + formattedDumpDate)
sys.exit(1)
else:
dumpDate = startDate
while True:
formattedDumpDate = dumpDate.strftime("%Y%m%d")
url= COMMONSWIKI_DUMP_PAGE + formattedDumpDate + '/commonswiki-' + formattedDumpDate + '-pages-articles.xml.bz2'
r = requests.head(url)
if (r.status_code == requests.codes.ok):
print("Latest Commonswiki dump: " + formattedDumpDate)
resultUrl = url
break
elif os.path.isfile(os.path.join(dumpsFolder, COMMONSWIKI_DIR, formattedDumpDate, 'commonswiki-' + formattedDumpDate + '-pages-articles.xml')):
print("Commonswiki dump exist: " + formattedDumpDate)
resultUrl = url
break
elif (startDate - dumpDate).days <= WIKIPEDIA_DUMP_MAX_AGE_IN_DAYS:
dumpDate -= timedelta(days = 1)
else:
print("ERROR: No Commonswiki dump file found (neither remotely nor in local cache), oldest dump date tried was: " + formattedDumpDate)
sys.exit(1)
return resultUrl
"""
Gets the path to wikidata dump
"""
def getWikidata():
    global wikidataUrl
date = wikidataUrl[50:58]
return os.path.join(dumpsFolder, WIKIDATA_DIR, date, 'wikidata-' + date + '-all-BETA.ttl' )
"""
Gets the path to the Commonswiki dump
"""
def getCommonsWiki():
global commonsWikiUrl
date = commonsWikiUrl[40:48]
return os.path.join(dumpsFolder, COMMONSWIKI_DIR, date, 'commonswiki-' + date + '-pages-articles.xml')
"""
Gets the path to the Geonames dump folder
"""
def getGeonames():
# There will always be a folder which is named according to startDate. Either it's a symlink or a real dir,
# but this doesn't matter here.
return os.path.join(dumpsFolder, GEONAMES_DIR, startDate.strftime("%Y-%m-%d"))
if __name__ == "__main__":
# parse options
options = docopt(__doc__)
dates = options['--date']
wikidataDate = options['--wikidata-date']
commonswikiDate = options['--commonswiki-date']
yagoConfigurationFile = options['--yago-configuration-file']
# Read optional arguments with dynamic defaults
if options['--start-date']:
startDate = datetime.strptime(options['--start-date'], '%Y%m%d')
else:
startDate = datetime.today()
sys.exit(main())

RefNet | RefNet-master/inference.py

from __future__ import print_function
import os
import tensorflow as tf
import greed_search
import data
import util
import evaluate
import json
import glob
import shutil
FLAGS = tf.app.flags.FLAGS
class Inference:
    """Greedy-search decoder."""
def __init__(self, model, batcher, vocab, ckpt_path):
self._model = model
self._model.build_graph()
self._batcher = batcher
self._vocab = vocab
self.ckpt_path = ckpt_path
self._saver = tf.train.Saver()
self._sess = tf.Session(config=util.get_config())
self._saver.restore(self._sess, self.ckpt_path)
print("load mode from %s" % self.ckpt_path)
self.model_num = self.ckpt_path.split('-')[-1]
ckpt_name = "ckpt-" + self.model_num # this is something of the form "ckpt-123456"
self._decode_dir = os.path.join(FLAGS.log_root, get_infer_dir(ckpt_name))
# Make the decode dir if necessary
if not os.path.exists(self._decode_dir):
os.mkdir(self._decode_dir)
else:
            raise Exception("infer directory %s should not already exist" % self._decode_dir)
def infer(self):
"""Decode examples until data is exhausted (if FLAGS.single_pass) and return"""
counter = 0
output = {}
while True:
batch = self._batcher.next_batch() # 1 example repeated across batch
if batch is None: # finished decoding dataset in single_pass mode
print("Decoder has finished reading dataset for single_pass.")
# log original information
with open(os.path.join(self._decode_dir, "output.json"), 'w', encoding='utf-8') as w:
json.dump(output, w)
print("Output has been saved in %s." % self._decode_dir)
#start evaluation
evaluate.main(self.ckpt_path, FLAGS.log_root, self._decode_dir, FLAGS.mode, FLAGS.multi_label_eval)
return
background_span = data.show_background_span(batch.original_backgrounds_token[0], batch.original_b_starts[0], batch.original_b_ends[0])
response_span = data.show_background_span(batch.original_responses_token[0], batch.original_r_starts[0], batch.original_r_ends[0])
# Run greed search to get best Hypothesis
best_hyp = greed_search.run_greed_search(self._sess, self._model, self._vocab, batch)
best_hyp.tokens = [token for token in best_hyp.tokens if token not in [None]]
# Extract the output ids from the hypothesis and convert back to words
output_ids = best_hyp.tokens[1:]
decoded_token, highlights_decoded_token, spans = data.outputids2words(output_ids, self._vocab, batch.bac_oovs[0], batch.original_backgrounds_token[0])
if output_ids[-1] == 3:
output_ids_semantic = output_ids[:(len(output_ids)-1)]
else:
output_ids_semantic = output_ids
ids_for_print = [str(i)for i in output_ids_semantic]
ids_for_print = ' '.join(ids_for_print)
switch_ref_probs = best_hyp.switch_ref_probs
switch_ref_probs = [str(i) for i in switch_ref_probs]
switch_ref_probs = ' '.join(switch_ref_probs)
switch_gen_probs = best_hyp.switch_gen_probs
switch_gen_probs = [str(i) for i in switch_gen_probs]
switch_gen_probs = ' '.join(switch_gen_probs)
switch_gen_pred_probs = best_hyp.switch_gen_pred_probs
switch_gen_pred_probs = [str(i) for i in switch_gen_pred_probs]
switch_gen_pred_probs = ' '.join(switch_gen_pred_probs)
switch_gen_copy_probs = best_hyp.switch_gen_copy_probs
switch_gen_copy_probs = [str(i) for i in switch_gen_copy_probs]
switch_gen_copy_probs = ' '.join(switch_gen_copy_probs)
# Remove the [STOP] token from decoded_words, if necessary
try:
fst_stop_idx = decoded_token.index(data.STOP_DECODING) # index of the (first) [STOP] symbol
fst_stop_idx1 = highlights_decoded_token.index(data.STOP_DECODING)
decoded_token = decoded_token[:fst_stop_idx]
highlights_decoded_token = highlights_decoded_token[:fst_stop_idx1]
if len(decoded_token) == 0:
decoded_token.append(".")
except ValueError:
decoded_token = decoded_token
highlights_decoded_token = highlights_decoded_token
spans_output = ' '.join(spans)
decoded_output = ' '.join(decoded_token)
highlights_decoded_output = ' '.join(highlights_decoded_token)
output[batch.original_example_ids[0]] = {"background": background_span, "context": batch.original_contexts[0], "highlights_ref_response": response_span,
"highlights_inferred_response": highlights_decoded_output, "ref_response": batch.original_responses[0],
"inferred_response": decoded_output, "ref_span": batch.original_spans[0],"inferred_spans": spans_output, "output_index": output_ids_semantic,
"switch_ref_probs": switch_ref_probs, "switch_gen_probs": switch_gen_probs,
"switch_gen_pred_probs": switch_gen_pred_probs,"switch_gen_copy_probs": switch_gen_copy_probs}
self.write_for_observation(batch.original_example_ids[0], background_span, batch.original_contexts[0], response_span, highlights_decoded_output, ids_for_print, switch_ref_probs, switch_gen_probs, switch_gen_pred_probs, switch_gen_copy_probs, counter)
counter += 1 # this is how many examples we've decoded
def write_for_observation(self, example_ids, background, contexts, ref_response, decoded_output, ids_for_print, switch_ref_probs, switch_gen_probs, switch_gen_pred_probs, switch_gen_copy_probs, ex_index):
ref_file = os.path.join(self._decode_dir, "%s_%s_Inferred_Examples.txt" % (self.model_num, FLAGS.mode))
with open(ref_file, "a", encoding="utf-8") as f:
f.write("Example_ids:\n" + example_ids + "\n\n")
f.write("Background:\n"+ background+"\n\n")
f.write("Context:\n"+contexts + "\n\n")
f.write("Reference_response:\n"+ ref_response + "\n\n")
f.write("Inferenced_response:\n" + decoded_output+"\n\n")
f.write("Ids_for_print:\n" + ids_for_print + "\n\n")
f.write("Switch_Ref_Probs:\n" + switch_ref_probs + "\n\n")
f.write("Switch_Gen_Probs:\n" + switch_gen_probs + "\n\n")
f.write("Switch_Gen_Pred_Probs:\n" + switch_gen_pred_probs + "\n\n")
f.write("Switch_Gen_Copy_Probs:\n" + switch_gen_copy_probs+ "\n\n\n\n")
print("Wrote %s example %i to file" % (self.ckpt_path, ex_index))
def get_infer_dir(ckpt_name):
if "val" in FLAGS.mode:
dataset = "Validation"
elif "test" in FLAGS.mode:
dataset = "Test"
dirname = "%s_Infer" % dataset
if ckpt_name is not None:
dirname += "_%s" % ckpt_name
return dirname
| 7,162 | 48.743056 | 262 | py |
RefNet | RefNet-master/greed_search.py | import tensorflow as tf
import data
FLAGS = tf.app.flags.FLAGS
class Hypothesis:
"""Class to represent a hypothesis during beam search. Holds all the information needed for the hypothesis."""
def __init__(self, tokens, probs, state, attn_dists, switch_ref_probs, switch_gen_probs, switch_gen_pred_probs, switch_gen_copy_probs):
self.tokens = tokens
self.probs = probs
self.state = state
self.attn_dists = attn_dists
self.switch_ref_probs = switch_ref_probs
self.switch_gen_probs = switch_gen_probs
self.switch_gen_pred_probs = switch_gen_pred_probs
self.switch_gen_copy_probs = switch_gen_copy_probs
def extend(self, token, prob, state, attn_dist, switch_ref_prob, switch_gen_prob, switch_gen_pred_prob, switch_gen_copy_prob):
return Hypothesis(tokens=self.tokens + [token],
probs=self.probs + [prob],
state=state,
attn_dists=self.attn_dists + [attn_dist],
switch_ref_probs=self.switch_ref_probs + [switch_ref_prob],
switch_gen_probs=self.switch_gen_probs + [switch_gen_prob],
switch_gen_pred_probs=self.switch_gen_pred_probs + [switch_gen_pred_prob],
switch_gen_copy_probs=self.switch_gen_copy_probs + [switch_gen_copy_prob])
@property
def latest_token(self):
return self.tokens[-1]
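# The greedy decoding loop below alternates between two kinds of steps: a
# generation step emits a single word id, while a reference step emits a
# [start, end] span over the background. When a span is chosen, every span
# token except the last is fed back through the decoder one at a time (these
# filler steps are recorded with "<mask>" placeholders) so the decoder state
# tracks the copied tokens, and the final span token is then used as the
# regular input for the next decoding step.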
def run_greed_search(sess, model, vocab, batch):
enc_batch, enc_states, que_states, dec_in_state = model.run_encoder(sess, batch)
hyp = Hypothesis(tokens=[vocab.word2id(data.START_DECODING)], probs=[], state=dec_in_state, attn_dists=[], switch_ref_probs=[], switch_gen_probs=[], switch_gen_pred_probs=[], switch_gen_copy_probs=[])
steps = 0
while True:
latest_token = hyp.latest_token
if isinstance(latest_token, list):
span_length = latest_token[1]-latest_token[0]+1
            mask_length = span_length - 1
            for i in range(mask_length):
mask_one_token = [[enc_batch[0][latest_token[0]+i]]]
state = hyp.state
(_, _, _, _, _, _, _,_,_,new_state) = model.inference_step(sess=sess, batch=batch, latest_tokens=mask_one_token, bac_states=enc_states, que_states=que_states, dec_init_states=state)
hyp = hyp.extend(token=None, prob=None, state=new_state, attn_dist="<mask>", switch_ref_prob="<mask>", switch_gen_prob="<mask>", switch_gen_pred_prob="<mask>", switch_gen_copy_prob="<mask>")
latest_token = [[enc_batch[0][latest_token[1]]]]
else:
latest_token = [[latest_token if latest_token in range(vocab.size()) else vocab.word2id(data.UNKNOWN_TOKEN)]]
state = hyp.state
# Run one step of the decoder to get the new info
(word_ids, word_probs, span_ids, span_probs, switch_ref_prob, switch_gen_prob, switch_gen_pred_prob, switch_gen_copy_prob, attn_dist, new_state) = model.inference_step(sess=sess, batch=batch, latest_tokens=latest_token, bac_states=enc_states, que_states=que_states, dec_init_states=state)
# span level
if switch_ref_prob >= switch_gen_prob:
token = span_ids
prob = span_probs
step = span_ids[1]-span_ids[0] + 1
# word level
else:
token = word_ids # int
prob = word_probs # float
step = 1
# Extend each hypothesis and collect them all in all_hyps
hyp = hyp.extend(token=token, prob=prob, state=new_state, attn_dist=attn_dist, switch_ref_prob=switch_ref_prob, switch_gen_prob=switch_gen_prob, switch_gen_pred_prob=switch_gen_pred_prob, switch_gen_copy_prob=switch_gen_copy_prob)
steps += step
        # Stop once the hypothesis has produced the end token.
if hyp.latest_token == vocab.word2id(data.STOP_DECODING): # if stop token is reached...
break
if steps >= FLAGS.max_dec_steps:
break
    # Return the completed greedy hypothesis
return hyp
| 4,122 | 46.390805 | 296 | py |
RefNet | RefNet-master/batcher.py | """This file contains code to process data into batches"""
import queue
from random import shuffle
from threading import Thread
import time
import numpy as np
from collections import namedtuple
import tensorflow as tf
import data
class Example:
"""Class representing a train/val/test example for response generation.
"""
def __init__(self, background_text, context_text, response_text, span_text, b_start, b_end, r_start, r_end,
example_id, vocab, hps):
self.hps = hps
self.b_start = int(b_start)
self.b_end = int(b_end)
self.r_start = int(r_start)
self.r_end = int(r_end)
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the background
background_token = background_text.split()
if len(background_token) > hps.max_bac_enc_steps:
background_token = background_token[:hps.max_bac_enc_steps]
background_text = " ".join(b for b in background_token)
self.background_len = len(background_token)
self.background_input = [vocab.word2id(w) for w in background_token] # list of word ids; OOVs are represented by the id for UNK token
self.example_id = example_id
# Process the context
context_token = context_text.split()
if len(context_token) > hps.max_con_enc_steps:
context_token = context_token[len(context_token) - hps.max_con_enc_steps:]
context_text = " ".join(c for c in context_token)
self.context_len = len(context_token) # store the length after truncation but before padding
self.context_input = [vocab.word2id(w) for w in context_token] # list of word ids; OOVs are represented by the id for UNK token
# Process the response
response_token = response_text.split()
response_ids = [vocab.word2id(w) for w in response_token] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(response_ids, hps.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
        # Store a version of the background_input where in-background OOVs are represented by their temporary OOV ids;
        # also store the in-background OOV words themselves
self.background_input_extend_vocab, self.background_oovs = data.background2ids(background_token, vocab)
        # Get a version of the response where in-background OOVs are represented by their temporary OOV ids
response_ids_extend_vocab = data.response2ids(response_token, vocab, self.background_oovs)
        # Overwrite the decoder target sequence so it uses the temporary background OOV ids
_, self.target = self.get_dec_inp_targ_seqs(response_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)
# Store the original strings
self.original_background_token = background_token
self.original_background = background_text
self.original_context = context_text
self.original_response = response_text
self.original_response_token = response_token
self.original_span = span_text
self.original_b_start = self.b_start
self.original_b_end = self.b_end
self.original_r_start = self.r_start
self.original_r_end = self.r_end
self.original_example_id = example_id
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
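        # Illustrative example (made-up ids): with start_id=2, stop_id=3, max_len=3
        # and sequence=[11, 12, 13] this returns inp=[2, 11, 12], target=[11, 12, 13]
        # (truncated, so no stop token); with sequence=[11, 12] it returns
        # inp=[2, 11, 12], target=[11, 12, 3] (stop token appended).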
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_bac_encoder_input(self, max_len, pad_id):
"""Pad the background encoder input sequence with pad_id up to max_len."""
while len(self.background_input) < max_len:
self.background_input.append(pad_id)
while len(self.background_input_extend_vocab) < max_len:
self.background_input_extend_vocab.append(pad_id)
def pad_con_encoder_input(self, max_len, pad_id):
"""Pad the context input sequence with pad_id up to max_len."""
while len(self.context_input) < max_len:
self.context_input.append(pad_id)
class Batch:
"""Class representing a minibatch of train/val/test examples for text summarization.
"""
def __init__(self, example_list, hps, vocab):
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_bac_encoder_seq(example_list, hps) # initialize the input to the encoder
self.init_con_encoder_seq(example_list, hps)
self.init_decoder_seq(example_list, hps)
self.init_switch_label(example_list, hps)
self.init_start_end_label(example_list, hps)
self.store_orig_strings(example_list) # store the original strings
def init_bac_encoder_seq(self, example_list, hps):
# Determine the maximum length of the encoder input sequence in this batch
max_bac_encoder_seq_len = max([ex.background_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_bac_encoder_input(max_bac_encoder_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.bac_enc_batch = np.zeros((hps.batch_size, max_bac_encoder_seq_len),dtype=np.int32)
self.background_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.bac_enc_padding_mask = np.zeros((hps.batch_size, max_bac_encoder_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.bac_enc_batch[i, :] = ex.background_input[:]
self.background_lens[i] = ex.background_len
for j in range(ex.background_len):
self.bac_enc_padding_mask[i][j] = 1
        # For the copy mechanism, we need to store some extra info
        # Determine the max number of in-background OOVs in this batch
self.max_bac_oovs = max([len(ex.background_oovs) for ex in example_list])
        # Store the in-background OOVs themselves
self.bac_oovs = [ex.background_oovs for ex in example_list]
        # Store the version of the enc_batch that uses the background OOV ids
self.bac_enc_batch_extend_vocab = np.zeros((hps.batch_size, max_bac_encoder_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.bac_enc_batch_extend_vocab[i, :] = ex.background_input_extend_vocab[:]
def init_con_encoder_seq(self, example_list, hps):
# Determine the maximum length of the encoder input sequence in this batch
max_con_encoder_seq_len = max([ex.context_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_con_encoder_input(max_con_encoder_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.con_enc_batch = np.zeros((hps.batch_size, max_con_encoder_seq_len), dtype=np.int32)
self.context_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.con_enc_padding_mask = np.zeros((hps.batch_size, max_con_encoder_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.con_enc_batch[i, :] = ex.context_input[:]
self.context_lens[i] = ex.context_len
for j in range(ex.context_len):
self.con_enc_padding_mask[i][j] = 1
def init_decoder_seq(self, example_list, hps):
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
self.dec_switch_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
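        # dec_padding_mask marks every real (non-PAD) decoder step, whereas
        # dec_switch_mask additionally zeroes out the positions strictly inside the
        # reference span (r_start < j <= r_end), so the switching loss is only
        # charged where a generate-vs-reference decision is actually made.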
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in range(ex.dec_len):
if (j > ex.r_start) and (j <= ex.r_end):
self.dec_switch_mask[i][j] = 0
self.dec_padding_mask[i][j] = 1
else:
self.dec_switch_mask[i][j] = 1
self.dec_padding_mask[i][j] = 1
def init_switch_label(self, example_list, hps):
self.switch_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
for i, ex in enumerate(example_list):
if ex.r_start < hps.max_dec_steps:
self.switch_batch[i][ex.r_start] = 1
else:
continue
def init_start_end_label(self, example_list, hps):
max_bac_encoder_seq_len = max([ex.background_len for ex in example_list])
self.bac_start_batch = np.zeros((hps.batch_size, max_bac_encoder_seq_len), dtype=np.float32)
self.bac_end_batch = np.zeros((hps.batch_size, max_bac_encoder_seq_len), dtype=np.float32)
for i, ex in enumerate(example_list):
if ex.b_start >= max_bac_encoder_seq_len:
continue
else:
self.bac_start_batch[i][ex.b_start] = 1.0
if ex.b_end >= max_bac_encoder_seq_len:
modified_b_end = max_bac_encoder_seq_len - 1
self.bac_end_batch[i][modified_b_end] = 1.0
else:
self.bac_end_batch[i][ex.b_end] = 1.0
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object """
self.original_backgrounds_token = [ex.original_background_token for ex in example_list]
self.original_backgrounds = [ex.original_background for ex in example_list] # list of lists
self.original_contexts = [ex.original_context for ex in example_list] # list of lists
self.original_responses = [ex.original_response for ex in example_list]
self.original_responses_token = [ex.original_response_token for ex in example_list]
self.original_spans = [ex.original_span for ex in example_list]
self.original_b_starts = [ex.original_b_start for ex in example_list]
self.original_b_ends = [ex.original_b_end for ex in example_list]
self.original_r_starts = [ex.original_r_start for ex in example_list]
self.original_r_ends = [ex.original_r_end for ex in example_list]
self.original_example_ids = [ex.original_example_id for ex in example_list]
class Batcher:
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass):
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = 16 # num threads to fill example queue
self._num_batch_q_threads = 4 # num threads to fill batch queue
self._bucketing_cache_size = 100
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(
Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
            self._watch_thread.start() # start the watcher thread
def next_batch(self):
"""Return a Batch from the batch queue.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i',self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
tf.logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
while True:
try:
(background_text, context_text, response_text, span_text, b_start, b_end, r_start, r_end, example_id) = next(input_gen)
except StopIteration: # if there are no more examples:
tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
example = Example(background_text, context_text, response_text, span_text, b_start, b_end, r_start, r_end, example_id, self._vocab, self._hps)
self._example_queue.put(example) # place the Example in the example queue.
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
"""
while True:
if self._hps.mode == 'train':
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(
self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.background_len) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches:
self._batch_queue.put(Batch(b, self._hps, self._vocab))
            else: # greedy search inference mode
ex = self._example_queue.get()
b = [ex for _ in range(self._hps.batch_size)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
            # restart any dead queue-filling threads
for idx, t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx, t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = next(example_generator)
try:
background_text = e.features.feature['background'].bytes_list.value[0].decode()
context_text = e.features.feature['context'].bytes_list.value[0].decode()
response_text = e.features.feature['response'].bytes_list.value[0].decode()
span_text = e.features.feature['span'].bytes_list.value[0].decode()
b_start = e.features.feature['b_start'].bytes_list.value[0].decode()
b_end = e.features.feature['b_end'].bytes_list.value[0].decode()
r_start = e.features.feature['r_start'].bytes_list.value[0].decode()
r_end = e.features.feature['r_end'].bytes_list.value[0].decode()
example_id = e.features.feature['example_id'].bytes_list.value[0].decode()
except ValueError:
                tf.logging.error('Failed to get the required fields from example')
continue
if len(background_text) == 0:
                tf.logging.warning('Found an example with empty background text. Skipping it.')
else:
yield (background_text, context_text, response_text, span_text, b_start, b_end, r_start, r_end, example_id)
if __name__ == '__main__':
hps_dict = {'mode':'train', 'batch_size': 16, 'max_bac_enc_steps': 300,'max_con_enc_steps': 65, 'max_dec_steps': 95}
hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
vocab = data.Vocab('data/mixed_context/finished_files/vocab', 25000)
batcher = Batcher('data/mixed_context/finished_files/chunked/train_*', vocab, hps, single_pass=False)
batch = batcher.next_batch()
# print("batch.target_batch: ",batch.target_batch)
i = 0
print()
print("backgrounds: ", batch.original_backgrounds[i], "\n")
print("contexts: ", batch.original_contexts[i], "\n")
print("responses: ", batch.original_responses[i], "\n")
print("spans: ", batch.original_spans[i], "\n")
print("b_starts: ", batch.original_b_starts[i], "\n")
print("b_ends: ", batch.original_b_ends[i], "\n")
print("r_starts: ", batch.original_r_starts[i], "\n")
print("r_ends: ", batch.original_r_ends[i], "\n")
print("example_ids: ", batch.original_example_ids[i], "\n")
print("batch.dec_padding_mask: ", batch.dec_padding_mask[i], "\n")
print("batch.switch_mask: ", batch.dec_switch_mask[i], "\n")
print("batch.switch_batch: ", batch.switch_batch[i], "\n")
print("batch.bac_start_batch: ", batch.bac_start_batch[i], "\n")
print("batch.bac_end_batch: ", batch.bac_end_batch[i], "\n")
batch = batcher.next_batch()
print("======================================================----------")
i = 0
print()
print("backgrounds: ", batch.original_backgrounds[i], "\n")
print("contexts: ", batch.original_contexts[i], "\n")
print("responses: ", batch.original_responses[i], "\n")
print("spans: ", batch.original_spans[i], "\n")
print("b_starts: ", batch.original_b_starts[i], "\n")
print("b_ends: ", batch.original_b_ends[i], "\n")
print("r_starts: ", batch.original_r_starts[i], "\n")
print("r_ends: ", batch.original_r_ends[i], "\n")
print("example_ids: ", batch.original_example_ids[i], "\n")
print("batch.dec_padding_mask: ", batch.dec_padding_mask[i], "\n")
print("batch.switch_mask: ", batch.dec_switch_mask[i], "\n")
print("batch.switch_batch: ", batch.switch_batch[i], "\n")
print("batch.bac_start_batch: ", batch.bac_start_batch[i], "\n")
print("batch.bac_end_batch: ", batch.bac_end_batch[i], "\n")
| 22,095 | 50.990588 | 185 | py |
RefNet | RefNet-master/evaluate.py | import sys
import glob
import json
import os
import time
from metrics import rouge, bleu, f1
def rounder(num):
return round(num, 2)
def bleu_max_over_ground_truths(prediction, ground_truths):
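    # Multi-reference convention: score the prediction against every available
    # reference and keep the best score; the ROUGE variant below keeps the per-metric maxima.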
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = cal_bleu([prediction], [ground_truth])
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def rouge_max_over_ground_truths(prediction, ground_truths):
scores_for_rouge1 = []
scores_for_rouge2 = []
scores_for_rougel = []
for ground_truth in ground_truths:
score = cal_rouge([prediction], [ground_truth])
scores_for_rouge1.append(score[0])
scores_for_rouge2.append(score[1])
scores_for_rougel.append(score[2])
return max(scores_for_rouge1), max(scores_for_rouge2), max(scores_for_rougel)
def cal_bleu(infer, ref):
while True:
try:
bleu_score = bleu.moses_multi_bleu(infer, ref)
return bleu_score
except FileNotFoundError:
print("Failed to test bleu_score. Sleeping for %i secs...", 0.01)
time.sleep(3)
def cal_rouge(infer, ref):
x = rouge.rouge(infer, ref)
return x['rouge_1/f_score'] * 100, x['rouge_2/f_score'] * 100, x['rouge_l/f_score'] * 100
def evaluate(infer, ref, inferred_spans, ref_spans):
bl = cal_bleu(infer, ref)
x = rouge.rouge(infer, ref)
f, e, total = f1.evaluate(inferred_spans, ref_spans)
return bl, x['rouge_1/f_score'] * 100, x['rouge_2/f_score'] * 100, x['rouge_l/f_score'] * 100, f, e, total
def evaluate_multi_ref(inferred_response, inferred_spans, example_id):
ref_spans = []
ref_responses = []
print("load multi reference data")
with open("data/modified_multi_reference_test.json", 'r', encoding='utf-8') as r:
multi_reference_test = json.load(r)
    assert len(multi_reference_test) == len(example_id), "the number of multi-reference examples should match the number of predictions"
for i in example_id:
ref_spans.append(multi_reference_test[i]["spans"])
ref_responses.append(multi_reference_test[i]["responses"])
print("calculate f1 metric")
# calculate f1 metric
f, e, total_span = f1.evaluate(inferred_spans, ref_spans)
# calculate bleu and rouge
print("multi_f1:", f)
print("multi_em:", e)
print("span total:", total_span)
print("calculate bleu and rouge")
bleu = rouge_1 = rouge_2 = rouge_l = total = 0
    assert len(inferred_response) == len(ref_responses), "the number of predicted responses and ground-truth reference sets should be the same"
for i, pre in enumerate(inferred_response):
print("calculating %d " % (i+1))
bleu += bleu_max_over_ground_truths(pre, ref_responses[i])
rouge_result = rouge_max_over_ground_truths(pre, ref_responses[i])
rouge_1 += rouge_result[0]
rouge_2 += rouge_result[1]
rouge_l += rouge_result[2]
total += 1
bleu = bleu / total
rouge_1 = rouge_1 / total
rouge_2 = rouge_2 / total
rouge_l = rouge_l / total
return bleu, rouge_1, rouge_2, rouge_l, f, e, total_span
def main(model_path, log_root, decode_dir, mode, multi_label_eval=False):
    # start evaluation
with open(os.path.join(decode_dir, "output.json"), 'r', encoding='utf-8') as r:
output = json.load(r)
example_index = list(output.keys())
ref_response = []
inferred_response = []
ref_spans = []
inferred_spans = []
gen_ref_num = 0
for i in example_index:
ref_response.append(output[i]["ref_response"])
inferred_response.append(output[i]["inferred_response"])
ref_spans.append([output[i]["ref_span"]])
inferred_spans.append(output[i]["inferred_spans"])
num_ref = False
num_gen = False
for item in output[i]["output_index"]:
if isinstance(item, list):
num_ref = True
elif isinstance(item, int):
num_gen = True
if num_ref and num_gen:
gen_ref_num = gen_ref_num+1
    assert len(inferred_response) == len(ref_response), "the lengths of inferred_response and ref_response should be the same"
print("start single reference evaluation")
result = evaluate(inferred_response, ref_response, inferred_spans, ref_spans)
try:
with open(os.path.join(log_root, str(mode)+"_result.json"), 'r', encoding='utf-8') as r:
result_log = json.load(r)
except FileNotFoundError:
with open(os.path.join(log_root, str(mode)+"_result.json"), 'w', encoding='utf-8') as w:
result_log = {}
json.dump(result_log, w)
result_log[model_path] = {"bleu": rounder(float(result[0])), "rouge_1": rounder(float(result[1])), "rouge_2": rounder(float(result[2])), "rouge_l": rounder(float(result[3])), "f1": rounder(float(result[4])), "exact_match": rounder(float(result[5])), "span_num": result[6],"gen_ref_num":gen_ref_num}
with open(os.path.join(log_root, str(mode)+"_result.json"), 'w', encoding='utf-8') as w:
json.dump(result_log, w)
print("finish single reference evaluation")
if mode == "test" and multi_label_eval:
print("start multi reference evaluation for test")
multi_ref_result_log = {}
multi_ref_result = evaluate_multi_ref(inferred_response, inferred_spans, example_index)
multi_ref_result_log[model_path] = {"multi_ref_bleu": rounder(float(multi_ref_result[0])), "multi_ref_rouge_1": rounder(float(multi_ref_result[1])), "multi_ref_rouge_2": rounder(float(multi_ref_result[2])),
"multi_ref_rouge_l": rounder(float(multi_ref_result[3])), "multi_ref_f1": rounder(float(multi_ref_result[4])), "multi_ref_exact_match": rounder(float(multi_ref_result[5])),
"span_num": multi_ref_result[6],"gen_ref_num": gen_ref_num}
with open(os.path.join(log_root, str(mode)+"_multi_result.json"), 'w', encoding='utf-8') as w:
json.dump(multi_ref_result_log, w)
print("all evaluation is finished")
if __name__ == '__main__':
mode = "test"
train_dir = "log/Camera_Ready_2_RefNet/train/"
model_dir = "log/Camera_Ready_2_RefNet/train/model.ckpt-10775"
main(model_dir, "log/Camera_Ready_2_RefNet", "log/Camera_Ready_2_RefNet/55_Test_Infer_ckpt-10775", mode, True)
r = open(os.path.join(train_dir, "finished_"+mode+"_models.json"), 'r', encoding='utf-8')
finished_option_models = json.load(r)
r.close()
finished_option_models["finished_"+mode+"_models"].append(model_dir)
w = open(os.path.join(train_dir, "finished_"+mode+"_models.json"), 'w', encoding='utf-8')
json.dump(finished_option_models, w)
w.close()
| 6,732 | 38.145349 | 302 | py |
RefNet | RefNet-master/model.py | import time
import numpy as np
import tensorflow as tf
from hybrid_decoder import hybrid_decoder
import util
FLAGS = tf.app.flags.FLAGS
class Model:
def __init__(self, hps, vocab):
self._hps = hps
self._vocab = vocab
def _add_placeholders(self):
"""Add placeholders to the graph. These are entry points for any input data."""
hps = self._hps
# background encoder part
self._enc_batch = tf.placeholder(tf.int32, [hps.batch_size, None], name='background_batch')
self._enc_lens = tf.placeholder(tf.int32, [hps.batch_size], name='background_lens')
self._enc_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='background_padding_mask')
self._enc_batch_extend_vocab = tf.placeholder(tf.int32, [hps.batch_size, None], name='background_batch_extend_vocab')
self._max_art_oovs = tf.placeholder(tf.int32, [], name='max_art_oovs')
# context encoder part
self._que_batch = tf.placeholder(tf.int32, [hps.batch_size, None], name='context_batch')
self._que_lens = tf.placeholder(tf.int32, [hps.batch_size], name='context_lens')
self._que_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='context_padding_mask')
# decoder part
self._dec_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='dec_batch')
self._target_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='target_batch')
self._dec_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='dec_padding_mask')
self._dec_switch_mask = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='dec_switch_mask')
# train label part
self._bac_start_batch = tf.placeholder(tf.float32, [hps.batch_size, None], name='bac_start_batch')
self._bac_end_batch = tf.placeholder(tf.float32, [hps.batch_size, None], name='bac_end_batch')
self._switch_batch = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='switch_batch')
def _make_feed_dict(self, batch, just_enc=False):
feed_dict ={}
feed_dict[self._enc_batch] = batch.bac_enc_batch
feed_dict[self._enc_lens] = batch.background_lens
feed_dict[self._enc_padding_mask] = batch.bac_enc_padding_mask
feed_dict[self._que_batch] = batch.con_enc_batch
feed_dict[self._que_lens] = batch.context_lens
feed_dict[self._que_padding_mask] = batch.con_enc_padding_mask
feed_dict[self._enc_batch_extend_vocab] = batch.bac_enc_batch_extend_vocab
feed_dict[self._max_art_oovs] = batch.max_bac_oovs
if not just_enc:
feed_dict[self._dec_batch] = batch.dec_batch
feed_dict[self._target_batch] = batch.target_batch # batch_size*decoder_max_time_step
feed_dict[self._dec_padding_mask] = batch.dec_padding_mask
feed_dict[self._dec_switch_mask] = batch.dec_switch_mask
feed_dict[self._switch_batch] = batch.switch_batch
feed_dict[self._bac_start_batch] = batch.bac_start_batch
feed_dict[self._bac_end_batch] = batch.bac_end_batch
return feed_dict
def _add_backgroud_encoder(self, encoder_inputs, seq_len):
with tf.variable_scope('background_encoder'):
cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
(encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)
encoder_outputs = tf.concat(encoder_outputs, 2)
return encoder_outputs, fw_st, bw_st
def _add_context_encoder(self, encoder_inputs, seq_len):
with tf.variable_scope('context_encoder'):
cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
(encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)
encoder_outputs = tf.concat(encoder_outputs, 2) # bz*timestep*2h
return encoder_outputs, fw_st, bw_st
def _add_matching_layer(self, bac_encoder_inputs, con_encoder_inputs, bac_seq_len, gate=None):
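        # Matching layer in the spirit of bi-directional attention flow (BiDAF):
        # a similarity matrix between background and context states gives
        # background-to-context attention (a context summary for every background
        # position) and context-to-background attention (the background positions
        # most relevant to the context); their combination is then re-encoded by
        # two stacked BiLSTM modeling layers.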
with tf.variable_scope('matching_layer'):
background_max_len = tf.shape(bac_encoder_inputs)[1]
context_max_len = tf.shape(con_encoder_inputs)[1]
            expanded_context = tf.tile(tf.expand_dims(con_encoder_inputs, -3), (1, background_max_len, 1, 1)) # (batch_size, background_len, context_len, 2*hidden_dim)
            expanded_background = tf.tile(tf.expand_dims(bac_encoder_inputs, -2), (1, 1, context_max_len, 1)) # (batch_size, background_len, context_len, 2*hidden_dim)
dot_product_matrix = expanded_background * expanded_context
concat_similarity_matrix = tf.concat((expanded_background, expanded_context, dot_product_matrix), -1)
            similarity_matrix = tf.reduce_mean(util.dense(concat_similarity_matrix, 1, use_bias=False, scope="similarity_matrix"), -1) # (batch_size, background_len, context_len)
# mask similarity_matrix
context_mask = tf.tile(tf.expand_dims(self._que_padding_mask, axis=1), [1, background_max_len, 1]) # Tensor shape(batch * bac_len * con_len )
context_masked_similarity_matrix = util.mask_softmax(context_mask, similarity_matrix) # Tensor shape(batch * bac_len * con_len )
# background2context
similarity_matrix_softmax = tf.nn.softmax(context_masked_similarity_matrix, -1) # Tensor shape(batch, bac_len, con_len)
background2context = tf.matmul(similarity_matrix_softmax, con_encoder_inputs) # Tensor shape(batch, bac_len, 2hz)
# context2background
background_mask = self._enc_padding_mask # Tensor shape(batch * bac_len)
squeezed_context_masked_similarity_matrix = tf.reduce_max(context_masked_similarity_matrix, -1) # Tensor shape(batch * bac_len)
background_masked_similarity_matrix = util.mask_softmax(background_mask, squeezed_context_masked_similarity_matrix) # Tensor shape(batch * bac_len)
b = tf.nn.softmax(background_masked_similarity_matrix, -1) # Tensor shape(batch * bac_len)
context2background = tf.matmul(tf.expand_dims(b, 1), bac_encoder_inputs) # (batch_size,1,bac_len) (batch_size, bac_len, feature_dim) = (batch_size,1,2hz)
context2background = tf.tile(context2background, (1, background_max_len, 1)) # (batch_size,background_max_len, 2hz)
G = tf.concat((bac_encoder_inputs, background2context, bac_encoder_inputs * background2context, bac_encoder_inputs * context2background), -1)
with tf.variable_scope('modeling_layer1'):
cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
(encoder_outputs1, (fw_st1, bw_st1)) = tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, G, dtype=tf.float32, sequence_length= bac_seq_len, swap_memory=True)
matching_output1 = tf.concat(encoder_outputs1, 2) # Tensor shape(batch, bac_len, 2*hz)
with tf.variable_scope('modeling_layer2'):
cell_fw_2 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
cell_bw_2 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)
(encoder_outputs2, (fw_st2, bw_st2)) = tf.nn.bidirectional_dynamic_rnn(cell_fw_2, cell_bw_2, matching_output1, dtype=tf.float32, sequence_length= bac_seq_len, swap_memory=True)
matching_output2 = tf.concat(encoder_outputs2, 2) # Tensor shape(batch, bac_len, 2*hz)
return matching_output2, fw_st2, bw_st2
def _reduce_states(self, fw_st, bw_st, fw_st_q, bw_st_q):
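        # The decoder is unidirectional, so the concatenated forward/backward final
        # states of the background (or matching) encoder and the context encoder
        # (4 * hidden_dim in total) are projected down to hidden_dim with a ReLU
        # layer and used as the decoder's initial LSTM state.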
hidden_dim = self._hps.hidden_dim
with tf.variable_scope('reduce_final_st'):
# Define weights and biases to reduce the cell and reduce the state
w_reduce_c = tf.get_variable('w_reduce_c', [hidden_dim * 4, hidden_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
w_reduce_h = tf.get_variable('w_reduce_h', [hidden_dim * 4, hidden_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
bias_reduce_c = tf.get_variable('bias_reduce_c', [hidden_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
bias_reduce_h = tf.get_variable('bias_reduce_h', [hidden_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
# Apply linear layer
old_c = tf.concat([fw_st.c, bw_st.c, fw_st_q.c, bw_st_q.c], 1) # Concatenation of fw and bw cell
old_h = tf.concat([fw_st.h, bw_st.h, fw_st_q.h, bw_st_q.h], 1) # Concatenation of fw and bw state
new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c) # Get new cell from old cell
new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h) # Get new state from old state
return tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
def _add_decoder(self, inputs):
hps = self._hps
cell = tf.contrib.rnn.LSTMCell(hps.hidden_dim, state_is_tuple=True, initializer=self.rand_unif_init)
outputs, out_state, attn_dists, switch_ref_time_step, switch_gen_time_step, switch_gen_pred_time_step, switch_gen_copy_time_step = hybrid_decoder(inputs,
self._dec_in_state,
self._background_final_state,
self._enc_padding_mask,
self._que_states,
self._que_padding_mask,
cell,
initial_state_attention=(hps.mode in ["test","val"]))
return outputs, out_state, attn_dists, switch_ref_time_step, switch_gen_time_step, switch_gen_pred_time_step, switch_gen_copy_time_step
def _calc_word_level_dist(self, vocab_dists, attn_dists):
with tf.variable_scope('calc_word_level_dist'):
vocab_dists = [switch_gen_pred_one_step * dist for (switch_gen_pred_one_step, dist) in zip(self.switch_gen_pred_time_step, vocab_dists)]
attn_dists = [switch_gen_copy_one_step * dist for (switch_gen_copy_one_step, dist) in zip(self.switch_gen_copy_time_step, attn_dists)]
extended_vsize = self._vocab.size() + self._max_art_oovs
extra_zeros = tf.zeros((self._hps.batch_size, self._max_art_oovs)) # batch_size*max_art_oovs
vocab_dists_extended = [tf.concat([dist, extra_zeros], 1) for dist in
vocab_dists] # list length max_dec_steps of shape (batch_size, extended_vsize)
# Project the values in the attention distributions onto the appropriate entries in the final distributions
# This means that if a_i = 0.1 and the ith encoder word is w, and w has index 500 in the vocabulary, then we add 0.1 onto the 500th entry of the final distribution
# This is done for each decoder timestep.
# This is fiddly; we use tf.scatter_nd to do the projection
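            # Illustrative shapes (made-up numbers): with batch_size=2, a background of
            # length 3 and extended-vocab ids [[7, 500, 7], ...], indices[0] becomes
            # [[0, 7], [0, 500], [0, 7]]; tf.scatter_nd sums the weights of duplicate
            # indices, so both occurrences of id 7 contribute to entry 7 of example 0.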
batch_nums = tf.range(0, limit=self._hps.batch_size) # shape (batch_size)
batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)
            attn_len = tf.shape(self._enc_batch_extend_vocab)[1] # number of background positions we attend over
batch_nums = tf.tile(batch_nums, [1, attn_len]) # shape (batch_size, attn_len)
indices = tf.stack((batch_nums, self._enc_batch_extend_vocab), axis=2) # shape (batch_size, enc_t, 2)
            shape = [self._hps.batch_size, extended_vsize] # the "canvas" shape that the copy probabilities are scattered onto
attn_dists_projected = [tf.scatter_nd(indices, copy_dist, shape) for copy_dist in attn_dists] # list length max_dec_steps (batch_size, extended_vsize)
# Add the vocab distributions and the copy distributions together to get the final distributions
# final_dists is a list length max_dec_steps; each entry is a tensor shape (batch_size, extended_vsize) giving the final distribution for that decoder timestep
# Note that for decoder timesteps and examples corresponding to a [PAD] token, this is junk - ignore.
word_level_dists = [vocab_dist + copy_dist for (vocab_dist, copy_dist) in zip(vocab_dists_extended, attn_dists_projected)]
return word_level_dists # [(batch_size, extended_vsize) ,(batch_size, extended_vsize) ...]
def _add_seq2seq(self):
hps = self._hps
vsize = self._vocab.size() # size of the vocabulary
with tf.variable_scope('seq2seq'):
# Some initializers
self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag,seed=123)
self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)
# Add embedding matrix (shared by the encoder and decoder inputs)
with tf.variable_scope('embedding'):
embedding = tf.get_variable('embedding', [vsize, hps.emb_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)
emb_que_inputs = tf.nn.embedding_lookup(embedding, self._que_batch) # tensor with shape (batch_size, max_que_steps, emb_size)
emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)
# Add the backgrpind encoder.
enc_outputs, fw_st_b, bw_st_b = self._add_backgroud_encoder(emb_enc_inputs, self._enc_lens)
self._enc_states = enc_outputs
# Add the context encoder.
que_outputs, fw_st_q, bw_st_q = self._add_context_encoder(emb_que_inputs, self._que_lens)
self._que_states = que_outputs
# Add matching layer
if self._hps.matching_layer is True:
matching_outputs, fw_st_m, bw_st_m = self._add_matching_layer(self._enc_states, self._que_states, self._enc_lens, gate=True)
self._matching_states = matching_outputs # Tensor shape(batch*bac_len*2hz)
self._background_final_state = self._matching_states
fw_st = fw_st_m
bw_st = bw_st_m
else:
self._background_final_state = self._enc_states
fw_st = fw_st_b
bw_st = bw_st_b
# Our encoder is bidirectional and our decoder is unidirectional so we need to reduce the final encoder hidden state to the right size to be the initial decoder hidden state
self._dec_in_state = self._reduce_states(fw_st, bw_st, fw_st_q, bw_st_q)
# Add the decoder.
with tf.variable_scope('hybrid_decoder'):
decoder_outputs, self._dec_out_state, self.attn_dists, self.switch_ref_time_step, self.switch_gen_time_step, self.switch_gen_pred_time_step, self.switch_gen_copy_time_step = self._add_decoder(emb_dec_inputs)
# Add the output projection to obtain the vocabulary distribution
with tf.variable_scope('generation_decoding'):
w = tf.get_variable('w', [hps.hidden_dim, vsize], dtype=tf.float32, initializer=self.trunc_norm_init)
v = tf.get_variable('v', [vsize], dtype=tf.float32, initializer=self.trunc_norm_init)
vocab_scores = [] # vocab_scores is the vocabulary distribution before applying softmax. Each entry on the list corresponds to one decoder step
for i, output in enumerate(decoder_outputs):
if i > 0:
tf.get_variable_scope().reuse_variables()
vocab_scores.append(tf.nn.xw_plus_b(output, w, v))
vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] # The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. The words are in the order they appear in the vocabulary file.
# calc final distribution from copy distribution and vocabulary distribution
self.word_level_dists = self._calc_word_level_dist(vocab_dists, self.attn_dists)
with tf.variable_scope('reference_decoding'):
# v^T tanh(W_b b_i + W_o output_t + b_attn)
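                # Two-hop span prediction: hop 1 scores a start distribution over the
                # background states given the current decoder output; the expected
                # background vector under that distribution is combined with the decoder
                # output (via an MLP or a GRU cell, depending on
                # hps.multi_hop_span_pre_mode) to form hop 2, which scores the end
                # distribution. Both distributions are masked by the background padding
                # mask before the softmax.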
self.start_dist = []
self.end_dist = []
background_states = self._background_final_state # [batch_size x max_encode_length x 2hidden_size]
encode_state_length = background_states.get_shape()[2].value # 2hidden_size
attention_vec_size = encode_state_length # 2hidden_size
w_b = tf.get_variable("W_b", [encode_state_length, attention_vec_size])
w_step = tf.get_variable('W_step', [hps.hidden_dim, attention_vec_size], dtype=tf.float32,initializer=self.trunc_norm_init)
bias_step = tf.get_variable('bias_step', [attention_vec_size], dtype=tf.float32, initializer=self.trunc_norm_init)
v = tf.get_variable("v", [attention_vec_size])
background_features = tf.einsum("ijk,kl->ijl", background_states, w_b) # shape (batch_size,max_encode_length,attention_vec_size)
if hps.multi_hop_span_pre_mode == 'mlp':
w_mlp = tf.get_variable('W_mlp', [3 * hps.hidden_dim, hps.hidden_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
bias_mlp = tf.get_variable('bias_mlp', [hps.hidden_dim], dtype=tf.float32,initializer=self.trunc_norm_init)
for i, hop_1 in enumerate(decoder_outputs):
#start step
hop_1_features = tf.nn.xw_plus_b(hop_1, w_step, bias_step) # shape (batch_size,attention_vec_size)
hop_1_features = tf.expand_dims(hop_1_features, 1) # shape (batch_size,1, attention_vec_size)
start_dist = tf.reduce_sum(v * tf.tanh(background_features + hop_1_features), 2) # (batch_size,max_encode_length)
start_dist = tf.nn.softmax(util.mask_softmax(self._enc_padding_mask, start_dist)) # take softmax. shape (batch_size, max_encode_length)
self.start_dist.append(start_dist)
start_dist_ex_dim = tf.expand_dims(start_dist, 2) # shape (batch_size, max_encode_length, 1)
start_vector = tf.reduce_sum(start_dist_ex_dim * background_states, 1) # shape (batch_size, * 2hidden_size).
start_vector = tf.reshape(start_vector, [-1, encode_state_length]) # shape (batch_size, * 2hidden_size).
#end_step
concat_vector = tf.concat([hop_1, start_vector], 1) #batch_size*3hidden_size
hop_2 = tf.nn.xw_plus_b(concat_vector, w_mlp, bias_mlp) #batch_size*hidden_size
hop_2_features = tf.nn.xw_plus_b(hop_2, w_step, bias_step) # shape (batch_size,attention_vec_size )
hop_2_features = tf.expand_dims(hop_2_features, 1) #shape(batch_size,1, attention_vec_size)
end_dist = tf.reduce_sum(v * tf.tanh(background_features + hop_2_features), 2)
end_dist = tf.nn.softmax(util.mask_softmax(self._enc_padding_mask, end_dist))
self.end_dist.append(end_dist)
elif hps.multi_hop_span_pre_mode == 'rnn':
cell_pre_span = tf.nn.rnn_cell.GRUCell(hps.hidden_dim, kernel_initializer=self.rand_unif_init)
for i, hop_1 in enumerate(decoder_outputs):
initial_state = hop_1
hop_1_features = tf.nn.xw_plus_b(initial_state, w_step, bias_step) # shape (batch_size,attention_vec_size)
hop_1_features = tf.expand_dims(hop_1_features, 1) # shape (batch_size,1, attention_vec_size)
start_dist = tf.reduce_sum(v * tf.tanh(background_features + hop_1_features),2) # (batch_size,max_encode_length)
start_dist = tf.nn.softmax(util.mask_softmax(self._enc_padding_mask,start_dist)) # take softmax. shape (batch_size, max_encode_length)
self.start_dist.append(start_dist)
start_dist_ex_dim = tf.expand_dims(start_dist, 2) # shape (batch_size, max_encode_length, 1)
start_vector = tf.reduce_sum(start_dist_ex_dim * background_states, 1) # shape (batch_size, * 2hidden_size).
start_vector = tf.reshape(start_vector, [-1, encode_state_length]) # shape (batch_size, * 2hidden_size).
output, state = cell_pre_span(start_vector, initial_state)
hop_2_features = tf.nn.xw_plus_b(state, w_step, bias_step) # shape (batch_size,attention_vec_size)
hop_2_features = tf.expand_dims(hop_2_features, 1) # shape(batch_size,1, attention_vec_size)
end_dist = tf.reduce_sum(v * tf.tanh(background_features + hop_2_features), 2)
end_dist = tf.nn.softmax(util.mask_softmax(self._enc_padding_mask, end_dist))
self.end_dist.append(end_dist)
with tf.variable_scope('train_loss'):
if hps.mode == 'train':
# Calculate the loss
self.gen_mode_work_num = tf.cast(tf.count_nonzero(self._dec_padding_mask), tf.float32)
self.switch_work_num = tf.cast(tf.count_nonzero(self._dec_switch_mask), tf.float32)
self.ref_mode_work_num = tf.cast(tf.count_nonzero(self._bac_start_batch), tf.float32) + tf.cast(tf.count_nonzero(self._bac_end_batch), tf.float32)
with tf.variable_scope('switch_loss'):
switch_gen_matrix = tf.reshape(tf.transpose(tf.convert_to_tensor(self.switch_gen_time_step), perm=[1, 0, 2]), [hps.batch_size, hps.max_dec_steps])
switch_ref_matrix = tf.reshape(tf.transpose(tf.convert_to_tensor(self.switch_ref_time_step), perm=[1, 0, 2]), [hps.batch_size, hps.max_dec_steps])
switch_ref_loss = - tf.reduce_sum(self._switch_batch * tf.log(switch_ref_matrix + 1e-10) * self._dec_switch_mask)
switch_gen_loss = - tf.reduce_sum((1 - self._switch_batch) * tf.log(switch_gen_matrix + 1e-10) * self._dec_switch_mask)
self.switch_loss = (switch_ref_loss + switch_gen_loss) / self.switch_work_num
with tf.variable_scope('generation_loss'):
word_level_dists = tf.convert_to_tensor(self.word_level_dists)
word_level_dists = tf.transpose(word_level_dists, perm=[1, 0, 2]) # batch * decoder_max_len * (vocab_size + OOV_size)
word_level_outputs_one_hot = tf.one_hot(self._target_batch, vsize + self._max_art_oovs)
word_level_crossent = - tf.reduce_sum(word_level_outputs_one_hot * tf.log(word_level_dists + 1e-10),-1)
self.generation_loss = tf.reduce_sum(word_level_crossent * self._dec_padding_mask) / self.gen_mode_work_num
with tf.variable_scope('reference_loss'):
start_dist_matrix = tf.transpose(tf.convert_to_tensor(self.start_dist), perm=[1, 0, 2]) # batch * max_dec_steps * max_encode_length
end_dist_matrix = tf.transpose(tf.convert_to_tensor(self.end_dist), perm=[1, 0, 2]) # batch * max_dec_steps * max_encode_length
start_label = tf.expand_dims(self._bac_start_batch, 1) # batch * 1* max_encode_length
end_label = tf.expand_dims(self._bac_end_batch, 1) # batch * 1* max_encode_length
start_loss_all_step = - tf.reduce_sum(start_label * tf.log(start_dist_matrix + 1e-10), -1)
end_losss_all_step = - tf.reduce_sum(end_label * tf.log(end_dist_matrix + 1e-10), -1) # batch * max_dec_steps
start_loss = tf.reduce_sum(start_loss_all_step * self._switch_batch)
end_loss = tf.reduce_sum(end_losss_all_step * self._switch_batch)
switch_adhere_loss = - tf.reduce_sum(self._switch_batch * tf.log(switch_ref_matrix + 1e-10) * self._dec_switch_mask)
self.reference_loss = (start_loss + end_loss + switch_adhere_loss) / self.ref_mode_work_num
with tf.variable_scope('total_loss'):
self.total_loss = self.switch_loss + self.generation_loss + self.reference_loss
tf.summary.scalar('total_loss', self.total_loss)
with tf.variable_scope('inference'):
if hps.mode in ['val','test']:
                    # We run greedy-search inference one decoder step (one word or one span) at a time
# generation mode
infer_word_level_dists = self.word_level_dists[0]
self.word_probs, self.word_ids = tf.nn.top_k(infer_word_level_dists, 1) # take the 1 #1*1
#reference mode
outer = tf.matmul(tf.expand_dims(self.start_dist[0], 2), tf.expand_dims(self.end_dist[0], axis=1)) # shape(batch * bac_len * bac_len)
outer = tf.matrix_band_part(outer, 0, hps.max_span_len)
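                    # matrix_band_part keeps only entries with start <= end and
                    # end - start <= max_span_len, so the row/column maxima below pick
                    # the most probable legal (start, end) span.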
self.start_prob, self.start_index = tf.nn.top_k(tf.reduce_max(outer, 2), 1) # shape(batch*l_start)=> batch*1
self.end_prob, self.end_index = tf.nn.top_k(tf.reduce_max(outer, 1), 1) # shape(batch*l_end)=> batch*1
#switcher
self.infer_switch_ref = self.switch_ref_time_step[0] # 1*1
self.infer_switch_gen = self.switch_gen_time_step[0] # 1*1
self.infer_switch_gen_pred = self.switch_gen_pred_time_step[0] # 1*1
self.infer_switch_gen_copy = self.switch_gen_copy_time_step[0] # 1*1
self.infer_attn_dists = self.attn_dists[0]
def _add_train_op(self):
"""Sets self._train_op, the op to run for training."""
# Take gradients of the trainable variables w.r.t. the loss function to minimize
hps = self._hps
self._lr = tf.Variable(hps.lr, trainable=False, name='learning_rate')
loss_to_minimize = self.total_loss
tvars = tf.trainable_variables()
gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
# Clip the gradients
grads, _ = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)
optimizer = tf.train.AdamOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')
def build_graph(self):
tf.logging.info('Building graph...')
t0 = time.time()
self._add_placeholders()
self._add_seq2seq()
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if self._hps.mode == 'train':
self._add_train_op()
self._summaries = tf.summary.merge_all()
t1 = time.time()
tf.logging.info('Time to build graph: %i seconds', t1 - t0)
def run_train_step(self, sess, batch):
"""Runs one training iteration. Returns a dictionary containing train op, summaries, loss, global_step ."""
feed_dict = self._make_feed_dict(batch)
to_return = {
'train_op': self._train_op,
'summaries': self._summaries,
'total_loss': self.total_loss,
'switch_loss': self.switch_loss,
'generation_loss': self.generation_loss,
'reference_loss': self.reference_loss,
'global_step': self.global_step,
}
return sess.run(to_return, feed_dict)
def run_encoder(self, sess, batch):
feed_dict = self._make_feed_dict(batch, just_enc=True) # feed the batch into the placeholders
(enc_batch, bac_states, que_states, dec_in_state, global_step) = sess.run([self._enc_batch, self._background_final_state, self._que_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder
dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c, dec_in_state.h)
return enc_batch, bac_states, que_states, dec_in_state
def inference_step(self, sess, batch, latest_tokens, bac_states, que_states, dec_init_states):
new_dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_init_states.c, dec_init_states.h)
feed = {
self._background_final_state: bac_states,
self._que_states: que_states,
self._enc_padding_mask: batch.bac_enc_padding_mask,
self._que_padding_mask: batch.con_enc_padding_mask,
self._enc_batch_extend_vocab: batch.bac_enc_batch_extend_vocab,
self._max_art_oovs: batch.max_bac_oovs,
self._dec_in_state: new_dec_in_state,
self._dec_batch: np.array(latest_tokens)
}
to_return = {
"word_ids": self.word_ids,
"word_probs": self.word_probs,
"start_index": self.start_index,
"end_index": self.end_index,
"start_prob": self.start_prob,
"end_prob": self.end_prob,
"switch_ref": self.infer_switch_ref,
"switch_gen": self.infer_switch_gen,
"switch_gen_pred": self.infer_switch_gen_pred,
"switch_gen_copy": self.infer_switch_gen_copy,
"attn_dists": self.infer_attn_dists,
"states": self._dec_out_state
}
results = sess.run(to_return, feed_dict=feed) # infer step
        # Repackage results['states'] into an LSTMStateTuple for the next decoder step (greedy search keeps a single hypothesis)
word_ids = results['word_ids'][0].tolist()[0]
word_probs = results['word_probs'][0].tolist()[0]
span_ids = [results['start_index'].tolist()[0][0], results['end_index'].tolist()[0][0]]
span_probs = [results['start_prob'].tolist()[0][0], results['end_prob'].tolist()[0][0]]
switch_ref_prob = results['switch_ref'][0].tolist()[0]
switch_gen_prob = results['switch_gen'][0].tolist()[0]
switch_gen_pred_prob = results['switch_gen_pred'][0].tolist()[0]
switch_gen_copy_prob = results['switch_gen_copy'][0].tolist()[0]
attn_dists = results['attn_dists'][0].tolist()
new_states = tf.contrib.rnn.LSTMStateTuple(results['states'].c, results['states'].h)
return word_ids, word_probs, span_ids, span_probs, switch_ref_prob, switch_gen_prob, switch_gen_pred_prob, switch_gen_copy_prob, attn_dists, new_states
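# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal greedy decoding loop driven only by the two methods above. The
# start_id/stop_id arguments, the feeding of [[tokens[-1]]] as the latest token
# and the termination logic are assumptions made for illustration; the project's
# real decoding logic lives in its inference module.
def _greedy_decode_sketch(sess, model, batch, start_id, stop_id, max_steps=40):
    enc_batch, bac_states, que_states, dec_state = model.run_encoder(sess, batch)
    tokens, spans = [start_id], []
    for _ in range(max_steps):
        (word_id, word_prob, span_ids, span_probs, switch_ref, switch_gen,
         _, _, _, dec_state) = model.inference_step(
            sess, batch, [[tokens[-1]]], bac_states, que_states, dec_state)
        if switch_ref > switch_gen:
            spans.append(span_ids)    # reference mode: cite a background span
        else:
            tokens.append(word_id)    # generation mode: emit the top-1 word
            if word_id == stop_id:
                break
    return tokens, spans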
| 32,058 | 68.092672 | 223 | py |
RefNet | RefNet-master/data.py | import glob
import random
import struct
import copy
from tensorflow.core.example import example_pb2
PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words
START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence
STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences
# Note: none of [PAD], [UNK], [START], [STOP] should appear in the vocab file.
class Vocab:
"""Vocabulary class for mapping between words and ids (integers) """
def __init__(self, vocab_file, max_size):
"""Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.
Args:
vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.
max_size: integer. The maximum size of the resulting Vocabulary."""
self._word_to_id = {}
self._id_to_word = {}
self._count = 0 # keeps track of total number of words in the Vocab
# [PAD],[UNK], [START] and [STOP] get the ids 0,1,2,3.
for w in [PAD_TOKEN, UNKNOWN_TOKEN, START_DECODING, STOP_DECODING]:
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
# Read the vocab file and add words up to max_size
with open(vocab_file, 'r', encoding="utf-8") as vocab_f:
for line in vocab_f:
pieces = line.split(" ")
if len(pieces) != 2:
print('Warning: incorrectly formatted line in vocabulary file: %s\n' % line)
continue
w = pieces[0]
if w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
raise Exception('[UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w)
if w in self._word_to_id:
raise Exception('Duplicated word in vocabulary file: %s' % w)
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
if max_size != 0 and self._count >= max_size:
print("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (max_size, self._count))
break
print("Finished constructing vocabulary of %i total words. Last word added: %s" % (self._count, self._id_to_word[self._count-1]))
def word2id(self, word):
"""
Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV."""
if word not in self._word_to_id:
return self._word_to_id[UNKNOWN_TOKEN]
return self._word_to_id[word]
def id2word(self, word_id):
"""
Returns the word (string) corresponding to an id (integer)."""
if word_id not in self._id_to_word:
raise ValueError('Id not found in vocab: %d' % word_id)
return self._id_to_word[word_id]
def size(self):
"""Returns the total size of the vocabulary"""
return self._count
def example_generator(data_path, single_pass):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size of <blob>.
<blob> is serialized tf.Example proto .
The tf.Example contains the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards( *), e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
      Boolean. If True, go through the dataset exactly once,
      generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example .
"""
epoch = 0
while True:
filelist = glob.glob(data_path)
assert filelist, ('Error: Empty filelist at %s' % data_path)
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
for f in filelist:
reader = open(f, 'rb')
while True:
len_bytes = reader.read(8)
if not len_bytes:
break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
yield example_pb2.Example.FromString(example_str)
if single_pass:
print("example_generator completed reading all datafiles. No more data.")
break
epoch += 1
def background2ids(background_token, vocab):
"""Map the article words to their ids. Also return a list of OOVs in the article.
Args:
background_token: list of words (strings)
vocab: Vocabulary object
Returns:
ids:
A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
oovs:
A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers. """
ids = []
oovs = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in background_token:
i = vocab.word2id(w)
if i == unk_id:
if w not in oovs:
oovs.append(w)
oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV...
ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second...
else:
ids.append(i)
return ids, oovs
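# Illustrative example, assuming a hypothetical vocab of size 50000 in which
# "the" is in-vocabulary with id 37 and "zorblax" is out-of-vocabulary:
#   background2ids(["the", "zorblax"], vocab) -> ([37, 50000], ["zorblax"])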
def context2ids(context_token, vocab):
"""Map the context words to their ids. Also return a list of OOVs in the context
Args:
context_token: list of words (strings)
vocab: Vocabulary object
Returns:
ids:
A list of word ids (integers); OOVs are represented by their temporary query OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
oovs:
A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers."""
ids = []
oovs = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in context_token:
i = vocab.word2id(w)
if i == unk_id: # If w is OOV
if w not in oovs: # Add to list of OOVs
oovs.append(w)
oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV...
ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second...
else:
ids.append(i)
return ids, oovs
def response2ids(response_token , vocab, background_oovs):
"""Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.
Args:
response_token: list of words (strings)
vocab: Vocabulary object
background_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers
Returns:
ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id."""
ids = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in response_token:
i = vocab.word2id(w)
if i == unk_id: # If w is an OOV word
if w in background_oovs: # If w is an in-article OOV
vocab_idx = vocab.size() + background_oovs.index(w) # Map to its temporary article OOV number
ids.append(vocab_idx)
else: # If w is an out-of-article OOV
ids.append(unk_id)
else:
ids.append(i)
return ids
def outputids2words(id_list, vocab, background_oovs, backgrounds_token): # response in our case
"""Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string.
Args:
id_list: list of ids (integers)
vocab: Vocabulary object
background_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids
Returns:
words: list of words (strings)
"""
words = []
highlights = []
spans =[]
for i in id_list:
if isinstance(i, list):
words = words + backgrounds_token[i[0]:(i[1]+1)]
spans = spans + backgrounds_token[i[0]:(i[1]+1)]
highlights = highlights + ["<start<"] + backgrounds_token[i[0]:(i[1]+1)] + [">end>"]
else:
try:
w = vocab.id2word(i)
except ValueError as e:
assert background_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
background_oov_idx = i - vocab.size()
try:
w = background_oovs[background_oov_idx]
except IndexError as e: # i doesn't correspond to an article oov
raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, background_oov_idx, len(background_oovs)))
words.append(w)
highlights.append(w)
return words, highlights,spans
def show_background_span(background_token, b_start, b_end):
modify_background_token = copy.copy(background_token)
modify_background_token.insert(b_start, "<start<")
modify_background_token.insert(b_end+2, ">end>")
out_str = ' '.join(modify_background_token)
return out_str
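# Example: show_background_span(["a", "b", "c"], 1, 2) -> "a <start< b c >end>",
# i.e. the gold span tokens are wrapped in marker tokens for display.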
def show_response_span(response_token, r_start, r_end):
modify_response_token = copy.copy(response_token)
modify_response_token.insert(r_start, "<start<")
modify_response_token.insert(r_end+2, ">end>")
out_str = ' '.join(modify_response_token)
return out_str
def show_background_oovs(background_text, vocab):
"""Returns the article string, highlighting the OOVs by placing __underscores__ around them"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = background_text.split(' ')
words = [("__%s__" % w) if vocab.word2id(w)==unk_token else w for w in words]
out_str = ' '.join(words)
return out_str
def show_context_oovs(context_text, vocab):
"""Returns the query string, highlighting the OOVs by placing __underscores__ around them"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = context_text.split(' ')
words = [("__%s__" % w) if vocab.word2id(w)==unk_token else w for w in words]
out_str = ' '.join(words)
return out_str
def show_response_oovs(response_text, vocab, background_oovs):
"""Returns the abstract string, highlighting the article OOVs with __underscores__.
If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!.
Args:
response_text: string
vocab: Vocabulary object
background_oovs: list of words (strings)
"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = response_text.split(' ')
new_words = []
for w in words:
if vocab.word2id(w) == unk_token: # w is oov
if background_oovs is None: # baseline mode
new_words.append("__%s__" % w)
else: # pointer-generator mode
if w in background_oovs:
new_words.append("__%s__" % w)
else:
new_words.append("!!__%s__!!" % w)
else: # w is in-vocab word
new_words.append(w)
out_str = ' '.join(new_words)
return out_str
| 10,802 | 36.380623 | 213 | py |
RefNet | RefNet-master/hybrid_decoder.py | import tensorflow as tf
import util
def hybrid_decoder(decoder_inputs, initial_state, encoder_states, enc_padding_mask, query_states, que_padding_mask, cell, initial_state_attention=False):
with tf.variable_scope("attention_decoder"):
        batch_size = encoder_states.get_shape()[0].value  # if this line fails, it's because the batch size isn't defined
        attn_size = encoder_states.get_shape()[2].value  # 2*hidden_dim; if this line fails, it's because the attention length isn't defined
        q_attn_size = query_states.get_shape()[2].value  # 2*hidden_dim
# Reshape encoder_states (need to insert a dim)
encoder_states = tf.expand_dims(encoder_states, 2) # now is shape (batch_size, attn_len, 1, attn_size)
query_states = tf.expand_dims(query_states, 2)
# To calculate attention, we calculate
# v^T tanh(W_h h_i + W_s s_t + b_attn)
# where h_i is an encoder state, and s_t a decoder state.
# attn_vec_size is the length of the vectors v, b_attn, (W_h h_i) and (W_s s_t).
# We set it to be equal to the size of the encoder states.
attention_vec_size = attn_size
q_attention_vec_size = q_attn_size
# Get the weight matrix W_h and apply it to each encoder state to get (W_h h_i), the encoder features
W_h = tf.get_variable("W_h", [1, 1, attn_size, attention_vec_size])
encoder_features = tf.nn.conv2d(encoder_states, W_h, [1, 1, 1, 1],"SAME") # shape (batch_size,attn_length,1,attention_vec_size)
# Get the weight vectors v
v = tf.get_variable("v", [attention_vec_size])
# Get the weight matrix W_q and apply it to each encoder state to get (W_q q_i), the query features
W_q = tf.get_variable("W_q", [1, 1, q_attn_size, q_attention_vec_size])
query_features = tf.nn.conv2d(query_states, W_q, [1, 1, 1, 1],"SAME") # shape (batch_size,q_attn_length,1,q_attention_vec_size)
# Get the weight vectors v_q
v_q = tf.get_variable("v_q", [q_attention_vec_size])
def background_attention(decoder_state):
with tf.variable_scope("background_attention"):
                # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)
decoder_features = util.linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)
decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)
def masked_background_attention(e):
"""Take softmax of e then apply enc_padding_mask"""
attn_dist = tf.nn.softmax(util.mask_softmax(enc_padding_mask, e)) # take softmax. shape (batch_size, attn_length)
return attn_dist
# Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)
e = tf.reduce_sum(v * tf.tanh(encoder_features + decoder_features), [2, 3]) # calculate e
# Calculate attention distribution
attn_dist = masked_background_attention(e) # batch_size,attn_length
# Calculate the context vector from attn_dist and encoder_states
context_vector = tf.reduce_sum(tf.reshape(attn_dist, [batch_size, -1, 1, 1]) * encoder_states, [1, 2])
context_vector = tf.reshape(context_vector, [-1, attn_size])
return context_vector, attn_dist
def context_attention(decoder_state):
with tf.variable_scope("context_attention"):
# Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)
decoder_features = util.linear(decoder_state, q_attention_vec_size, True) # shape (batch_size, q_attention_vec_size)
decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1),1) # reshape to (batch_size, 1, 1, attention_vec_size)
def masked_context_attention(e):
"""Take softmax of e then apply enc_padding_mask"""
attn_dist = tf.nn.softmax(util.mask_softmax(que_padding_mask, e)) # take softmax. shape (batch_size, attn_length)
return attn_dist
# Calculate v^T tanh(W_q q_i + W_s s_t + b_attn)
f = tf.reduce_sum(v_q * tf.tanh(query_features + decoder_features), [2, 3])
# Calculate attention distribution
q_attn_dist = masked_context_attention(f)
# Calculate the context vector from attn_dist and encoder_states
q_context_vector = tf.reduce_sum(tf.reshape(q_attn_dist, [batch_size, -1, 1, 1]) * query_states, [1, 2]) # shape (batch_size, attn_size).
q_context_vector = tf.reshape(q_context_vector, [-1, q_attn_size])
return q_context_vector, q_attn_dist
outputs = []
background_attn_dists = []
switcher_gen_pred_time_step = []
switcher_gen_copy_time_step = []
switcher_ref_time_step = []
switcher_gen_time_step = []
state = initial_state
context_vector = tf.zeros([batch_size, attn_size])
context_vector.set_shape([None, attn_size])
q_context_vector = tf.zeros([batch_size, q_attn_size])
q_context_vector.set_shape([None, q_attn_size])
if initial_state_attention: # true in decode mode
context_vector, _ = background_attention(initial_state)
q_context_vector, _ = context_attention(initial_state)
for i, inp in enumerate(decoder_inputs):
tf.logging.info("Adding hybrid_decoder timestep %i of %i", i + 1, len(decoder_inputs))
if i > 0:
tf.get_variable_scope().reuse_variables()
# Merge input and previous attentions into one vector x of the same size as inp
input_size = inp.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from input: %s" % inp.name)
x = util.linear([inp] + [context_vector] + [q_context_vector], input_size, True)
# Run the decoder RNN cell. cell_output = decoder state
cell_output, state = cell(x, state)
# Run the attention mechanism.
if i == 0 and initial_state_attention: # always true in decode mode
with tf.variable_scope(tf.get_variable_scope(), reuse=True): # you need this because you've already run the initial attention(...) call
context_vector, attn_dist = background_attention(state)
with tf.variable_scope(tf.get_variable_scope(), reuse=True): # you need this because you've already run the initial attention(...) call
q_context_vector, q_attn_dist = context_attention(state)
else:
context_vector, attn_dist = background_attention(state)
q_context_vector, q_attn_dist = context_attention(state)
background_attn_dists.append(attn_dist)
# Calculate switcher
with tf.variable_scope('calculate_switcher'):
switcher_matrix = util.linear([context_vector, q_context_vector, state.c, state.h, x], 3, True)
switcher_matrix = tf.nn.softmax(switcher_matrix)
switcher_gen_pred_prob = tf.expand_dims(switcher_matrix[:, 0], 1) # batch*1
switcher_gen_copy_prob = tf.expand_dims(switcher_matrix[:, 1], 1) # batch*1
switcher_gen_prob = switcher_gen_pred_prob + switcher_gen_copy_prob # batch*1
switcher_ref_prob = tf.expand_dims(switcher_matrix[:, 2], 1) # batch*1
switcher_gen_pred_time_step.append(switcher_gen_pred_prob)
switcher_gen_copy_time_step.append(switcher_gen_copy_prob)
switcher_gen_time_step.append(switcher_gen_prob)
switcher_ref_time_step.append(switcher_ref_prob)
with tf.variable_scope("AttnOutputProjection"):
output = util.linear([cell_output] + [context_vector] + [q_context_vector], cell.output_size, True)
outputs.append(output)
return outputs, state, background_attn_dists, switcher_ref_time_step, switcher_gen_time_step, switcher_gen_pred_time_step, switcher_gen_copy_time_step
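# Note on the switcher computed above: at every decoder step a 3-way softmax
# splits the probability mass into switcher_gen_pred (generate from the
# vocabulary), switcher_gen_copy (copy via the background attention) and
# switcher_ref (cite a background span); the total generation probability is
# the sum of the first two. E.g. a softmax output of [0.5, 0.3, 0.2] gives
# p_gen = 0.8 and p_ref = 0.2.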
| 8,361 | 55.884354 | 158 | py |
RefNet | RefNet-master/run.py | import time
import os
import tensorflow as tf
import numpy as np
from collections import namedtuple
from data import Vocab
from batcher import Batcher
from model import Model
from inference import Inference
import util
import yaml
import json
import time
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('config_file', 'config.yaml', 'pass the config_file through command line if new expt')
config = yaml.load(open(FLAGS.config_file, 'r'))
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu_device_id']
tf.app.flags.DEFINE_string('mode', config['mode'], 'must be one of train/val/test')
tf.app.flags.DEFINE_string('train_path', config['train_path'], 'Default path to the chunked files')
tf.app.flags.DEFINE_string('dev_path', config['dev_path'], 'Default path to the chunked files')
tf.app.flags.DEFINE_string('test_path', config['test_path'], 'Default path to the chunked files')
tf.app.flags.DEFINE_string('vocab_path', config['vocab_path'], 'Path expression to text vocabulary file.')
tf.app.flags.DEFINE_string('appoint_test', None, 'appoint a model to test')
# storage
tf.app.flags.DEFINE_string('log_root', config['log_root'], 'Root directory for all logging.')
tf.app.flags.DEFINE_string('exp_name', config['exp_name'],'Name for experiment. Logs will be saved in a directory with this name, under log_root.')
# Hyperparameters
tf.app.flags.DEFINE_integer('epoch_num', config['epoch_num'], 'the max num of train epoch num')
tf.app.flags.DEFINE_integer('hidden_dim', config['hidden_dim'], 'dimension of RNN hidden states')
tf.app.flags.DEFINE_integer('emb_dim', config['emb_dim'], 'dimension of word embeddings')
tf.app.flags.DEFINE_integer('batch_size', config['batch_size'], 'minibatch size')
tf.app.flags.DEFINE_integer('max_bac_enc_steps', config['max_bac_enc_steps'], 'max timesteps of encoder (max source text tokens)')
tf.app.flags.DEFINE_integer('max_con_enc_steps', config['max_con_enc_steps'],'max timesteps of query encoder (max source query tokens)')
tf.app.flags.DEFINE_integer('max_dec_steps', config['max_dec_steps'], 'max timesteps of decoder (max summary tokens)')
tf.app.flags.DEFINE_integer('vocab_size', config['vocab_size'],'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.')
tf.app.flags.DEFINE_float('lr', config['lr'], 'learning rate')
tf.app.flags.DEFINE_float('rand_unif_init_mag', config['rand_unif_init_mag'], 'magnitude for lstm cells random uniform inititalization')
tf.app.flags.DEFINE_float('trunc_norm_init_std', config['trunc_norm_init_std'], 'std of trunc norm init, used for initializing everything else')
tf.app.flags.DEFINE_float('max_grad_norm', config['max_grad_norm'], 'for gradient clipping')
tf.app.flags.DEFINE_integer('max_span_len', config['max_span_len'], 'the max length of predicted span')
tf.app.flags.DEFINE_string('multi_hop_span_pre_mode', config['multi_hop_span_pre_mode'], 'the mode of muilti_hop_span prediction.[rnn|mlp]')
tf.app.flags.DEFINE_bool('multi_label_eval', config['multi_label_eval'], 'do multi_label_evalation for testset, only for test')
tf.app.flags.DEFINE_bool('matching_layer', config['matching_layer'], 'whether use matching layer or not ')
tf.app.flags.DEFINE_bool('matching_gate', config['matching_gate'], 'whether use gate in matching layer')
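# A minimal config.yaml sketch covering the keys read above; the values shown
# are illustrative placeholders, not the published hyperparameters:
#   gpu_device_id: "0"
#   mode: train
#   train_path: data/finished_files/chunked/train_*
#   dev_path: data/finished_files/chunked/val_*
#   test_path: data/finished_files/chunked/test_*
#   vocab_path: data/finished_files/vocab
#   log_root: log
#   exp_name: refnet_base
#   epoch_num: 20
#   hidden_dim: 256
#   emb_dim: 128
#   batch_size: 16
#   max_bac_enc_steps: 300
#   max_con_enc_steps: 65
#   max_dec_steps: 100
#   vocab_size: 25000
#   lr: 0.001
#   rand_unif_init_mag: 0.02
#   trunc_norm_init_std: 1e-4
#   max_grad_norm: 2.0
#   max_span_len: 30
#   multi_hop_span_pre_mode: mlp
#   multi_label_eval: false
#   matching_layer: true
#   matching_gate: true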
def train(model, batcher):
train_dir = os.path.join(FLAGS.log_root, "train")
if not os.path.exists(train_dir):
os.makedirs(train_dir)
print("Preparing or waiting for session...")
model.build_graph()
saver = tf.train.Saver(max_to_keep=FLAGS.epoch_num)
print("Created session.")
sess = tf.Session(config=util.get_config())
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
resume = True
if resume:
print("Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(train_dir)
if ckpt and ckpt.model_checkpoint_path:
last_global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
saver.restore(sess, ckpt.model_checkpoint_path)
print('Loading success, last_global_step is %s' % last_global_step)
else:
print('No checkpoint file found')
print("starting run_training")
train_set_num = 34486 # The number of examples in training set
log_epoch = {}
steps_for_one_epoch = int(train_set_num / FLAGS.batch_size)
while True:
batch = batcher.next_batch()
print('Training: %s' % FLAGS.exp_name)
t0 = time.time()
results = model.run_train_step(sess, batch)
t1 = time.time()
total_loss = results['total_loss']
switch_loss = results['switch_loss']
generation_loss = results['generation_loss']
reference_loss = results['reference_loss']
global_step = results['global_step']
epoch = int(global_step/steps_for_one_epoch)
if global_step % steps_for_one_epoch == 0:
log_epoch[epoch] = int(global_step)
saver.save(sess, os.path.join(train_dir, "model.ckpt"), global_step=global_step)
if epoch == FLAGS.epoch_num:
print("Compele %d epochs, train is finished" % FLAGS.epoch_num)
file = open(os.path.join(train_dir, "log_epoch.json"), 'w', encoding='utf-8')
json.dump(log_epoch, file)
file.close()
break
print('Epoch:%d, Step:%d, Train Loss: %f, Switch_Loss: %f, Generation_Loss: %f, Reference_Loss: %f, Seconds: %.3f' % (epoch, global_step, total_loss, switch_loss, generation_loss, reference_loss, t1 - t0))
if not np.isfinite(total_loss):
raise Exception("Loss is not finite. Stopping.")
summaries = results['summaries']
summary_writer.add_summary(summaries, global_step) # write the summaries
if global_step % 100 == 0: # flush the summary writer every so often
summary_writer.flush()
sess.close()
def main(unused_argv):
global config
if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
raise Exception("Problem with flags: %s" % unused_argv)
print('Starting %s in %s mode...' % (FLAGS.exp_name, FLAGS.mode))
FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
if not os.path.exists(FLAGS.log_root):
if FLAGS.mode == "train":
os.makedirs(FLAGS.log_root)
else:
raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))
hparam_list = ['mode', 'lr', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm','hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps','max_bac_enc_steps', 'max_con_enc_steps', 'max_span_len', 'multi_hop_span_pre_mode', 'matching_layer', 'matching_gate']
hps_dict = {}
for key, val in FLAGS.__flags.items():
if key in hparam_list:
hps_dict[key] = val
hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)
tf.set_random_seed(111)
if hps.mode == 'train':
batcher = Batcher(FLAGS.train_path, vocab, hps, single_pass=False)
print("creating model...")
model = Model(hps, vocab)
train(model, batcher)
elif hps.mode == 'val':
train_dir = os.path.join(FLAGS.log_root, "train")
hps = hps._replace(batch_size=1)
infer_model_hps = hps._replace(max_dec_steps=1)
try:
r = open(os.path.join(train_dir, "finished_val_models.json"), 'r', encoding='utf-8')
finished_val_models = json.load(r)
r.close()
except FileNotFoundError:
finished_val_models = {"finished_val_models": []}
w = open(os.path.join(train_dir, "finished_val_models.json"), 'w', encoding='utf-8')
json.dump(finished_val_models, w)
w.close()
while True:
ckpt = tf.train.get_checkpoint_state(train_dir)
if ckpt and ckpt.model_checkpoint_path:
for ckpt_path in list(ckpt.all_model_checkpoint_paths):
if ckpt_path in finished_val_models["finished_val_models"]:
print("Val_mode: %s already has been evaluated, skip it" % ckpt_path)
pass
else:
print("Val_mode: start new eval %s" % ckpt_path)
batcher = Batcher(FLAGS.dev_path, vocab, hps, single_pass=True)
model = Model(infer_model_hps, vocab)
val_infer = Inference(model, batcher, vocab, ckpt_path)
val_infer.infer()
tf.reset_default_graph()
finished_val_models["finished_val_models"].append(ckpt_path)
w = open(os.path.join(train_dir, "finished_val_models.json"), 'w', encoding='utf-8')
json.dump(finished_val_models, w)
w.close()
print("Val_mode: finished one eval %s" % ckpt_path)
print("Val_mode: current iterations of all_model_checkpoint_paths are completed...")
print("Val_mode: finished %d modes" % len(finished_val_models["finished_val_models"]))
if len(finished_val_models["finished_val_models"]) == FLAGS.epoch_num:
print("All val is ended")
break
else:
print('Val_mode: wait train finish the first epoch...')
time.sleep(60)
elif hps.mode == 'test':
hps = hps._replace(batch_size=1)
batcher = Batcher(FLAGS.test_path, vocab, hps, single_pass=True)
infer_model_hps = hps._replace(max_dec_steps=1)
model = Model(infer_model_hps, vocab)
if FLAGS.test_model_dir is None:
raise Exception("should appoint the test_model_dir")
test_infer = Inference(model, batcher, vocab, FLAGS.test_model_dir)
test_infer.infer()
else:
raise ValueError("The 'mode' flag must be one of train/val/test")
if __name__ == '__main__':
tf.app.run()
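# Example invocation (assuming a prepared config.yaml next to run.py); flags
# defined above, e.g. --mode, can also be overridden on the command line:
#   python run.py --config_file=config.yaml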
| 10,254 | 48.302885 | 276 | py |
RefNet | RefNet-master/util.py | import tensorflow as tf
import time
import os
FLAGS = tf.app.flags.FLAGS
def get_config():
"""Returns config for tf.session"""
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
return config
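# mask_softmax: despite the name, this only applies the mask -- positions where
# seq_mask is 0 are pushed to -1e10 so that a subsequent tf.nn.softmax assigns
# them (numerically) zero probability.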
def mask_softmax(seq_mask, scores):
seq_mask = tf.cast(seq_mask, tf.bool)
score_mask_values = -1e10 * tf.ones_like(scores, dtype=tf.float32)
return tf.where(seq_mask, scores, score_mask_values)
def load_ckpt(saver, sess, ckpt_dir="train"):
while True:
try:
ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)
ckpt_state = tf.train.get_checkpoint_state(
ckpt_dir)
            print('Loading checkpoint %s' % ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
return ckpt_state.model_checkpoint_path
except:
print("Failed to load checkpoint from %s. Sleeping for %i secs...", ckpt_dir, 10)
time.sleep(10)
def linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
    if args is None or (isinstance(args, (list, tuple)) and not args):
raise ValueError("`args` must be specified")
if not isinstance(args, (list, tuple)):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix",
[total_arg_size, output_size])
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(args, 1), matrix)
if not bias:
return res
bias_term = tf.get_variable("Bias", [output_size], initializer=tf.constant_initializer(bias_start))
return res + bias_term
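# dense: a single fully-connected layer applied to the last dimension of
# `inputs` (of any rank); the input is flattened to 2-D for the matmul and
# reshaped back afterwards.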
def dense(inputs, hidden, use_bias=True, scope="dense"):
with tf.variable_scope(scope):
shape = tf.shape(inputs)
dim = inputs.get_shape().as_list()[-1]
out_shape = [shape[idx] for idx in range(
len(inputs.get_shape().as_list()) - 1)] + [hidden]
flat_inputs = tf.reshape(inputs, [-1, dim])
W = tf.get_variable("W", [dim, hidden])
res = tf.matmul(flat_inputs, W)
if use_bias:
b = tf.get_variable(
"b", [hidden], initializer=tf.constant_initializer(0.))
res = tf.nn.bias_add(res, b)
res = tf.reshape(res, out_shape)
return res | 3,441 | 34.122449 | 103 | py |
RefNet | RefNet-master/metrics/f1.py | """ Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
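# Worked example: f1_score("a cat sat", "the cat sat on") normalizes both sides
# (lowercasing, dropping punctuation and articles) to "cat sat" vs. "cat sat on";
# 2 tokens overlap, so precision = 1.0, recall = 2/3 and F1 = 0.8.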
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(prediction, ground_truths):
f1 = exact_match = total = 0
assert len(prediction) == len(ground_truths),"the length of predicted span and ground_truths span should be same"
for i, pre in enumerate(prediction):
if len(pre) == 0:
continue # if the length of the span predicted is 0, we skip it.
exact_match += metric_max_over_ground_truths(exact_match_score, pre, ground_truths[i])
f1 += metric_max_over_ground_truths(f1_score, pre, ground_truths[i])
total += 1
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return f1, exact_match, total
| 2,259 | 31.285714 | 117 | py |
RefNet | RefNet-master/metrics/bleu.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEU metric implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import subprocess
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
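# Usage sketch (assumes metrics/multi-bleu.perl is present and a perl
# interpreter is available on PATH):
#   moses_multi_bleu(np.array(["the cat sat"]), np.array(["the cat sat down"]))
#   returns the corpus-level BLEU as an np.float32 value.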
def moses_multi_bleu(hypotheses, references, lowercase=False):
#print('This was called')
"""Calculate the bleu score for hypotheses and references
    using the MOSES multi-bleu.perl script.
Args:
hypotheses: A numpy array of strings where each string is a single example.
references: A numpy array of strings where each string is a single example.
lowercase: If true, pass the "-lc" flag to the multi-bleu script
Returns:
The BLEU score as a float32 value.
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
"""
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl")
os.chmod(multi_bleu_path, 0o755)
except: #pylint: disable=W0702
tf.logging.info("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
"""
multi_bleu_path="metrics/multi-bleu.perl"
os.chmod(multi_bleu_path, 0o755)
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(
bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
tf.logging.warning("multi-bleu.perl script returned non-zero exit code")
tf.logging.warning(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return np.float32(bleu_score) | 3,320 | 32.887755 | 80 | py |
RefNet | RefNet-master/metrics/rouge.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROUGe metric implementation.
This is a modified and slightly extended verison of
https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import numpy as np
#pylint: disable=C0103
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
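# Example: _get_ngrams(2, ["a", "b", "c"]) -> {("a", "b"), ("b", "c")}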
def _split_into_words(sentences):
"""Splits multiple sentences into words and flattens the result"""
return list(itertools.chain(*[_.split(" ") for _ in sentences]))
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = _split_into_words(sentences)
return _get_ngrams(n, words)
def _len_lcs(x, y):
"""
Returns the length of the Longest Common Subsequence between sequences x
and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""
Computes the length of the longest common subsequence (lcs) between two
strings. The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _recon_lcs(x, y):
"""
Returns the Longest Subsequence between x and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
sequence: LCS of x and y
"""
i, j = len(x), len(y)
table = _lcs(x, y)
def _recon(i, j):
"""private recon calculation"""
if i == 0 or j == 0:
return []
elif x[i - 1] == y[j - 1]:
return _recon(i - 1, j - 1) + [(x[i - 1], i)]
elif table[i - 1, j] > table[i, j - 1]:
return _recon(i - 1, j)
else:
return _recon(i, j - 1)
recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))
return recon_tuple
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
  Source: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
    reference_sentences: The sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
A tuple (f1, precision, recall) for ROUGE-N
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if evaluated_count == 0:
precision = 0.0
else:
precision = overlapping_count / evaluated_count
if reference_count == 0:
recall = 0.0
else:
recall = overlapping_count / reference_count
f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
# return overlapping_count / reference_count
return f1_score, precision, recall
def _f_p_r_lcs(llcs, m, n):
"""
Computes the LCS-based F-measure score
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs, p_lcs, r_lcs
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (sentence level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
    reference_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
reference_words = _split_into_words(reference_sentences)
evaluated_words = _split_into_words(evaluated_sentences)
m = len(reference_words)
n = len(evaluated_words)
lcs = _len_lcs(evaluated_words, reference_words)
return _f_p_r_lcs(lcs, m, n)
def _union_lcs(evaluated_sentences, reference_sentence):
"""
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C. For example
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and
c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is
“w1 w2” and the longest common subsequence of r_i and c2 is “w1 w3 w5”. The
union longest common subsequence of r_i, c1, and c2 is “w1 w2 w3 w5” and
LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
lcs_union = set()
reference_words = _split_into_words([reference_sentence])
combined_lcs_length = 0
for eval_s in evaluated_sentences:
evaluated_words = _split_into_words([eval_s])
lcs = set(_recon_lcs(reference_words, evaluated_words))
combined_lcs_length += len(lcs)
lcs_union = lcs_union.union(lcs)
union_lcs_count = len(lcs_union)
union_lcs_value = union_lcs_count / combined_lcs_length
return union_lcs_value
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
# total number of words in reference sentences
m = len(_split_into_words(reference_sentences))
# total number of words in evaluated sentences
n = len(_split_into_words(evaluated_sentences))
union_lcs_sum_across_all_references = 0
for ref_s in reference_sentences:
union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,
ref_s)
return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)
def rouge(hypotheses, references):
"""Calculates average rouge scores for a list of hypotheses and
references"""
# Filter out hyps that are of 0 length
# hyps_and_refs = zip(hypotheses, references)
# hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
# hypotheses, references = zip(*hyps_and_refs)
# Calculate ROUGE-1 F1, precision, recall scores
rouge_1 = [
rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)
]
rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))
# Calculate ROUGE-2 F1, precision, recall scores
rouge_2 = [
rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)
]
rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))
# Calculate ROUGE-L F1, precision, recall scores
rouge_l = [
rouge_l_sentence_level([hyp], [ref])
for hyp, ref in zip(hypotheses, references)
]
rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))
return {
"rouge_1/f_score": rouge_1_f,
"rouge_1/r_score": rouge_1_r,
"rouge_1/p_score": rouge_1_p,
"rouge_2/f_score": rouge_2_f,
"rouge_2/r_score": rouge_2_r,
"rouge_2/p_score": rouge_2_p,
"rouge_l/f_score": rouge_l_f,
"rouge_l/r_score": rouge_l_r,
"rouge_l/p_score": rouge_l_p,
} | 10,971 | 29.060274 | 80 | py |
RefNet | RefNet-master/data/preprocress.py | import os
import json
import struct
import collections
from tensorflow.core.example import example_pb2
import re
import spacy
nlp = spacy.load('en', disable=['tagger', 'ner'], vectors=False)
print('Spacy loaded')
def get_tokens(doc):
doc = nlp(doc)
new_tokens = []
for k in doc:
new_tokens.append(k.text)
return new_tokens
# We use these to separate the summary sentences in the .bin datafiles
CHUNK_SIZE = 1000
def chunk_file(chunks_dir, finished_files_dir, set_name):
in_file = finished_files_dir + '/%s.bin' % set_name
reader = open(in_file, "rb")
chunk = 0
finished = False
while not finished:
chunk_fname = os.path.join(chunks_dir, '%s_%03d.bin' % (set_name, chunk)) # new chunk
with open(chunk_fname, 'wb') as writer:
for _ in range(CHUNK_SIZE):
len_bytes = reader.read(8)
if not len_bytes:
finished = True
break
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
writer.write(struct.pack('q', str_len))
writer.write(struct.pack('%ds' % str_len, example_str))
chunk += 1
def chunk_all(chunks_dir, finished_files_dir):
# Make a dir to hold the chunks
if not os.path.isdir(chunks_dir):
os.mkdir(chunks_dir)
# Chunk the data
for set_name in ['train', 'val', 'test']:
print("Splitting %s data into chunks..." % set_name)
chunk_file(chunks_dir, finished_files_dir, set_name)
print("Saved chunked data in %s" % chunks_dir)
def write_to_bin(url_file, out_file, finished_files_dir, makevocab=False):
url_list = url_file
VOCAB_SIZE = 25000
if makevocab:
vocab_counter = collections.Counter()
with open(out_file, 'wb') as writer:
for idx, s in enumerate(url_list):
if idx % 1000 == 0:
print("Writing story %i percent done" % idx)
background, context, response, span, b_start, b_end, r_start, r_end, example_id = get_art_abs(s)
# Write to tf.Example
tf_example = example_pb2.Example()
tf_example.features.feature['background'].bytes_list.value.extend([background.encode()])
tf_example.features.feature['context'].bytes_list.value.extend([context.encode()])
tf_example.features.feature['response'].bytes_list.value.extend([response.encode()])
tf_example.features.feature['span'].bytes_list.value.extend([span.encode()])
tf_example.features.feature['b_start'].bytes_list.value.extend([b_start.encode()])
tf_example.features.feature['b_end'].bytes_list.value.extend([b_end.encode()])
tf_example.features.feature['r_start'].bytes_list.value.extend([r_start.encode()])
tf_example.features.feature['r_end'].bytes_list.value.extend([r_end.encode()])
tf_example.features.feature['example_id'].bytes_list.value.extend([example_id.encode()])
tf_example_str = tf_example.SerializeToString()
str_len = len(tf_example_str)
writer.write(struct.pack('q', str_len))
writer.write(struct.pack('%ds' % str_len, tf_example_str))
# Write the vocab to file, if applicable
if makevocab:
art_tokens = background.split(' ')
abs_tokens = response.split(' ')
que_tokens = context.split(' ')
tokens = art_tokens + abs_tokens + que_tokens
tokens = [t.strip() for t in tokens]
tokens = [t for t in tokens if t != ""]
vocab_counter.update(tokens)
print("Finished writing file %s\n" % out_file)
# write vocab to file
if makevocab:
print("Writing vocab file...")
with open(os.path.join(finished_files_dir, "vocab"), 'w', encoding='utf-8') as writer:
for word, count in vocab_counter.most_common(VOCAB_SIZE):
writer.write(word + ' ' + str(count) + '\n')
print("Finished writing vocab file")
def get_art_abs(story_file):
background = str(story_file['background'])
context = str(story_file['context'])
response = str(story_file['response'])
span = str(story_file['span'])
b_start = str(story_file['b_start'])
b_end = str(story_file['b_end'])
r_start = str(story_file['r_start'])
r_end = str(story_file['r_end'])
example_id = str(story_file['example_id'])
re.sub("\s+", " ", background)
re.sub("\s+", " ", context)
re.sub("\s+", " ", response)
re.sub("\s+", " ", span)
return background, context, response, span, b_start, b_end, r_start, r_end, example_id
def process_tokens(st):
return " ".join(st)
def convert_idx(text, tokens):
current = 0
spans = []
for token in tokens:
current = text.find(token, current)
if current < 0:
print("Token {} cannot be found".format(token))
raise Exception()
spans.append((current, current + len(token)))
current += len(token)
return spans
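# Example: convert_idx("a cat", ["a", "cat"]) -> [(0, 1), (2, 5)], i.e. the
# character-level (start, end) offset of every token in the original text.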
def RefNet(data, query_type, data_type, start_type):
folder_name = data_type + '_' + query_type
if not os.path.exists(folder_name):
os.makedirs(folder_name) #
finished_files_dir = folder_name + "/finished_files"
chunks_dir = os.path.join(finished_files_dir, "chunked")
train_data = []
valid_data = []
test_data = []
background_span_error = 0
background_span_error1 = 0
response_span_error = 0
for k, type_data in enumerate(data):
for count, i in enumerate(type_data):
if count % 1000 == 0:
print(count)
background = i[data_type].lower()
context = i[query_type].lower()
response = i['response'].lower()
span = i['span'].lower().strip()
background_span_char_start = background.find(span)
response_spans_char_start = response.find(span)
span_lenth = len(span)
background_span_char_end = span_lenth + background_span_char_start
response_spans_char_end = span_lenth + response_spans_char_start
if (background_span_char_start != i[start_type]) and (i[start_type]!=None):
background_span_error = background_span_error + 1
print("No.{}, The matching answer_start is different with the author label".format(background_span_error))
print("The author label:", i[start_type])
print("The matching number:", background_span_char_start)
print(i['example_id'])
print("background: ", background)
print("context: ", context)
print("response: ", response)
print("span: ", span)
background_span_char_start = i[start_type]
background_span_char_end = span_lenth + background_span_char_start
print("Modify according to the the author label" % background_span_char_start)
background_token = get_tokens(background)
context_token = get_tokens(context)
response_token = get_tokens(response)
span_token = get_tokens(span)
background_refine_text = process_tokens(background_token)
context_token_refine_text = process_tokens(context_token)
response_token_refine_text = process_tokens(response_token)
span_token_refine_text = process_tokens(span_token)
background_token = background_refine_text.split()
context_token = context_token_refine_text.split()
response_token = response_token_refine_text.split()
span_token = span_token_refine_text.split()
background_spans = convert_idx(background, background_token)
response_spans = convert_idx(response, response_token)
background_span = []
for idx, b_span in enumerate(background_spans):
if not (background_span_char_end <= b_span[0] or background_span_char_start >= b_span[1]):
background_span.append(idx)
b_start, b_end = background_span[0], background_span[-1]
response_span = []
for idx, r_span in enumerate(response_spans):
if not (response_spans_char_end <= r_span[0] or response_spans_char_start >= r_span[1]):
response_span.append(idx)
r_start, r_end = response_span[0], response_span[-1]
if response_token[r_start:(r_end + 1)] != span_token:
response_span_error = response_span_error + 1
print("No.{}, The span extracted from response is different from the span labeled by author".format(response_span_error))
print("The author label:", span_token)
print("The span extracted from response:", response_token[r_start:(r_end + 1)])
print(i['example_id'])
print("background: ", background)
print("context: ", context)
print("response: ", response)
print("span: ", span)
if background_token[b_start:(b_end + 1)] != span_token:
background_span_error1 = background_span_error1 + 1
print("No.{}, The span extracted from background is different from the span labeled by author".format(background_span_error1))
print("The author label:", span_token)
print("The span extracted from response:", background_token[b_start:(b_end + 1)])
print(i['example_id'])
print("background: ", background)
print("context: ", context)
print("response: ", response)
print("span: ", span)
example = {'background': process_tokens(background_token),
'context': process_tokens(context_token),
'response': process_tokens(response_token),
'span': process_tokens(span_token),
'b_start': b_start,
'b_end': b_end,
'r_start': r_start,
'r_end': r_end,
'example_id': i['example_id']
}
if k == 0:
train_data.append(example) # [{train_example1},{train_example2}...]
elif k == 1:
valid_data.append(example)
else:
test_data.append(example)
all_train_urls = train_data
all_val_urls = valid_data
all_test_urls = test_data
if not os.path.exists(finished_files_dir):
os.makedirs(finished_files_dir)
write_to_bin(all_test_urls, os.path.join(finished_files_dir, "test.bin"), finished_files_dir)
write_to_bin(all_val_urls, os.path.join(finished_files_dir, "val.bin"), finished_files_dir)
write_to_bin(all_train_urls, os.path.join(finished_files_dir, "train.bin"), finished_files_dir,makevocab=True)
# Chunk the data.
# This splits each of train.bin, val.bin and test.bin into smaller chunks, each containing e.g. 1000 examples, and saves them in finished_files/chunks
chunk_all(chunks_dir, finished_files_dir)
print(len(train_data))
print(len(valid_data))
print(len(test_data))
train_data = json.load(open('train_data.json', 'r'))
test_data = json.load(open('test_data.json', 'r'))
valid_data = json.load(open('dev_data.json', 'r'))
data = [train_data, valid_data, test_data]
RefNet(data, 'context', 'mixed', "answer_start_mixed")
| 11,726 | 40.438163 | 154 | py |
RefNet | RefNet-master/data/preprocress_multi_ref.py | import os
import json
import struct
import collections
import re
import spacy
nlp = spacy.load('en', disable=['tagger', 'ner'], vectors=False)
print('Spacy loaded')
def get_tokens(doc):  # used for word tokenization
doc = nlp(doc)
new_tokens = []
for k in doc:
new_tokens.append(k.text)
return new_tokens
def process_tokens(st):
return " ".join(st)
multi_ref = json.load(open('multi_reference_test.json', 'r', encoding='utf-8'))
for example_id, value in multi_ref.items():
temp_res = []
for response in value["responses"]:
response = response.lower()
modified_response = process_tokens(get_tokens(response))
temp_res.append(modified_response)
value["responses"] = temp_res
temp_span = []
for span in value["spans"]:
if isinstance(span, int):
span = str(span)
try:
span = span.lower()
modified_span = process_tokens(get_tokens(span))
temp_span.append(modified_span)
except TypeError as e:
print(value["spans"])
print(span)
value["spans"] = temp_span
with open(os.path.join("modified_multi_reference_test.json"), 'w', encoding='utf-8') as w:
json.dump(multi_ref, w)
| 1,227 | 24.061224 | 90 | py |
coling2016-pcrf-seq2seq | coling2016-pcrf-seq2seq-master/src/makeAlign-seg_complex.py | #! /usr/bin/python
import sys
try:
WINDOW = int(sys.argv[1])
MY_N = int(sys.argv[2])
except IndexError:
WINDOW = 6
MY_N = 6
def createNgramTemplate(integer,mystring,n,window,stringIndex):
counter = 0
m = len(mystring)
features = []
joinSym=""
for i in xrange(-window,window-n+2,1):
#print "s%d:"%(counter) + "/".join(["%%x[%d,%d]"%(i+j,stringIndex) for j in xrange(n)])
f = []
for j in xrange(n):
if stringIndex+i+j>=0 and stringIndex+i+j<m:
f.append( mystring[stringIndex+i+j] )
else:
f.append( "-" )
features.append( "feat_%d_%d=%s"%(n,integer+counter,joinSym.join(f)) )
counter += 1
return features,integer+counter
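# Rough illustration (not part of the original script): for the symbol
# sequence ['a','b','c'] with n=2, window=1 and stringIndex=1 the loop above
# emits feat_2_0=ab (offsets -1,0) and feat_2_1=bc (offsets 0,+1), padding any
# offset that falls outside the sequence with "-".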
def printForCRF(splittedStrngX,splittedStrngY=None):
#strAsList = [x for x in str]
window = WINDOW # NUMBER MUST MATCH WITH NUMBER IN makeSeg.py in the decoder
N = MY_N # NUMBER MUST MATCH WITH NUMBER IN makeSeg.py in the decoder
for i in xrange(len(splittedStrngX)):
globalCounter = 0
allFeatures = []
for n in xrange(1,N+1,1):
features,globalCounter = createNgramTemplate(globalCounter,splittedStrngX,n,window,i)
globalCounter = 0
allFeatures += features
#print n,features
try:
if splittedStrngY!=None:
print "%s\t%s\t%s"%(splittedStrngX[i].encode("utf-8"),splittedStrngY[i].encode("utf-8"),"#".join(allFeatures).encode("utf-8"))
else:
print "%s\t%s"%(splittedStrngX[i].encode("utf-8"),"#".join(allFeatures).encode("utf-8"))
except IndexError:
sys.stderr.write("ERROR:%s\t%s\n"%(" ".join(splittedStrngX)," ".join(splittedStrngY))) #b,str
sys.exit(1)
if __name__ == "__main__":
splitSym="-"
splitSym="|"
coding = "utf-8"
#coding = "latin-1"
i = 0
for line in sys.stdin.readlines():
if i%100==0:
sys.stderr.write("\r>>%d"%i)
#if i>20000: break
try:
line = line.decode(coding)
except UnicodeDecodeError:
sys.stderr.write("FAULTY LINE (%d):|%s|\n"%(i,line))
sys.exit(1)
try:
    x,y = line.strip().split("\t")
except ValueError:
    x = line.strip()
    y = None
if x.strip()=="": continue
printForCRF(x.split(splitSym), y.split(splitSym) if y is not None else None)
print
i+=1
| 2,231 | 27.987013 | 138 | py |
coling2016-pcrf-seq2seq | coling2016-pcrf-seq2seq-master/src/extractStrings.py | #! /usr/bin/python
import sys
chars = []
preds = []
EMPTY="EMPTY"
for line in sys.stdin:
line = line.strip()
if line=="":
print "%s\t%s"%(" ".join(chars)," ".join(preds))
chars = []
preds = []
else:
x = line.split("\t")
char = x[1]
pred = x[5]
#if pred[-1] in ["0","1","2"]: pred = pred[:-1]
if char!=EMPTY:
chars.append(char)
if pred!=EMPTY:
preds.append(pred)
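# Assumed input/output format (inferred from the column indices above, not a
# documented interface): stdin carries blank-line separated blocks of
# tab-separated CRF decoder output with the input symbol in column 1 and the
# predicted label in column 5; each block is emitted as one line
# "<symbols joined by spaces>\t<labels joined by spaces>", dropping EMPTY
# placeholders.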
| 448 | 17.708333 | 56 | py |
coling2016-pcrf-seq2seq | coling2016-pcrf-seq2seq-master/src/removeLast.py | #! /usr/bin/python
import sys
for line in sys.stdin:
line = line.strip()
a,b = line.split()
print "%s\t%s"%(a[:-1],b[:-1])
| 131 | 13.666667 | 32 | py |
coling2016-pcrf-seq2seq | coling2016-pcrf-seq2seq-master/src/makeSeg_complex.py | #! /usr/bin/python
import sys
try:
WINDOW = int(sys.argv[1])
MY_N = int(sys.argv[2])
except IndexError:
WINDOW = 6
MY_N = 6
def createNgramTemplate(integer,mystring,n,window,stringIndex):
counter = 0
m = len(mystring)
features = []
for i in xrange(-window,window-n+2,1):
#print "s%d:"%(counter) + "/".join(["%%x[%d,%d]"%(i+j,stringIndex) for j in xrange(n)])
f = []
for j in xrange(n):
if stringIndex+i+j>=0 and stringIndex+i+j<m:
f.append( mystring[stringIndex+i+j] )
else:
f.append( "-" )
features.append( "feat_%d_%d=%s"%(n,integer+counter,"".join(f)) )
counter += 1
return features,integer+counter
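# Note: this mirrors createNgramTemplate in makeAlign-seg_complex.py; e.g. for
# ['a','b','c'] with n=2, window=1, stringIndex=1 it yields feat_2_0=ab and
# feat_2_1=bc, padding out-of-range offsets with "-".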
def printForCRF(splittedStrng):
str = "".join(splittedStrng)
#b = splittedString2Binary(splittedStrng)
#print splittedStrng,b
strAsList = [x for x in str]
strAsList = splittedStrng
window = WINDOW
N = MY_N
#globalCounter = 0
for i in xrange(len(strAsList)):
globalCounter = 0
allFeatures = []
for n in xrange(1,N+1,1):
features,globalCounter = createNgramTemplate(globalCounter,strAsList,n,window,i)
globalCounter = 0
allFeatures += features
#print n,features
try:
print "%s\t%s\t%s"%(strAsList[i].encode("utf-8"),strAsList[i].encode("utf-8"),"#".join(allFeatures).encode("utf-8"))
except IndexError:
    sys.stderr.write("ERROR: %s\n" % str.encode("utf-8"))
    sys.exit(1)
if __name__ == "__main__":
splitSym="-"
splitSym=" "
encoding = "utf-8"
#encoding = "latin1"
for line in sys.stdin.readlines():
line = line.decode(encoding)
try:
x,y = line.strip().split("\t")
except ValueError:
x = line.strip()
if x.strip()=="": continue
printForCRF(x.split(splitSym))
print
| 1,770 | 24.3 | 126 | py |
articles | articles-master/Classifying Processes Instances Using Recurrent Neural Networks/testframework/main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper: "Classifying Processes Instances Using Recurrent Neural Networks"
by Markku Hinkka, Teemu Lehto, Keijo Heljanko and Alexander Jung
"""
import lasagne
from lasagne.layers import *
import numpy as np
import theano as theano
import theano.tensor as T
import time
import operator
from utils import load_traces, generate_traces, draw_charts, configure
from model import Model, trace_registry
import matplotlib.pyplot as plt
configure(output_path = "C:/Users/User/Dropbox/Aalto/testing/testruns/")
filePath = "D:/dev/aalto/papers/nn-predictions/src/"
trace_registry["bpic14_dur"] = lambda trace_length_modifier: load_traces("bpic14_dur", filePath + "rabobank.csv", lambda row: row[1] == "1", trace_length_modifier, 40000)
trace_registry["bpic14_rfi"] = lambda trace_length_modifier: load_traces("bpic14_rfi", filePath + "rabobank.csv", lambda row: row[2] == "request for information", trace_length_modifier, 40000)
trace_registry["bpic12_dur"] = lambda trace_length_modifier: load_traces("bpic12_dur", filePath + "BPIC12.csv", lambda row: row[1] == "1", trace_length_modifier)
trace_registry["bpic13_dur"] = lambda trace_length_modifier: load_traces("bpic13_dur", filePath + "BPIC13.csv", lambda row: row[1] == "1", trace_length_modifier)
trace_registry["bpic17_dur"] = lambda trace_length_modifier: load_traces("bpic17_dur", filePath + "BPIC17.csv", lambda row: row[1] == "1", trace_length_modifier)
trace_registry["hospital_dur"] = lambda trace_length_modifier: load_traces("hospital_dur", filePath + "HospitalLog.csv", lambda row: row[1] == "1", trace_length_modifier)
results = []
random_seed = 123
def test_dataset(dataset_name):
def test_algorithm(algorithm):
global results
global random_seed
case_name = "test"
num_layers = 1
optimizer = "adam"
learning_rate = 0.01
batch_size = 256
num_callbacks = 50
hidden_dim_size = 32
num_iterations_between_reports = 100000
grad_clipping = 100
predict_only_outcome = True
final_trace_only = True
trace_length_modifier = 1.0
truncate_unknowns = False
max_num_words = 50
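# The six Model(...) constructions below each train and evaluate one variant
# with the same random seed: the baseline configuration, a 50% trace-length
# run, per-prefix prediction (final_trace_only=False), next-token prediction
# (predict_only_outcome=False), both of those combined, and a smaller
# 16-unit hidden layer.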
results = Model(
case_name = case_name,
dataset_name = dataset_name,
algorithm = algorithm,
num_layers = num_layers,
optimizer = optimizer,
learning_rate = learning_rate,
batch_size = batch_size,
num_callbacks = num_callbacks,
hidden_dim_size = hidden_dim_size,
num_iterations_between_reports = num_iterations_between_reports,
grad_clipping = grad_clipping,
predict_only_outcome = predict_only_outcome,
final_trace_only = final_trace_only,
trace_length_modifier = trace_length_modifier,
max_num_words = max_num_words,
truncate_unknowns = truncate_unknowns,
rng = np.random.RandomState(random_seed))
results = Model(
case_name = case_name,
dataset_name = dataset_name,
algorithm = algorithm,
num_layers = num_layers,
optimizer = optimizer,
learning_rate = learning_rate,
batch_size = batch_size,
num_callbacks = num_callbacks,
hidden_dim_size = hidden_dim_size,
num_iterations_between_reports = num_iterations_between_reports,
grad_clipping = grad_clipping,
predict_only_outcome = predict_only_outcome,
final_trace_only = final_trace_only,
trace_length_modifier = 0.5,
max_num_words = max_num_words,
truncate_unknowns = truncate_unknowns,
rng = np.random.RandomState(random_seed))
results = Model(
case_name = case_name,
dataset_name = dataset_name,
algorithm = algorithm,
num_layers = num_layers,
optimizer = optimizer,
learning_rate = learning_rate,
batch_size = batch_size,
num_callbacks = num_callbacks,
hidden_dim_size = hidden_dim_size,
num_iterations_between_reports = num_iterations_between_reports,
grad_clipping = grad_clipping,
predict_only_outcome = predict_only_outcome,
final_trace_only = False,
trace_length_modifier = trace_length_modifier,
max_num_words = max_num_words,
truncate_unknowns = truncate_unknowns,
rng = np.random.RandomState(random_seed))
results = Model(
case_name = case_name,
dataset_name = dataset_name,
algorithm = algorithm,
num_layers = num_layers,
optimizer = optimizer,
learning_rate = learning_rate,
batch_size = batch_size,
num_callbacks = num_callbacks,
hidden_dim_size = hidden_dim_size,
num_iterations_between_reports = num_iterations_between_reports,
grad_clipping = grad_clipping,
predict_only_outcome = False,
final_trace_only = final_trace_only,
trace_length_modifier = trace_length_modifier,
max_num_words = max_num_words,
truncate_unknowns = truncate_unknowns,
rng = np.random.RandomState(random_seed))
results = Model(
case_name = case_name,
dataset_name = dataset_name,
algorithm = algorithm,
num_layers = num_layers,
optimizer = optimizer,
learning_rate = learning_rate,
batch_size = batch_size,
num_callbacks = num_callbacks,
hidden_dim_size = hidden_dim_size,
num_iterations_between_reports = num_iterations_between_reports,
grad_clipping = grad_clipping,
predict_only_outcome = False,
final_trace_only = False,
trace_length_modifier = trace_length_modifier,
max_num_words = max_num_words,
truncate_unknowns = truncate_unknowns,
rng = np.random.RandomState(random_seed))
results = Model(
case_name = case_name,
dataset_name = dataset_name,
algorithm = algorithm,
num_layers = num_layers,
optimizer = optimizer,
learning_rate = learning_rate,
batch_size = batch_size,
num_callbacks = num_callbacks,
hidden_dim_size = 16,
num_iterations_between_reports = num_iterations_between_reports,
grad_clipping = grad_clipping,
predict_only_outcome = predict_only_outcome,
final_trace_only = final_trace_only,
trace_length_modifier = trace_length_modifier,
max_num_words = max_num_words,
truncate_unknowns = truncate_unknowns,
rng = np.random.RandomState(random_seed))
test_algorithm("gru")
test_algorithm("lstm")
test_dataset("bpic14_dur")
test_dataset("bpic14_rfi")
test_dataset("bpic12_dur")
test_dataset("bpic13_dur")
test_dataset("bpic17_dur")
test_dataset("hospital_dur")
| 7,759 | 44.647059 | 192 | py |
articles | articles-master/Classifying Processes Instances Using Recurrent Neural Networks/testframework/utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper: "Classifying Processes Instances Using Recurrent Neural Networks"
by Markku Hinkka, Teemu Lehto, Keijo Heljanko and Alexander Jung
"""
import csv
import numpy as np
import time
import sys
import operator
import io
import array
from datetime import datetime
import matplotlib.pyplot as plt
import math
UNKNOWN_TOKEN = "UNKNOWN"
IN_SELECTION_TOKEN = "SELECTED"
NOT_IN_SELECTION_TOKEN = "NOT_SELECTED"
class TraceData:
traceId = ""
isSelected = False
activities = []
# sentence = ""
def __init__(self, var1, var2, var3, trace_length_modifier):
self.traceId = var1
self.isSelected = var2
self.pathString = var3
self.trace_length_modifier = trace_length_modifier
if ((not var3) or var3.isspace()):
self.fullActivities = np.asarray([])
else:
self.fullActivities = np.asarray([w.replace(" ", "_") for w in var3[2:-2].split("..")])
if (trace_length_modifier != 1.0):
self.activities = self.fullActivities[range(math.ceil(trace_length_modifier * len(self.fullActivities)))]
else:
self.activities = self.fullActivities
# self.instrumentedActivities = [SENTENCE_START_TOKEN]
self.instrumentedActivities = []
self.instrumentedActivities.extend(self.activities)
self.instrumentedActivities.append((IN_SELECTION_TOKEN if self.isSelected else NOT_IN_SELECTION_TOKEN))
# self.instrumentedActivities.append(SENTENCE_END_TOKEN)
self.sentence = "%s %s" % (" ".join(self.activities), (IN_SELECTION_TOKEN if self.isSelected else NOT_IN_SELECTION_TOKEN))
self.activitiesForPrediction = {}
def getActivitiesForPrediction(self, word_to_index, tracePercentage, truncateUnknowns, seqLength, vocabSize):
key = "%s_%s_%s_%s_%s" % (tracePercentage, self.trace_length_modifier, truncateUnknowns, seqLength, vocabSize)
if (not key in self.activitiesForPrediction):
activities = self.activities[range(math.ceil(tracePercentage * len(self.activities)))]
unknownId = word_to_index[UNKNOWN_TOKEN]
activities = [word_to_index[activity] if (activity in word_to_index) else unknownId for activity in activities]
if (truncateUnknowns):
origActivities = activities
activities = []
wasUnknown = False
for id in origActivities:
isUnknown = id == unknownId
if ((not isUnknown) or (not wasUnknown)):
activities.append(id)
wasUnknown = isUnknown
self.activitiesForPrediction[key] = activities
return self.activitiesForPrediction[key]
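# Note: results are memoised per (tracePercentage, trace_length_modifier,
# truncateUnknowns, seqLength, vocabSize) key, so repeated evaluations of the
# same configuration reuse the index-encoded prefix instead of rebuilding it.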
def getActivitiesForPredictionGRU(self, word_to_index):
return [word_to_index[activity] for activity in self.activities]
loaded_traces = {}
def load_traces(traceName, filename, selectionCallback, trace_length_modifier, datasetSize = None):
key = "%s_%s_%s" % (traceName, trace_length_modifier, datasetSize)
if (key in loaded_traces):
return loaded_traces[key]
word_to_index = []
index_to_word = []
traces = []
# Read the trace data from the CSV export
writeLog("Creating traces \"" + traceName + "\". Reading CSV file: " + filename)
with open(filename, 'rt', encoding="utf-8") as f:
reader = csv.reader(f, skipinitialspace=True, delimiter=';')
reader.__next__()
for row in reader:
traces.append(TraceData(row[0], selectionCallback(row), row[len(row) - 1], trace_length_modifier))
writeLog("Parsed %d traces." % (len(traces)))
traces = np.asarray(traces)
# sentences = []
# for trace in traces:
# sentences.append(trace.instrumentedActivities)
if (datasetSize != None):
traces = traces[:datasetSize]
loaded_traces[key] = traces
return traces #, Word2Vec(sentences, min_count=1)
def print_trace(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
writeLog(" ".join(sentence_str))
sys.stdout.flush()
def generate_trace(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = []
# Repeat until we get an end token
selIndex = word_to_index[IN_SELECTION_TOKEN]
notSelIndex = word_to_index[NOT_IN_SELECTION_TOKEN]
while not ((len(new_sentence) > 0) and ((new_sentence[-1] == selIndex) or (new_sentence[-1] == notSelIndex))):
next_word_probs = model.predict(new_sentence)[-1]
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
new_sentence.append(sampled_word)
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
if len(new_sentence) > 100 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
return None
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_traces(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_trace(model, index_to_word, word_to_index)
print_trace(sent, index_to_word)
def predict_outcome(model, test, word_to_index):
nextPrediction = model.predict(test)[-1]
selIndex = word_to_index[IN_SELECTION_TOKEN]
notSelIndex = word_to_index[NOT_IN_SELECTION_TOKEN]
selProb = nextPrediction[selIndex]
notSelProb = nextPrediction[notSelIndex]
return selProb >= notSelProb;
def get_filename(figure_type, name, file_type):
dtstr = datetime.now().replace(microsecond=0).isoformat().replace("-", "").replace(":", "")
return _output_path + figure_type + "-" + name + "-" + dtstr + "." + file_type
def draw_train_chart(results, name):
for i in range(len(results)):
result = results[i]
plt.plot(result.sr_examplesSeen, result.sr_trains, label = result.case_name)
plt.xlabel('iterations')
plt.ylabel('Success rate')
plt.title('Training set classification success rate - ' + name)
plt.legend()
plt.savefig(get_filename("train", name, "pdf"))
plt.show()
def draw_test_chart(results, name):
for i in range(len(results)):
result = results[i]
plt.plot(result.sr_examplesSeen, result.sr_tests, label = result.case_name)
plt.xlabel('iterations')
plt.ylabel('Success rate')
plt.title('Test set classification success rate - ' + name)
plt.legend()
plt.savefig(get_filename("test", name, "pdf"))
plt.show()
def draw_time_used_chart(results, name):
for i in range(len(results)):
result = results[i]
plt.plot(result.sr_examplesSeen, result.time_used, label = result.case_name)
plt.xlabel('iterations')
plt.ylabel('Seconds')
plt.title('Time used - ' + name)
plt.legend()
plt.savefig(get_filename("duration", name, "pdf"))
plt.show()
def draw_charts(results, name):
draw_train_chart(results, name)
draw_test_chart(results, name)
draw_time_used_chart(results, name)
with open(get_filename("final-results", name, "csv"), "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["TestName", "Dataset", "DatasetSize", "Count", "TimeUsed", "SR_Train", "SR_Test", "AvgCost", "Optimizer", "HiddenSize", "NumTraces"])
for i in range(len(results)):
result = results[i]
result.write_csv(name, csvwriter)
_output_path = ""
_log_filename = ""
_results_filename = ""
def configure(output_path):
global _output_path
global _log_filename
global _results_filename
_output_path = output_path
_log_filename = get_filename("log", "", "txt")
_results_filename = get_filename("results", "", "csv")
with open(_results_filename, "a", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["Time", "Status", "Name", "TestName", "Dataset", "DatasetSize", "Algorithm", "NumLayers", "HiddenDimSize", "Optimizer", "LearningRate", "SeqLength", "BatchSize", "GradClipping", "ItemsBetween", "TestIteration", "Iteration", "Epoch", "TimeUsed", "CumulTimeUsed", "TimeUsedForTest", "CumulTimeUsedForTest", "SR_Train", "SR_Test", "SR_Test75p", "SR_Test50p", "SR_Test25p", "AvgCost", "AUC", "TP", "TN", "FP", "FN", "AllConfusions", "PredictOnlyOutcome", "FinalTraceOnly", "TraceLengthMod", "FixedLength", "MaxNumActivities", "TruncateUnknowns"])
def writeLog(message):
message = datetime.now().replace(microsecond=0).isoformat() + " \t" + message
print(message)
with open(_log_filename, "a") as logfile:
logfile.write(message + "\n")
def writeResultRow(cells):
with open(_results_filename, "a", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(cells)
| 9,257 | 42.261682 | 570 | py |
articles | articles-master/Classifying Processes Instances Using Recurrent Neural Networks/testframework/model.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper: "Classifying Processes Instances Using Recurrent Neural Networks"
by Markku Hinkka, Teemu Lehto, Keijo Heljanko and Alexander Jung
"""
import sys
import lasagne
from lasagne.layers import *
import numpy as np
import theano as theano
import theano.tensor as T
import time
import operator
from utils import TraceData, load_traces, writeLog, writeResultRow
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn import metrics
import nltk
import itertools
TRAIN_SAMPLE_PERCENTAGE = 0.75
UNKNOWN_TOKEN = "UNKNOWN"
IN_SELECTION_TOKEN = "SELECTED"
NOT_IN_SELECTION_TOKEN = "NOT_SELECTED"
class Model:
def __init__(self, case_name, dataset_name, algorithm, num_layers,
optimizer, learning_rate, batch_size,
num_callbacks, hidden_dim_size, num_iterations_between_reports,
grad_clipping, predict_only_outcome, final_trace_only,
trace_length_modifier, max_num_words, truncate_unknowns, rng):
writeLog("Using data set: " + dataset_name)
self.algorithm = algorithm
self.num_layers = num_layers
self.optimizer = optimizer
self.learning_rate = learning_rate
self.batch_size = batch_size
self.num_callbacks = num_callbacks
self.traces = trace_registry[dataset_name](trace_length_modifier)
self.dataset_size = len(self.traces)
self.dataset_name = dataset_name
self.case_name = case_name
self.hidden_dim_size = hidden_dim_size
self.num_iterations_between_reports = num_iterations_between_reports
self.grad_clipping = grad_clipping
self.rng = rng
self.predict_only_outcome = predict_only_outcome
self.final_trace_only = final_trace_only
self.trace_length_modifier = trace_length_modifier
self.max_num_words = max_num_words
self.truncate_unknowns = truncate_unknowns
lasagne.random.set_rng(rng)
try:
self.createModel()
except:
writeLog("Exception: " + sys.exc_info()[0])
def gen_data(self, data, p, positions, batch_size, return_target=True):
'''
Produces one training batch starting from offset 'p' into 'positions', a
list of [sentence index, prefix length] pairs referring to the tokenized
sentences in 'data'.

For each of the batch_size positions the activity prefix of the referenced
sentence is one-hot encoded into x of shape (batch_size, seq_length,
vocab_size); a mask of shape (batch_size, seq_length) marks which timesteps
hold real events. When return_target is set, y holds the index of the target
token: the final outcome token of the sentence if predict_only_outcome is
enabled, otherwise the token that immediately follows the prefix.
'''
data_size = len(positions)
x = np.zeros((batch_size, self.seq_length, self.vocab_size))
y = np.zeros(batch_size)
masks = []
for n in range(batch_size):
ptr = (p + n) % data_size
pos = positions[ptr]
dt = data[pos[0]]
for i in range(pos[1]):
x[n, i, self.word_to_index[dt[i]]] = 1.
masks.append([1 if x < pos[1] else 0 for x in range(self.seq_length)])
#! if(return_target):
#! y[n, self.word_to_index[dt[-1]] if self.predict_only_outcome else self.word_to_index[dt[pos[1] + 1]]] = 1
if(return_target):
y[n] = self.word_to_index[dt[-1]] if self.predict_only_outcome else self.word_to_index[dt[pos[1]]]
return x, np.array(y,dtype='int32'), np.asarray(masks)
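# Minimal usage sketch (hypothetical values, mirroring how trainModel calls
# this): x is (batch_size, seq_length, vocab_size) one-hot prefixes, y the
# target token indices and mask flags the real timesteps, e.g.
#   x, y, mask = self.gen_data(self.TS_train, 0, self.positions_train,
#                              self.batch_size)
#   cost = self.train(x, y, mask)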
def gen_prediction_data(self, traces, tracePercentage):
batches = []
masks = []
numTraces = len(traces)
if (numTraces == 0):
return np.asarray(batches), np.asarray(masks)
batchRow = 0
x = np.zeros((self.batch_size if (numTraces > self.batch_size) else numTraces, self.seq_length, self.vocab_size))
m = np.zeros((self.batch_size if (numTraces > self.batch_size) else numTraces, self.seq_length))
batches.append(x)
masks.append(m)
for traceRow in range(len(traces)):
trace = traces[traceRow]
traceData = trace.getActivitiesForPrediction(self.word_to_index, tracePercentage, self.truncate_unknowns, self.seq_length, self.vocab_size)
for i in range(len(traceData)):
x[batchRow, i, traceData[i]] = 1.
for i in range(self.seq_length):
m[batchRow, i] = 1 if i < len(traceData) else 0
batchRow += 1
if (batchRow >= self.batch_size):
x = np.zeros((self.batch_size if (numTraces - traceRow) > self.batch_size else (numTraces - traceRow - 1), self.seq_length, self.vocab_size))
m = np.zeros((self.batch_size if (numTraces - traceRow) > self.batch_size else (numTraces - traceRow - 1), self.seq_length))
batches.append(x)
masks.append(m)
batchRow = 0
return np.asarray(batches), np.asarray(masks)
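# The traces are packed into one-hot batches of at most batch_size rows; the
# parallel mask arrays mark which timesteps of each row correspond to real
# events so that traces of different lengths can share a batch.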
def trainModel(self, callback):
data_size = len(self.positions_train)
writeLog("Training...")
p = 0
num_iterations = 0
num_iterations_after_report = 0
num_report_iterations = 1
avg_cost = 0;
# writeLog("It: " + str(data_size * self.num_epochs // self.batch_size))
try:
it = 0
while (num_report_iterations <= self.num_callbacks):
x, y, mask = self.gen_data(self.TS_train, p, self.positions_train, self.batch_size)
it += 1
p += self.batch_size
num_iterations += self.batch_size
num_iterations_after_report += self.batch_size
# if(p+self.batch_size+self.seq_length >= data_size):
# writeLog('Carriage Return')
# p = 0;
avg_cost += self.train(x, y, mask)
if (callback and num_iterations_after_report >= self.num_iterations_between_reports):
callback(num_iterations, it, avg_cost / it, num_report_iterations)
avg_cost = 0
num_iterations_after_report = num_iterations_after_report - self.num_iterations_between_reports
num_report_iterations = num_report_iterations + 1
# callback(num_iterations, it, avg_cost / it, num_report_iterations)
except KeyboardInterrupt:
    pass
return num_iterations
def initializeTraces(self):
word_to_index = []
index_to_word = []
TRAIN_SIZE = int(self.dataset_size * TRAIN_SAMPLE_PERCENTAGE)
TEST_SIZE = int(self.dataset_size * (1 - TRAIN_SAMPLE_PERCENTAGE))
indexes = self.rng.permutation(self.dataset_size)
# indexes = range(self.dataset_size)
self.traces_train = self.traces[indexes[:TRAIN_SIZE]]
self.traces_test = self.traces[indexes[TRAIN_SIZE:]]
# Tokenize the sentences into words
writeLog("Tokenizing %s sentences." % len(self.traces))
# tokenized_sentences = [nltk.word_tokenize(trace.sentence) for trace in traces]
tokenized_sentences_train = [nltk.WhitespaceTokenizer().tokenize(trace.sentence) for trace in self.traces_train]
tokenized_sentences = [nltk.WhitespaceTokenizer().tokenize(trace.sentence) for trace in self.traces]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences_train))
writeLog("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)
writeLog("Using vocabulary size %d." % len(vocab))
writeLog("The least frequent word in our vocabulary is '%s' and appeared %d times." % (vocab[-1][0], vocab[-1][1]))
words = []
for x in vocab:
w = x[0]
if (w != IN_SELECTION_TOKEN and w != NOT_IN_SELECTION_TOKEN):
words.append(w)
words = np.asarray(words)
if ((self.max_num_words != None) and (self.max_num_words < len(words))):
words = words[range(self.max_num_words)]
writeLog("Vocabulary was truncated to %d most frequent words in training set." % len(words))
index_to_word = np.concatenate([[UNKNOWN_TOKEN, IN_SELECTION_TOKEN, NOT_IN_SELECTION_TOKEN], words])
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
self.seq_length = 0
# Replace all words not in our vocabulary with the unknown token
for i, sent in enumerate(tokenized_sentences):
ts = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
if (self.truncate_unknowns):
origts = ts
ts = []
wasUnknown = False
for w in origts:
isUnknown = w == UNKNOWN_TOKEN
if ((not isUnknown) or (not wasUnknown)):
ts.append(w)
wasUnknown = isUnknown
tokenized_sentences[i] = ts
l = len(tokenized_sentences[i])
if (l > self.seq_length):
self.seq_length = l
writeLog("Maximum sequence length is %d tokens." % (self.seq_length))
self.word_to_index = word_to_index
self.index_to_word = index_to_word
self.vocab_size = len(self.word_to_index);
tokenized_sentences = np.asarray(tokenized_sentences)
self.TS_train = tokenized_sentences[indexes[:TRAIN_SIZE]]
self.positions_train = []
if (self.final_trace_only):
for i, ts in enumerate(self.TS_train):
l = len(ts)
if l > 1:
self.positions_train.append([i, l - 1])
else:
for i, ts in enumerate(self.TS_train):
l = len(ts)
if l > 1:
for pos in range(l - 1):
self.positions_train.append([i, pos])
self.TS_test = tokenized_sentences[indexes[TRAIN_SIZE:]]
self.positions_test = []
for i, ts in enumerate(self.TS_test):
l = len(ts)
if l > 1:
for pos in range(l - 1):
self.positions_test.append([i, pos])
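# positions_train / positions_test hold [sentence index, prefix length] pairs:
# with final_trace_only the training set keeps only the full prefix preceding
# the outcome token, otherwise every prefix length from 0 to len-2 is
# enumerated, as is always done for the test set.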
def createModel(self):
self.initializeTraces()
writeLog("Preparing " + str(self.num_layers) + " layers for algorithm: " + self.algorithm)
# First, we build the network, starting with an input layer
# Recurrent layers expect input of shape
# (batch size, SEQ_LENGTH, num_features)
mask_var = T.matrix('mask')
l_in = lasagne.layers.InputLayer(shape=(None, None, self.vocab_size))
l_mask = lasagne.layers.InputLayer((None, None), mask_var)
l_layers = [l_in]
# We now build the LSTM layer which takes l_in as the input layer
# We clip the gradients at GRAD_CLIP to prevent the problem of exploding gradients.
if (self.algorithm == "gru"):
layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.GRULayer(
parentLayer, self.hidden_dim_size, grad_clipping=self.grad_clipping,
mask_input = l_mask if isFirstLayer else None,
only_return_final=isLastLayer)
else:
# All gates have initializers for the input-to-gate and hidden state-to-gate
# weight matrices, the cell-to-gate weight vector, the bias vector, and the nonlinearity.
# The convention is that gates use the standard sigmoid nonlinearity,
# which is the default for the Gate class.
# gate_parameters = lasagne.layers.recurrent.Gate(
# W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# b=lasagne.init.Constant(0.))
# cell_parameters = lasagne.layers.recurrent.Gate(
# W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# # Setting W_cell to None denotes that no cell connection will be used.
# W_cell=None, b=lasagne.init.Constant(0.),
# # By convention, the cell nonlinearity is tanh in an LSTM.
# nonlinearity=lasagne.nonlinearities.tanh)
layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.LSTMLayer(
parentLayer, self.hidden_dim_size, grad_clipping=self.grad_clipping,
mask_input = l_mask if isFirstLayer else None,
nonlinearity=lasagne.nonlinearities.tanh,
# Here, we supply the gate parameters for each gate
# ingate=gate_parameters, forgetgate=gate_parameters,
# cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
only_return_final=isLastLayer)
for layerId in range(self.num_layers):
l_layers.append(layerCreatorFunc(l_layers[layerId], layerId == 0, layerId == self.num_layers - 1))
# The output of l_forward_2 of shape (batch_size, N_HIDDEN) is then passed through the softmax nonlinearity to
# create probability distribution of the prediction
# The output of this stage is (batch_size, vocab_size)
l_out = lasagne.layers.DenseLayer(l_layers[len(l_layers) - 1], num_units=self.vocab_size, W = lasagne.init.Normal(), nonlinearity=lasagne.nonlinearities.softmax)
l_layers.append(l_out)
# Theano tensor for the targets
target_values = T.ivector('target_output')
#! target_var = T.matrix('target_output')
# lasagne.layers.get_output produces a variable for the output of the net
network_output = lasagne.layers.get_output(l_out)
# https://github.com/Lasagne/Lasagne/blob/master/examples/recurrent.py
# The network output will have shape (n_batch, 1); let's flatten to get a
# 1-dimensional vector of predicted values
# predicted_values = network_output.flatten()
# flat_target_values = target_values.flatten()
# Our cost will be mean-squared error
# cost = T.mean((predicted_values - flat_target_values)**2)
# cost = T.mean((network_output - target_values)**2)
# The loss function is calculated as the mean of the (categorical) cross-entropy between the prediction and target.
#! cost = T.nnet.categorical_crossentropy(network_output,target_var).mean()
cost = T.nnet.categorical_crossentropy(network_output,target_values).mean()
# Retrieve all parameters from the network
all_params = lasagne.layers.get_all_params(l_out,trainable=True)
# Compute AdaGrad updates for training
writeLog("Computing updates...")
writeLog("Using optimizer: " + self.optimizer)
if (self.optimizer == "sgd"):
updates = lasagne.updates.sgd(cost, all_params, self.learning_rate)
elif (self.optimizer == "adagrad"):
updates = lasagne.updates.adagrad(cost, all_params, self.learning_rate)
elif (self.optimizer == "adadelta"):
updates = lasagne.updates.adadelta(cost, all_params, self.learning_rate, 0.95)
elif (self.optimizer == "momentum"):
updates = lasagne.updates.momentum(cost, all_params, self.learning_rate, 0.9)
elif (self.optimizer == "nesterov_momentum"):
updates = lasagne.updates.nesterov_momentum(cost, all_params, self.learning_rate, 0.9)
elif (self.optimizer == "rmsprop"):
updates = lasagne.updates.rmsprop(cost, all_params, self.learning_rate, 0.9)
else:
updates = lasagne.updates.adam(cost, all_params, self.learning_rate, beta1=0.9, beta2=0.999)
# Theano functions for training and computing cost
writeLog("Compiling train function...")
self.train = theano.function([l_in.input_var, target_values, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
#! self.train = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
writeLog("Compiling train cost computing function...")
self.compute_cost = theano.function([l_in.input_var, target_values, l_mask.input_var], cost, allow_input_downcast=True)
#! self.compute_cost = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, allow_input_downcast=True)
# In order to generate text from the network, we need the probability distribution of the next character given
# the state of the network and the input (a seed).
# In order to produce the probability distribution of the prediction, we compile a function called probs.
writeLog("Compiling propabilities computing function...")
self.propabilities = theano.function([l_in.input_var, l_mask.input_var],network_output,allow_input_downcast=True)
self.start_time = time.time()
self.previous_time = self.start_time
self.cumul_train_time = 0
self.cumul_test_time = 0
self.auc = 0
self.sr_trains = []
self.sr_tests = []
self.sr_tests_75p = []
self.sr_tests_50p = []
self.sr_tests_25p = []
self.sr_examplesSeen = []
self.time_used = []
self.avg_costs = []
self.time_used_for_test = []
self.all_cms = []
def predict_outcome(tracesToCalculateFor, selIndex, notSelIndex, tracePercentage):
batches, masks = self.gen_prediction_data(tracesToCalculateFor, tracePercentage)
correct = 0
predictions = []
probs_out = []
for i in range(len(batches)):
x = batches[i]
mask = masks[i]
probs = self.propabilities(x, mask)
for prob in enumerate(probs):
selProb = prob[1][selIndex]
notSelProb = prob[1][notSelIndex]
probs_out.append(selProb / (selProb + notSelProb))
predictions.append(selProb >= notSelProb)
return predictions, probs_out
def calculateSuccessRate(tracesToCalculateFor, tracePercentage, testId):
selIndex = self.word_to_index[IN_SELECTION_TOKEN]
notSelIndex = self.word_to_index[NOT_IN_SELECTION_TOKEN]
predictions, probs = predict_outcome(tracesToCalculateFor, selIndex, notSelIndex, tracePercentage)
numSuccess = 0
cm = [0, 0, 0, 0]
exps = []
for i in range(len(tracesToCalculateFor)):
expected = tracesToCalculateFor[i].isSelected
actual = predictions[i]
exps.append(1 if expected else 0)
numSuccess += 1 if expected == actual else 0
cm[0] += 1 if expected and actual else 0
cm[1] += 1 if not expected and not actual else 0
cm[2] += 1 if not expected and actual else 0
cm[3] += 1 if expected and not actual else 0
self.cms[testId] = cm
self.cms_str += ":%i_%i_%i_%i" % (cm[0], cm[1], cm[2], cm[3])
if (testId == 1):
self.auc = metrics.roc_auc_score(exps, probs)
return numSuccess / len(tracesToCalculateFor)
def report(num_examples_seen, it, avg_cost, num_report_iterations):
t2 = time.time()
tutrain = (t2 - self.previous_time)
self.cumul_train_time = self.cumul_train_time + tutrain
self.time_used.append(tutrain)
self.generate_trace(5)
self.sr_examplesSeen.append(num_examples_seen)
self.cms = {}
self.cms_str = ""
writeLog("Testing 100% training samples")
sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
self.sr_trains.append(sr_train)
writeLog("Testing 100% test samples")
sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
writeLog("Testing 75% test samples")
sr_tests_75p = calculateSuccessRate(self.traces_test, 0.75, 2)
writeLog("Testing 50% test samples")
sr_tests_50p = calculateSuccessRate(self.traces_test, 0.5, 3)
writeLog("Testing 25% test samples")
sr_tests_25p = calculateSuccessRate(self.traces_test, 0.25, 4)
self.sr_tests.append(sr_test)
self.sr_tests_75p.append(sr_tests_75p)
self.sr_tests_50p.append(sr_tests_50p)
self.sr_tests_25p.append(sr_tests_25p)
self.avg_costs.append(avg_cost)
data_size = len(self.TS_train)
epoch = it*self.batch_size/data_size
t3 = time.time()
tutest = (t3 - t2)
self.cumul_test_time = self.cumul_test_time + tutest
self.previous_time = t3
self.time_used_for_test.append(tutest)
self.all_cms.append(self.cms)
writeLog("Iteration: %i (%i) Total time used: ~%f seconds (train: %f, test: %f)" % (num_report_iterations, num_examples_seen, (time.time() - self.start_time) * 1., self.cumul_train_time, self.cumul_test_time))
writeLog("Epoch {} average loss = {}".format(epoch, avg_cost))
writeLog("Success rates: test: %f test 75%%: %f test 50%%: %f test 25%%: %f train: %f" % (sr_test, sr_tests_75p, sr_tests_50p, sr_tests_25p, sr_train))
writeResultRow([datetime.now().replace(microsecond=0).isoformat(),
"ok", "", self.case_name, self.dataset_name, self.dataset_size,
self.algorithm, self.num_layers, self.hidden_dim_size,
self.optimizer, self.learning_rate, self.seq_length, self.batch_size,
self.grad_clipping, self.num_iterations_between_reports,
num_report_iterations,
num_examples_seen, epoch, tutrain, self.cumul_train_time, tutest,
self.cumul_test_time, sr_train, sr_test, sr_tests_75p, sr_tests_50p,
sr_tests_25p,
avg_cost, self.auc, self.cms[1][0], self.cms[1][1], self.cms[1][2], self.cms[1][3],
str(self.cms_str),
self.predict_only_outcome, self.final_trace_only, self.trace_length_modifier,
self.num_iterations_between_reports * self.num_callbacks == 100000 * 50,
self.max_num_words, self.truncate_unknowns])
# self.draw_chart()
# writeLog("Calculating initial probabilities.")
# self.sr_examplesSeen.append(0)
self.cms = {}
self.cms_str = ""
# sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
# self.sr_trains.append(sr_train)
# sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
# self.sr_tests.append(sr_test)
# self.time_used.append(time.time() - self.start_time)
# self.avg_costs.append(0)
# writeLog("Initial success rates: test: %f train: %f" % (sr_test, sr_train))
num_examples_seen = self.trainModel(report)
self.cms = {}
self.cms_str = ""
self.sr_examplesSeen.append(num_examples_seen)
sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
self.sr_trains.append(sr_train)
sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
self.sr_tests.append(sr_test)
self.avg_costs.append(0)
writeLog("Final success rates: test: %f train: %f" % (sr_test, sr_train))
self.time_used.append(self.cumul_train_time)
# self.draw_chart()
def draw_chart(self):
plt.plot(self.sr_examplesSeen, self.sr_trains, label = 'Train data')
plt.plot(self.sr_examplesSeen, self.sr_tests, label = 'Test data')
plt.plot(self.sr_examplesSeen, self.avg_costs, label = 'Avg. Cost')
plt.xlabel('iterations')
plt.ylabel('Success rate')
plt.title('Classification prediction success rate - ' + self.case_name)
plt.legend()
plt.show()
def generate_trace(self, min_length=5):
# We start the sentence with the start token
x = np.zeros((1, self.seq_length, self.vocab_size))
mask = np.zeros((1, self.seq_length))
new_sentence = []
i = 0
# Repeat until we get an end token
selIndex = self.word_to_index[IN_SELECTION_TOKEN]
notSelIndex = self.word_to_index[NOT_IN_SELECTION_TOKEN]
while not ((len(new_sentence) > 0) and ((new_sentence[-1] == selIndex) or (new_sentence[-1] == notSelIndex))):
probs = self.propabilities(x, mask)[0]
# samples = np.random.multinomial(1, probs)
# index = np.argmax(samples)
index = np.random.choice(range(len(probs)), p=probs)
new_sentence.append(index)
x[0, i, index] = 1
mask[0, i] = 1
i += 1
# Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
if len(new_sentence) >= self.seq_length or index == self.word_to_index[UNKNOWN_TOKEN]:
writeLog("Generated exceedingly long example trace. Skipping.")
return None
if len(new_sentence) < min_length:
return None
res = [self.index_to_word[x] for x in new_sentence]
writeLog("Generated example trace of length %d: %s" % (len(res), str(res)))
return res
def write_csv(self, name, csvwriter):
for i in range(len(self.sr_examplesSeen)):
csvwriter.writerow([self.case_name, self.sr_examplesSeen[i],
self.time_used[i], self.sr_trains[i], self.sr_tests[i],
self.avg_costs[i], self.optimizer, self.hidden_dim_size,
self.dataset_size])
trace_registry = {
}
| 26,945 | 49.745763 | 221 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
import sys
import os
import numpy as np
import json
import logging
import traceback
from time import time, sleep
from optparse import OptionParser
from pathlib import Path
import collections
import datetime
import copy
from multiprocessing import Process
from model import Model
from modelcluster import ModelCluster
from eventlog import EventLog
from cluster import Clustering
from my_utils import writeLog, generate_traces, configure, TraceData, get_filename, getInputDatasetFilename, getOutputPath, getInputPath
outputDirectory = "C:/Users/User/Dropbox/Aalto/testing/testruns/"
#outputDirectory = "C:/Users/marhink/Dropbox/Aalto/testing/testruns/"
#outputDirectory = "d:\\dev\\aalto\\testing\\testruns\\"
inputFilesDirectory = "d:\\dev\\aalto\\testing\\testdata\\"
modelOutputDirectory = inputFilesDirectory + "..\\models\\"
# http://scikit-learn.org/stable/auto_examples/text/plot_document_clustering.html#sphx-glr-auto-examples-text-plot-document-clustering-py
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
#op.add_option("--built-in-test",
# action="store_false", dest="built_in_test", default=True,
# help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
op.add_option("--log_to_file_only",
action="store_true", dest="log_to_file_only", default=False,
help="Output log only into a logfile.")
op.add_option("--input_data_from_standard_input2",
action="store_true", dest="input_data_from_standard_input2", default=False,
help="Read input data from standard input.")
op.add_option("--log_to_file_only2",
action="store_true", dest="log_to_file_only2", default=False,
help="Output log only into a logfile.")
op.add_option("--input_data_from_standard_input",
action="store_true", dest="input_data_from_standard_input", default=False,
help="Read input data from standard input.")
op.add_option("--configuration_from_standard_input",
action="store_true", dest="configuration_from_standard_input", default=False,
help="Read configuration from standard input.")
op.add_option("-m", "--model", dest="model_filename",
help="File containing trained model", metavar="FILE")
op.add_option("-s", "--skip", dest="skip_tests", type="int", default=0,
help="Number of tests to skip from the beginning")
op.add_option("--m2", dest="model_filename2",
help="File containing trained model", metavar="FILE")
op.add_option("-i", "--input", dest="input_filename",
help="File containing trace data of traces to use for training", metavar="FILE")
op.add_option("-t", "--test", dest="test_filename",
help="File containing trace data of traces to use for testing", metavar="FILE")
op.add_option("--i2", dest="input_filename2",
help="File containing trace data of traces to use for training", metavar="FILE")
op.add_option("-c", "--configuration", dest="configuration_filename",
help="File containing configuration for the tests", metavar="FILE")
op.add_option("-o", "--output", dest="output_filename",
help="Name of the file to generate", metavar="FILE")
op.add_option("--predict_next_activity",
action="store_true", dest="predict_next_activity", default=False,
help="Predict the next activity.")
op.add_option("--disable_durations",
action="store_true", dest="disable_durations", default=False,
help="Do not use duration information in training and prediction.")
op.add_option("--disable_event_attributes",
action="store_true", dest="disable_event_attributes", default=False,
help="Do not use event attributes in training and prediction.")
op.add_option("--disable_case_attributes",
action="store_true", dest="disable_case_attributes", default=False,
help="Do not use case attributes in training and prediction.")
op.add_option("--disable_activity_labels",
action="store_true", dest="disable_activity_labels", default=False,
help="Do not use activity labels in training and prediction.")
op.add_option("-w", "--wait-for-config", dest="test_config_filename",
help="File to use to bring new test configurations", metavar="FILE")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
random_seed = 123
default_parameters = {
"algorithm": "gru",
"case_name": "test",
"dataset_name": None,
"test_name": "",
"num_layers": 1,
"optimizer": "adam",
"learning_rate": 0.01,
"num_callbacks": 100,
"batch_size": 256,
"hidden_dim_size": 256,
#"batch_size": 16,
#"hidden_dim_size": 8,
"num_iterations_between_reports": 1000,
"grad_clipping": 100,
"predict_only_outcome": True,
"final_trace_only": True,
"trace_length_modifier": 1.0,
"truncate_unknowns": False,
"max_num_words": None,
"num_epochs_per_iteration": 1.0,
"num_models": 1,
"file_handle": "test",
"use_single_event_clustering": False,
"num_case_clusters": 5,
"num_event_clusters": 5,
"case_clustering_method": "xmeans",
"case_clustering_include_activity_occurrences": False,
"case_clustering_include_case_attributes": True,
"event_clustering_method": "xmeans",
"ignore_values_threshold_for_case_attributes": 0.1,
"ignore_values_threshold_for_event_attributes": 0.1,
"duration_split_method": "5-buckets",
"predict_next_activity": opts.predict_next_activity,
"disable_durations": opts.disable_durations,
"disable_event_attributes": opts.disable_event_attributes,
"disable_case_attributes": opts.disable_case_attributes,
"model_filename": opts.model_filename,
"input_filename": opts.input_filename,
"test_filename": opts.test_filename,
"output_filename": opts.output_filename,
"test_config_filename": opts.test_config_filename,
"disable_raw_event_attributes": False,
"disable_raw_case_attributes": False,
"include_activity_occurrences_as_raw_case_attributes": False,
"dataset_name": "",
"write_input_to_file": False,
"predict_only": False,
"max_num_samples_training_cluster": 10000,
"max_num_traces_to_test": 10000,
"max_num_traces_in_testing": 100000,
"use_single_value_for_duration": False,
"max_num_case_clusters": 20,
"max_num_event_clusters": 20,
"pause_filename": "pause.txt",
"input_directory": inputFilesDirectory,
"output_directory": outputDirectory,
"model_output_directory": modelOutputDirectory,
"spawn_worker_processes": False,
"max_num_cases_in_training": None,
"max_num_traces_in_training": None,
"max_num_traces_in_training_test": None,
"test_data_percentage": 0.75,
"split_traces_to_prefixes": False,
"min_splitted_trace_prefix_length": 4,
"max_trace_length": 100,
"cross-validation-splits": None,
"create-unknown-tokens": True
}
configuration = {
"runs":[
{}
],
}
configurationPath = None
def run(parameters):
rng = np.random.RandomState(random_seed)
writeLog("Running test using parameters: " + json.dumps(parameters))
inputJson = None
if (opts.input_data_from_standard_input):
writeLog("Reading from standard input")
inputJson = sys.stdin.readline()
writeLog("Standard input reading finished")
if (parameters["write_input_to_file"]):
filename = get_filename("testdata_", "%s_%s_%s" % (parameters["file_handle"], "", ""), "json")
with open(filename, "w") as f:
f.write(inputJson)
if (parameters["model_filename"] != None):
m = ModelCluster(rng)
m.load(parameters["model_filename"], parameters)
inputFilename = None if parameters["test_filename"] == None else parameters["test_filename"]
if (inputFilename != None):
writeLog("Reading test data from file: " + inputFilename)
el = EventLog(parameters, rng, inputFilename, modelCluster = m, inputJson = inputJson)
jsonResult = "{}"
if (len(el.testData) > 0):
writeLog("Test set contains %d cases." % (len(el.testData)))
result = m.test(el)
jsonResult = json.dumps(result)
filename = get_filename("predict_result", "%s_%s_%s" % (parameters["file_handle"], m.case_name, m.eventlog.filename), "json")
with open(filename, "w") as f:
f.write(jsonResult)
writeLog("Generated results saved into file: %s" % filename)
else:
writeLog("Test set is empty. No results created.")
print(jsonResult)
elif ((parameters["input_filename"] != None) or (inputJson != None)):
if parameters["cross-validation-splits"] != None:
EventLog.performCrossValidatedTests(parameters, inputJson, rng)
return
e = EventLog(parameters, rng, parameters["input_filename"], parameters["test_data_percentage"], inputJson = inputJson)
m = ModelCluster(rng)
m.initialize(
parameters = parameters,
case_clustering = Clustering(parameters["case_clustering_method"], parameters, {
"num_clusters": parameters["num_case_clusters"],
"max_num_clusters": parameters["max_num_case_clusters"],
"ignore_values_threshold": parameters["ignore_values_threshold_for_case_attributes"]
}),
event_clustering = Clustering(parameters["event_clustering_method"], parameters, {
"num_clusters": parameters["num_event_clusters"],
"max_num_clusters": parameters["max_num_event_clusters"],
"ignore_values_threshold": parameters["ignore_values_threshold_for_event_attributes"]
}),
rng = rng)
trainResult = m.train(e)
filename = m.save(parameters["file_handle"], parameters)
writeLog("Generated model saved into file: %s" % filename)
print(filename)
if (parameters["test_filename"] != None):
m = ModelCluster(rng)
m.load(filename, parameters)
el = EventLog(parameters, rng, parameters["test_filename"], modelCluster = m)
result = m.test(el, 1.0, trainResult)
jsonResult = json.dumps(result)
filename = get_filename("predict_result", "%s_%s_%s" % (parameters["file_handle"], m.case_name, m.eventlog.filename), "json")
with open(filename, "w") as f:
f.write(jsonResult)
writeLog("Generated results saved into file: %s" % filename)
print(jsonResult)
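# In short: when --model is given the script only scores the test file against
# a previously trained ModelCluster; otherwise it trains a new cluster from the
# input data (optionally cross-validated), saves it, and, if --test is also
# given, immediately evaluates the saved model.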
def isFile(filename):
path = Path(filename)
try:
if (path.is_file()):
return True
except:
return False
return False
def testPaused(parameters):
wasPaused = False
while True:
filename = parameters["pause_filename"]
if (not isFile(filename)):
filename = getInputPath() + parameters["pause_filename"]
if (not isFile(filename)):
filename = getOutputPath() + parameters["pause_filename"]
if (not isFile(filename)):
break
if not wasPaused:
writeLog("Tests paused until file is removed: %s" % filename)
wasPaused = True
sleep(1)
if wasPaused:
writeLog("Tests continued...")
def waitForConfiguration(origFilename, parameters):
wasPaused = False
filename = None
while True:
filename = origFilename
if isFile(filename):
break
filename = getInputPath() + origFilename
if isFile(filename):
break
filename = getOutputPath() + origFilename
if isFile(filename):
break
if not wasPaused:
writeLog("Tests paused until a new configuration file appears in: %s" % origFilename)
wasPaused = True
sleep(1)
if wasPaused:
writeLog("Got new configuration. Continuing...")
writeLog("Reading new configuration from %a" % filename)
result = loadConfiguration(filename, parameters)
os.remove(filename)
return result
def collect(configuration, parameters, to):
if isinstance(configuration, list):
to += configuration
return True
conf = dict(configuration)
parameters = dict(parameters)
if (("exit" in parameters) and (parameters["exit"])):
return False
result = True
if "for_each" in conf:
for iterConf in conf["for_each"]:
newConf = dict(conf)
newConf.update(iterConf)
newConf.pop("for_each")
result &= collect(newConf, parameters, to)
return result
if "include" in conf:
filename = conf["include"]
conf.pop("include")
parameters.update(conf)
if (not isFile(filename)):
filename = default_parameters["input_directory"] + conf["include"]
if (not isFile(filename) and (configurationPath != None)):
filename = str(configurationPath.parent) + conf["include"]
with open(filename) as data:
newConf = json.load(data)
result &= collect(newConf, parameters, to)
else:
leaf = not ("runs" in conf)
if not leaf:
conf.pop("runs")
parameters.update(conf)
if (leaf):
if (("skip" in parameters) and (parameters["skip"])):
return result
to.append(parameters)
else:
for childConfiguration in configuration["runs"]:
result &= collect(childConfiguration, parameters, to)
return result
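# A minimal configuration sketch (hypothetical values) that collect() above
# would expand into two runs:
#   { "runs": [ { "dataset_name": "bpic12.json",
#                 "for_each": [ { "algorithm": "gru" },
#                               { "algorithm": "lstm" } ] } ] }
# Keys set at an outer level become defaults for every nested run; "include"
# pulls in another configuration file, falling back to input_directory or the
# directory of the main configuration file when the path is not found as-is.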
def loadConfiguration(filename, parameters):
global configurationPath
path = Path(filename)
if (not path.is_file()):
filename = default_parameters["input_directory"] + filename
configurationPath = Path(filename)
if (configurationPath.is_file()):
with open(filename) as data:
configuration = json.load(data)
if isinstance(configuration, list):
configuration = {
"runs": configuration
}
parameters.update(configuration)
configure(parameters["input_directory"], parameters["output_directory"], opts.log_to_file_only)
return configuration
return None
def main(configuration, parameters):
def saveConfigs(testConfigs):
jsonConfig = json.dumps(testConfigs)
with open(started_tests_filename, "w") as f:
f.write(jsonConfig)
if configuration != None:
tests = []
if Path(started_tests_filename).is_file():
tsts = None
with open(started_tests_filename) as data:
tsts = json.load(data)
for t in tsts:
ts = dict(default_parameters)
ts.update(t)
tests.append(ts)
writeLog("Loaded remaining %d test configurations from %s." % (len(tests), started_tests_filename))
else:
if (not collect(configuration, default_parameters, tests)):
writeLog("Exit requested. Finishing tests...")
return
saveConfigs(tests)
writeLog("Generated %d test configurations." % (len(tests)))
if opts.skip_tests > 0:
tests = tests[opts.skip_tests:]
saveConfigs(tests)
writeLog("Skipping %d first test configurations leaving total of %d test remaining." % (opts.skip_tests, len(tests)))
testPaused(parameters)
nTests = len(tests)
i = 1
while (len(tests) > 0):
writeLog("Starting test %d of %d." % (i, nTests))
try:
run(tests[0])
except:
writeLog("Exception: " + traceback.format_exc())
tests = tests[1:]
saveConfigs(tests)
testPaused(parameters)
i = i + 1
os.remove(started_tests_filename)
if ("test_config_filename" in default_parameters) and (default_parameters["test_config_filename"] != None):
parameters = dict(default_parameters)
configuration = waitForConfiguration(parameters["test_config_filename"], parameters)
main(configuration, parameters)
writeLog("Tests finished.")
started_tests_filename = default_parameters["output_directory"] + "current-tests.json"
parameters = dict(default_parameters)
configuration = None
if (opts.configuration_from_standard_input):
    writeLog("Reading configuration from standard input")
    jsonConfig = sys.stdin.readline()
    configuration = json.loads(jsonConfig)
    writeLog("Standard input reading finished")
if (opts.configuration_filename != None):
configuration = loadConfiguration(opts.configuration_filename, parameters)
configure(parameters["input_directory"], parameters["output_directory"], opts.log_to_file_only)
writeLog(__doc__)
if __name__ == '__main__':
main(configuration, parameters)
| 17,910 | 39.522624 | 137 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/eventlog.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
import sys
import numpy as np
import json
import logging
from time import time
from optparse import OptionParser
import collections
import datetime
import copy
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.model_selection import KFold
from sklearn import metrics
from pathlib import Path
import ntpath
from model import Model, DURATION_TOKEN_PREFIX, EVENT_ATTRIBUTE_TOKEN_PREFIX, WORD_PART_SEPARATOR
from my_utils import writeLog, generate_traces, configure, TraceData, getInputDatasetFilename
from modelcluster import ModelCluster
from cluster import Clustering
def parse_date(jd):
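    # Parses JSON date strings of the form "/Date(<milliseconds>[+-HHMM])/" into a
    # datetime, adding the optional UTC offset to the millisecond timestamp.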
sign = jd[-7]
if sign not in '-+' or len(jd) == 13:
millisecs = int(jd[6:-2])
else:
millisecs = int(jd[6:-7])
hh = int(jd[-7:-4])
mm = int(jd[-4:-2])
if sign == '-': mm = -mm
millisecs += (hh * 60 + mm) * 60000
return datetime.datetime(1970, 1, 1) \
+ datetime.timedelta(microseconds=millisecs * 1000)
class EventLog:
def __init__(self, parameters, rng, filename = None, pTraining = 0.0, modelCluster = None, inputJson = None):
writeLog("Initializing event log")
self.rng = rng
self.parameters = dict(parameters)
self.trainingData = []
self.testData = []
if (inputJson != None):
self.data = json.loads(inputJson)
self.filename = "unnamed"
self.filepath = ""
elif (filename != None):
path = Path(filename)
if (not path.is_file()):
filename = getInputDatasetFilename(filename)
self.filepath = filename
self.filename = ntpath.basename(filename)
with open(filename) as f:
self.data = json.load(f)
else:
return
self.pTraining = pTraining
if pTraining == None:
return
if (modelCluster != None):
model = modelCluster.models[0]
if not ("activities" in self.data):
self.data["activities"] = model.eventlogActivities
if not ("attributes" in self.data):
self.data["attributes"] = model.eventlogAttributes
self.setTrainingSize(parameters, pTraining)
self.initializationReport()
def initializationReport(self):
writeLog("Initialized event log %s" % (self.filename))
writeLog(" # cases: %d (train: %d, test: %d)" % (len(self.data["cases"]), len(self.trainingData), len(self.testData)))
writeLog(" # activities: %d" % (len(self.data["activities"])))
writeLog(" # case attributes: %d" % (len(self.data["attributes"]["case"])))
writeLog(" # event attributes: %d" % (len(self.data["attributes"]["event"])))
if (self.pTraining != None):
writeLog(" Training set percentage: %d" % (int(self.pTraining * 100)))
def initializeForTesting(self, model):
self.trainingData = []
self.testData = np.asarray(self.data["cases"])
if (model.eventlogActivities != None):
self.data["activities"] = model.eventlogActivities
if (model.eventlogAttributes != None):
self.data["attributes"] = model.eventlogAttributes
if (model.eventlogFilename != None):
self.filename = model.eventlogFilename
if (model.eventlogFilepath != None):
self.filepath = model.eventlogFilepath
self.initializeDerivedData()
def createEmptyCopy(self, parameters = None):
result = EventLog(parameters if parameters != None else self.parameters, self.rng)
result.trainingData = []
result.testData = []
result.filename = self.filename
result.filepath = self.filepath
result.data = {
"cases": [],
"activities": copy.copy(self.data["activities"]) if ("activities" in self.data) else None,
"attributes": copy.copy(self.data["attributes"] if ("attributes" in self.data) else None)
}
result.parent = self
return result
def setTrainingSize(self, parameters, pTraining):
cases = np.asarray(self.data["cases"])
maxNumCases = parameters["max_num_cases_in_training"]
if (maxNumCases != None) and (maxNumCases < len(cases)):
writeLog("Filtering out %d cases out of %d" % (maxNumCases, len(cases)))
cases = np.random.choice(cases, maxNumCases, replace=False)
self.data["cases"] = cases
nTraining = int(len(cases) * pTraining)
indexes = self.rng.permutation(len(cases))
self.trainingData = cases[indexes[:nTraining]]
self.testData = cases[indexes[nTraining:]]
self.initializeDerivedData()
def getActivityOccurrences(self, cases):
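        # Returns a mapping from activity id to the activity metadata together with
        # the list of events ("occ") of the given cases that refer to that activity.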
result = copy.copy(self.activities)
for activityId, activity in result.items():
activity["occ"] = []
for c in cases:
for e in c["t"]:
activity = result[e[0]]
activity["occ"].append(e)
return result
def initializeDerivedData(self, forSplittedEventLog = False):
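        # Builds activity lookup tables and, unless called for a splitted event log,
        # precomputes per-case activity occurrence counts into c["occ"].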
self.activities = {}
self.activitiesByLabel = {}
if ("activities" in self.data):
for a in self.data["activities"]:
self.activities[a["id"]] = {
"name": a["name"],
"occ": []
}
self.activitiesByLabel[a["name"].replace(" ", "_")] = a
if (not forSplittedEventLog):
writeLog("Initializing activity counts for %d cases" % (len(self.data["cases"])))
for c in self.data["cases"]:
counters = collections.Counter(t[0] for t in c["t"])
c["occ"] = [counters[act["id"]] for act in self.data["activities"]]
self.flows = {}
def preProcessForTraining(self, parameters):
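        # Collects duration statistics (min/max/percentiles) for every observed
        # activity transition ("flow") in the training data; these statistics are
        # later used to bucket event durations into duration tokens.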
disableDurations = parameters["disable_durations"]
if not disableDurations:
numEvents = 0
writeLog("Pre-processing %d cases" % (len(self.trainingData)))
for c in self.trainingData:
prev = None
prevDate = None
evts = c["t"]
numEvents += len(evts)
for e in evts:
eDate = parse_date(e[1])
if prev is not None:
key = "%s->%s" % (prev[0], e[0])
if (key in self.flows):
flow = self.flows[key]
else:
flow = self.flows[key] = { "name": key, "occ": [] }
delta = eDate - prevDate
flow["occ"].append(delta)
prevDate = eDate
prev = e
writeLog("Total number of events in training data: %d (Average case length: %f)" % (numEvents, (numEvents / len(self.trainingData))))
writeLog("Pre-processing %d flows" % (len(self.flows)))
for key in self.flows:
f = self.flows[key]
nOcc = len(f["occ"])
f["occ"].sort()
if (nOcc > 0):
min = f["min"] = f["occ"][0]
max = f["max"] = f["occ"][nOcc - 1]
f["avg"] = np.mean(f["occ"])
f["med"] = np.median(f["occ"])
f["perc10"] = np.percentile(f["occ"], 10)
f["perc25"] = np.percentile(f["occ"], 25)
f["perc75"] = np.percentile(f["occ"], 75)
f["perc90"] = np.percentile(f["occ"], 90)
f["diff"] = f["max"] - f["min"]
f["fast"] = f["perc10"]
f["slow"] = f["perc90"]
def addTrace(self, traceData, isForTraining):
self.data["cases"].append(traceData)
if (isForTraining):
self.trainingData.append(traceData)
else:
self.testData.append(traceData)
def convertTracesFromInputData(self, data, parameters, trace_length_modifier):
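        # Converts raw cases into TraceData objects. Every event becomes a token of
        # the form <duration bucket>:<activity label>:<event attribute cluster>,
        # e.g. "_D_fast:Create_Order:_EA_3" (the activity name here is illustrative).
        # Optionally each prefix of a case becomes its own trace, and traces longer
        # than max_trace_length are truncated.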
writeLog("Converting %d cases into event traces." % (len(data)))
enableDurations = not parameters["disable_durations"]
splitDurationsInto5Buckets = parameters["duration_split_method"] == "5-buckets"
addOnlyFullTraceForFinisher = not parameters["predict_next_activity"]
useSingleValueForDuration = parameters["use_single_value_for_duration"]
includeActivityOccurrencesAsRawCaseAttributes = parameters["include_activity_occurrences_as_raw_case_attributes"]
disableEventAttributes = parameters["disable_event_attributes"]
splitTracesToPrefixes = parameters["split_traces_to_prefixes"]
minPrefixLength = parameters["min_splitted_trace_prefix_length"]
maxTraceLength = parameters["max_trace_length"]
result = []
numFilteredCases = 0
numFilteredTraces = 0
for c in data:
traces = []
l = len(c["t"])
finisherTraceFiltered = False
if l > minPrefixLength:
if splitTracesToPrefixes:
if (l > maxTraceLength):
numFilteredCases += 1
numFilteredTraces += l - maxTraceLength - minPrefixLength
l = maxTraceLength
finisherTraceFiltered = True
for i in range(minPrefixLength, l):
traces.append(c["t"][:i])
else:
if (l > maxTraceLength):
numFilteredCases += 1
numFilteredTraces += 1
finisherTraceFiltered = True
traces.append(c["t"][:maxTraceLength])
else:
traces.append(c["t"])
if len(traces) == 0:
continue
lastTrace = traces[len(traces) - 1]
for trace in traces:
sentence = []
durations = []
cAttributes = (c["a"] + c["occ"]) if includeActivityOccurrencesAsRawCaseAttributes else c["a"]
prev = None
prevDate = None
eAttributes = []
for e in trace:
eDate = parse_date(e[1])
durationPart = DURATION_TOKEN_PREFIX + "normal"
dp = 0.5
if enableDurations and prev is not None:
key = "%s->%s" % (prev[0], e[0])
flow = self.flows[key] if key in self.flows else None
delta = eDate - prevDate
if (flow != None) and ("slow" in flow):
if splitDurationsInto5Buckets:
if (delta > flow["perc90"]):
durationPart = DURATION_TOKEN_PREFIX + "perc90"
dp = 0.0
elif (delta > flow["perc75"]):
durationPart = DURATION_TOKEN_PREFIX + "perc75"
dp = 0.25
elif (delta > flow["perc25"]):
durationPart = DURATION_TOKEN_PREFIX + "perc25"
dp = 0.5
elif (delta > flow["perc10"]):
durationPart = DURATION_TOKEN_PREFIX + "perc10"
dp = 0.75
else:
durationPart = DURATION_TOKEN_PREFIX + "perc0"
dp = 1.0
else:
if (delta > flow["slow"]):
durationPart = DURATION_TOKEN_PREFIX + "slow"
dp = 0.0
elif (delta < flow["fast"]):
durationPart = DURATION_TOKEN_PREFIX + "fast"
dp = 1.0
actPart = self.activities[e[0]]["name"]
eAttributes += [e[2:(len(e) - 1) if disableEventAttributes else -1]]
clusterPart = EVENT_ATTRIBUTE_TOKEN_PREFIX + str(e[len(e) - 1])
sentence.append(durationPart + WORD_PART_SEPARATOR + actPart.replace(WORD_PART_SEPARATOR, "_") + WORD_PART_SEPARATOR + clusterPart)
if useSingleValueForDuration:
durations.append(dp)
prevDate = eDate
prev = e
finisher = c["f"] if "f" in c else ((trace == lastTrace) and (not finisherTraceFiltered))
cluster = c["_cluster"] if ("_cluster" in c) else None
if (not (addOnlyFullTraceForFinisher and finisher)):
result.append(TraceData(c["n"], c["s"] if "s" in c else None, "s" in c, cAttributes, eAttributes, cluster, sentence, durations, parameters, trace_length_modifier, self.model, False))
if (finisher):
result.append(TraceData(c["n"] + "_f", c["s"] if "s" in c else None, "s" in c, cAttributes, eAttributes, cluster, sentence, durations, parameters, trace_length_modifier, self.model, True))
writeLog("Generated %d event traces out of %d cases." % (len(result), len(data)))
if (numFilteredTraces > 0):
writeLog("Filtered %d traces in %d cases due to them having more than maximum allowed number of events (%d)" % (numFilteredTraces, numFilteredCases, maxTraceLength))
return result
def performCrossValidationRun(self, fullTestData, trainIndex, testIndex, parameters):
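        # Trains a ModelCluster on the training fold of one cross-validation split
        # and then evaluates it on the corresponding test fold.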
maxNumCases = parameters["max_num_cases_in_training"]
cvRunIndex = parameters["cross-validation-run"]
nSplits = parameters["cross-validation-splits"]
writeLog("Starting cross-validation run %d of %d" % (cvRunIndex, nSplits))
if (maxNumCases != None) and (maxNumCases < len(trainIndex)):
writeLog("Filtering out %d cases out of %d" % (maxNumCases, len(trainIndex)))
trainIndex = np.random.choice(trainIndex, maxNumCases, replace=False)
runEventLog = self.createEmptyCopy()
runEventLog.data["cases"] = fullTestData[trainIndex]
runEventLog.pTraining = parameters["test_data_percentage"]
runEventLog.setTrainingSize(parameters, runEventLog.pTraining)
runEventLog.initializationReport()
m = ModelCluster(runEventLog.rng)
m.initialize(
parameters = parameters,
case_clustering = Clustering(parameters["case_clustering_method"], parameters, {
"num_clusters": parameters["num_case_clusters"],
"max_num_clusters": parameters["max_num_case_clusters"],
"ignore_values_threshold": parameters["ignore_values_threshold_for_case_attributes"]
}),
event_clustering = Clustering(parameters["event_clustering_method"], parameters, {
"num_clusters": parameters["num_event_clusters"],
"max_num_clusters": parameters["max_num_event_clusters"],
"ignore_values_threshold": parameters["ignore_values_threshold_for_event_attributes"]
}),
rng = runEventLog.rng)
trainResult = m.train(runEventLog)
writeLog("Starting cross-validation test for run %d" % (cvRunIndex))
runEventLog = self.createEmptyCopy()
runEventLog.data["cases"] = fullTestData[testIndex]
runEventLog.testData = fullTestData[testIndex]
runEventLog.trainingData = []
runEventLog.pTraining = 0.0
runEventLog.initializeDerivedData()
runEventLog.initializationReport()
maxNumTraces = parameters["max_num_traces_in_testing"] if "max_num_traces_in_testing" in parameters else None
m.test(runEventLog, 1.0, trainResult, maxNumTraces)
@staticmethod
def performCrossValidatedTests(parameters, inputJson, rng):
e = EventLog(parameters, rng, parameters["input_filename"], None, inputJson = inputJson)
e.performCrossValidatedTestsForFullEventLog()
def performCrossValidatedTestsForFullEventLog(self):
parameters = self.parameters
nSplits = parameters["cross-validation-splits"]
writeLog("Performing cross-validation using %d splits" % (nSplits))
fullTestData = np.asarray(self.data["cases"])
self.initializationReport()
kf = KFold(n_splits=nSplits, random_state=self.rng, shuffle=True)
cvRunIndex = 0
for trainIndex, testIndex in kf.split(fullTestData):
cvRunIndex += 1
parameters["cross-validation-run"] = cvRunIndex
self.performCrossValidationRun(fullTestData, trainIndex, testIndex, parameters)
| 17,464 | 44.839895 | 208 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/cluster.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
# pip install pyclustering
# conda install -c conda-forge matplotlib
# conda install pillow
import sys
import lasagne
from lasagne.layers import *
import numpy as np
import theano as theano
import theano.tensor as T
from time import time
import operator
import pickle
#import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import metrics
from time import time
from pathlib import Path
import pandas as pd
import nltk
import itertools
import json
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES;
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer;
from pyclustering.cluster.xmeans import xmeans, splitting_type;
#from pyxmeans import _minibatch
#from pyxmeans.mini_batch import MiniBatch
#from pyxmeans.xmeans import XMeans
from my_utils import encodeCaseAttributeValueForVocabulary, encodeEventAttributeValueForVocabulary, TraceData, writeLog, writeResultRow, get_filename, getOutputPath, ATTRIBUTE_COLUMN_PREFIX, OUTCOME_SELECTION_TOKEN_PREFIX, DURATION_TOKEN_PREFIX, EVENT_ATTRIBUTE_TOKEN_PREFIX, WORD_PART_SEPARATOR, CASE_ATTRIBUTE_TOKEN_PREFIX, UNKNOWN_TOKEN, OTHER_TOKEN
pd.options.mode.chained_assignment = None # default='warn'
def train_hashvalue(df, parameters):
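    # Assigns one cluster label per distinct attribute value combination by hashing
    # each row; the returned dict maps hash values to label ids and acts as the model.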
hashes = {}
hashId = 0
nextHashId = 0
labels = []
for row in df:
hashValue = hash(tuple(row))
if (hashValue in hashes):
hashId = hashes[hashValue]
else:
nextHashId += 1
hashId = hashes[hashValue] = nextHashId
labels.append(hashId)
writeLog("Hashvalue clustering resulted into %d unique hash values for %d rows." % (len(hashes), len(labels)))
    return hashes, labels, [i + 1 for i in range(nextHashId)]
def predict_hashvalue(df, model):
labels = []
for row in df:
hashId = hash(tuple(row))
        labels.append(model[hashValue] if (hashValue in model) else None)
return labels
def train_kmeans(df, parameters):
num_clusters = parameters["num_clusters"]
return do_train_kmeans(df, num_clusters)
def do_train_kmeans(df, num_clusters, centers = None):
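    # Runs mini-batch k-means for the requested number of clusters; when initial
    # centers are supplied (e.g. by X-means) a single-iteration KMeans refinement
    # is used instead.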
if (df.shape[1] == 0) or (num_clusters < 2):
writeLog("No columns in the table to be clustered. Returning constant labels.")
model = None
labels = len(df) * [0]
return model, labels, [0]
if centers == None:
model = MiniBatchKMeans(n_clusters=num_clusters, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=False)
else:
model = KMeans(n_clusters=num_clusters, init=np.asarray(centers), n_init=1, max_iter=1)
x = model.fit(df)
writeLog("K-means model created for %d clusters." % (model.n_clusters))
return model, x.labels_, [i for i in range(model.n_clusters)]
def predict_kmeans(df, model):
if model == None:
return len(df) * [0]
return model.predict(df)
def train_xmeans(df, parameters):
# create object of X-Means algorithm that uses CCORE for processing
# initial centers - optional parameter, if it is None, then random centers will be used by the algorithm.
# let's avoid random initial centers and initialize them using K-Means++ method:
max_num_clusters = parameters["max_num_clusters"]
num_clusters = parameters["num_clusters"]
initial_centers = kmeans_plusplus_initializer(df, min(df.shape[0], num_clusters)).initialize()
xmeans_instance = xmeans(df, initial_centers, ccore=True, kmax=max_num_clusters)
# run cluster analysis
xmeans_instance.process()
# obtain results of clustering
clusters = xmeans_instance.get_clusters()
writeLog("X-means clustered using %d clusters (init: %d, max: %d). Using that as the desired number of clusters for k-means." % (len(clusters), num_clusters, max_num_clusters))
return do_train_kmeans(df, len(clusters), xmeans_instance.get_centers())
# result = [0 for x in range(len(df))]
# for clusterId, rows in enumerate(clusters):
# for rowId in rows:
# result[rowId] = clusterId
# return xmeans_instance, result
# num_clusters = parameters["num_clusters"]
# create instance of Elbow method using K value from 1 to 10.
# kmin, kmax = 1, 20
# elbow_instance = elbow(df, kmin, kmax)
# process input data and obtain results of analysis
# elbow_instance.process()
# num_clusters = elbow_instance.get_amount() # most probable amount of clusters
# https://datascience.stackexchange.com/questions/34187/kmeans-using-silhouette-score
def predict_xmeans(df, model):
if model == None:
return len(df) * [0]
return model.predict(df)
def train_skmeans(df, parameters):
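    # Silhouette-guided k-means: fits k-means for k = 2..max_num_clusters (on a
    # random sample if the data exceeds max_num_samples_training_cluster) and keeps
    # the model with the highest silhouette score.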
# num_clusters = parameters["num_clusters"]
# create instance of Elbow method using K value from 1 to 10.
# kmin, kmax = 1, 20
# elbow_instance = elbow(df, kmin, kmax)
# process input data and obtain results of analysis
# elbow_instance.process()
# num_clusters = elbow_instance.get_amount() # most probable amount of clusters
# https://datascience.stackexchange.com/questions/34187/kmeans-using-silhouette-score
max_num_clusters = parameters["max_num_clusters"]
Ks = range(2, min(max_num_clusters, len(df)) + 1)
kms = [MiniBatchKMeans(n_clusters=i, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=False) for i in Ks]
writeLog("Performing K-means for cluster sizes 2 - %d" % (min(max_num_clusters, len(df))))
sil_coeff = []
all_labels = []
distance_matrix = None
max_num_samples_training_cluster = parameters["max_num_samples_training_cluster"]
if len(df) > max_num_samples_training_cluster:
writeLog("The number of samples to be clustered (%d) exceeds the configured maximum of %d. Taking random sample of the configured maximum size." % (len(df), max_num_samples_training_cluster))
traindf = df[np.random.choice(df.shape[0], max_num_samples_training_cluster, replace=False), :]
else:
traindf = df
for i, km in enumerate(kms):
x = km.fit(traindf)
if (i == 0):
distance_matrix = pairwise_distances(traindf, metric="euclidean")
score = 0.0
try:
score = silhouette_score(distance_matrix, x.labels_, metric='precomputed')
writeLog("sihouette_score for cluster size %d = %f" % (km.n_clusters, score))
except:
writeLog("Unable to calculate sihouette_score for cluster size %d. Using %f." % (km.n_clusters, score))
if len(traindf) < len(df):
labels = km.predict(df)
else:
labels = x.labels_
sil_coeff.append(score)
all_labels.append(labels)
if score >= 1.0:
writeLog("Maximum silhouette score reached. No need to consider any more clusters.")
break
max_index = np.asarray(sil_coeff).argmax(axis=0)
model = kms[max_index]
labels = all_labels[max_index]
writeLog("Optimum number of clusters: " + str(model.n_clusters))
return model, labels, [i for i in range(model.n_clusters)]
# create object of X-Means algorithm that uses CCORE for processing
# initial centers - optional parameter, if it is None, then random centers will be used by the algorithm.
# let's avoid random initial centers and initialize them using K-Means++ method:
# initial_centers = kmeans_plusplus_initializer(df, num_clusters).initialize();
# xmeans_instance = xmeans(df, initial_centers, ccore=True);
# run cluster analysis
# xmeans_instance.process();
# obtain results of clustering
# clusters = xmeans_instance.get_clusters();
# result = [0 for x in range(len(df))]
# for clusterId, rows in enumerate(clusters):
# for rowId in rows:
# result[rowId] = clusterId
# return xmeans_instance, result
def predict_skmeans(df, model):
if model == None:
return len(df) * [0]
return model.predict(df)
algorithms = {
"hashvalue": {
"train": train_hashvalue,
"predict": predict_hashvalue
},
"kmeans": {
"train": train_kmeans,
"predict": predict_kmeans
},
"s+kmeans": {
"train": train_skmeans,
"predict": predict_skmeans
},
"xmeans": {
"train": train_xmeans,
"predict": predict_xmeans
}
}
class Clustering:
def __init__(self, algorithm = None, globalParameters = None, parameters = None, copyFrom = None):
if copyFrom != None:
self.algorithm = copyFrom.algorithm
self.parameters = dict(copyFrom.parameters)
else:
self.algorithm = algorithm
if (globalParameters != None):
self.parameters = dict(globalParameters)
self.parameters.update(parameters)
else:
self.parameters = parameters
writeLog("Creating new clustering object for algorithm: " + self.algorithm)
self.model = None
self.vectorizer = None
self.known_values = None
self.labels = []
def getCaseFeatureGroupsToInclude(self):
ica_clustering = (not self.parameters["disable_case_attributes"]) and (self.parameters["case_clustering_include_case_attributes"])
iao_clustering = (not self.parameters["disable_case_attributes"]) and (self.parameters["case_clustering_include_activity_occurrences"])
ica_filtering = ica_clustering or (not self.parameters["disable_raw_case_attributes"])
iao_filtering = iao_clustering or ((not self.parameters["disable_raw_case_attributes"]) and (self.parameters["include_activity_occurrences_as_raw_case_attributes"]))
return ica_clustering, iao_clustering, ica_filtering, iao_filtering
def trainForCaseClustering(self, eventlog, cases):
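        # Clusters whole cases by their case attributes and/or activity occurrence
        # counts and stores the resulting label in each case under "_cluster".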
if self.parameters["disable_case_attributes"] and self.parameters["disable_raw_case_attributes"]:
writeLog("Case clustering not needed. Skipping it.")
for t in cases:
t["_cluster"] = 0
return
writeLog("Clustering %d cases" % (len(cases)))
# num_clusters = self.parameters["num_clusters"]
# if (num_clusters <= 1):
# for t in cases:
# t["_cluster"] = 0
# return
t0 = time()
data = []
cols = []
ica_clustering, iao_clustering, ica_filtering, iao_filtering = self.getCaseFeatureGroupsToInclude()
ica_cols = []
iao_cols = []
if ica_filtering:
data += [c["a"] + c["occ"] for c in cases] if iao_filtering else [c["a"] for c in cases]
ica_cols = ["A_" + a for a in eventlog.data["attributes"]["case"]]
cols += ica_cols
if iao_filtering:
if not ica_filtering:
data += [c["occ"] for c in cases]
iao_cols = ["O_" + a["name"] for a in eventlog.data["activities"]]
cols += iao_cols
df = pd.DataFrame(data, columns=cols)
self.known_values = self.filterUnusualValues(df, self.parameters)
if (ica_filtering and (not ica_clustering)):
df = df.drop(ica_cols, axis = 1)
if (iao_filtering and (not iao_clustering)):
df = df.drop(iao_cols, axis = 1)
if ("Cost" in df.columns):
df = df.drop(["Cost"], axis = 1)
if ("_cluster" in df.columns):
df = df.drop(["_cluster"], axis = 1)
if not self.parameters["disable_case_attributes"]:
self.model, self.vectorizer, labels = self.train(df, self.parameters)
for i, d in enumerate(labels):
cases[i]["_cluster"] = d
writeLog("Case clustering done in %0.3fs" % (time() - t0))
else:
self.model = None
self.vectorizer = None
writeLog("Case data filtering done in %0.3fs" % (time() - t0))
def trainForEventClustering(self, eventlog, cases):
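        # Clusters individual events by their attributes, either with one shared
        # clustering or with a separate clustering per activity type, and appends
        # the resulting cluster label to every event.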
if self.parameters["disable_event_attributes"] and self.parameters["disable_raw_event_attributes"]:
writeLog("Event clustering not needed. Skipping it.")
for c in cases:
for e in c["t"]:
e.append(0)
return
writeLog("Clustering events in %d cases" % (len(cases)))
# num_clusters = self.parameters["num_clusters"]
# if (num_clusters <= 1):
# for c in cases:
# for e in c["t"]:
# e.append(0)
# return
t0 = time()
if (self.parameters["use_single_event_clustering"]):
events = []
for c in cases:
for e in c["t"]:
events.append(["" if i == None else i for i in e[2:]])
df = pd.DataFrame(events, columns=eventlog.data["attributes"]["event"])
known_values = self.filterUnusualValues(df, self.parameters)
if not self.parameters["disable_event_attributes"]:
model, vectorizer, labels = self.train(df, self.parameters)
i = 0
for c in cases:
for e in c["t"]:
e.append(labels[i])
i += 1
self.vectorizer = { "primary": vectorizer }
self.model = { "primary": model }
else:
model = None
vectorizer = None
self.known_values = { "primary": known_values }
else:
self.model = {}
self.vectorizer = {}
self.known_values = {}
eventAttributes = eventlog.data["attributes"]["event"]
activities = eventlog.getActivityOccurrences(cases)
for activityId, activity in activities.items():
t0 = time()
writeLog("Clustering %d events for activity: %s (id: %s)" % (len(activity["occ"]), activity["name"], activityId))
events = [None] * len(activity["occ"])
maxLen = len(eventAttributes) + 2
for i, e in enumerate(activity["occ"]):
events[i] = e[2:maxLen]
if (len(events) < 1):
i = 0
for e in activity["occ"]:
e.append(0)
i += 1
continue
df = pd.DataFrame(events, columns=eventlog.data["attributes"]["event"])
self.known_values[activityId] = self.filterUnusualValues(df, self.parameters)
if not self.parameters["disable_event_attributes"]:
self.model[activityId], self.vectorizer[activityId], labels = self.train(df, self.parameters)
i = 0
if not self.parameters["disable_event_attributes"]:
for e in activity["occ"]:
e.append(labels[i])
i += 1
else:
self.model[activityId] = None
self.vectorizer[activityId] = None
writeLog("Event clustering done in %0.3fs" % (time() - t0))
def filterUnusualValues(self, df, parameters):
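        # Replaces attribute values whose frequency is at or below the configured
        # ignore_values_threshold with the OTHER token and returns the kept values
        # per column.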
writeLog("Number of colums to filter unusual values from %d" % (len(df.columns)))
t0 = time()
threshold = parameters["ignore_values_threshold"] * len(df)
known_values = {}
for col in df.columns:
writeLog("Replacing unusual values in column '%s' with minimum usage of %d rows." % (col, threshold))
vc = df[col].value_counts()
toRemove = vc[vc <= threshold].index
toKeep = vc[vc > threshold].index
known_values[col] = toKeep
writeLog("Remaining known values: %s (removed %d values out of %d values)" % (str([i for i in toKeep]), len(toRemove), len(toKeep)))
if len(toRemove) > 0:
df[col].replace(toRemove, OTHER_TOKEN, inplace=True)
writeLog("Unusual value filtering done in %f s" % (time() - t0))
return known_values
def train(self, df, parameters):
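        # One-hot encodes the attribute data frame with DictVectorizer and runs the
        # selected clustering algorithm on the resulting matrix.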
writeLog("Number of colums to cluster %d" % (len(df.columns)))
t0 = time()
vectorizer = DictVectorizer(sparse = False)
writeLog("Vectorizing data frame of shape: %s" % (str(df.shape)))
X = vectorizer.fit_transform(df.to_dict(orient = 'records'))
writeLog("Data vectorization done in %fs" % (time() - t0))
writeLog("n_samples: %d, n_features: %d" % X.shape)
t0 = time()
alg = algorithms[self.algorithm]
# #############################################################################
# Do the actual clustering
if df.shape[0] < 2:
writeLog("One row or less to cluster. Returning constant labels.")
model = None
labels = len(df) * [0]
allLabels = [0]
elif df.shape[1] == 0:
writeLog("No columns in the table to be clustered. Returning constant labels.")
model = None
labels = len(df) * [0]
allLabels = [0]
else:
model, labels, allLabels = alg["train"](X, parameters)
if (len(allLabels) > len(self.labels)):
self.labels = allLabels
writeLog("Clustering using %s done in %fs" % (self.algorithm, time() - t0))
return model, vectorizer, labels
def getCaseClusteringDataFrame(self, eventlog, cases):
cols = []
ica = self.parameters["case_clustering_include_case_attributes"]
iao = self.parameters["case_clustering_include_activity_occurrences"]
if ica:
cols += eventlog.data["attributes"]["case"]
if iao:
cols += [a["name"] for a in eventlog.data["activities"]]
rows = [([] + (c["occ"] if iao else []) + (["" if i == None else i for i in c["a"]] if ica else [])) for c in cases]
return pd.DataFrame(rows, columns=cols)
def clusterCases(self, eventlog, testData):
if self.model == None:
for td in testData:
td["_cluster"] = 0
return
df = self.getCaseClusteringDataFrame(eventlog, testData)
labels = self.predict(df, self.model, self.vectorizer, self.known_values)
for i, d in enumerate(labels):
testData[i]["_cluster"] = d
def clusterEvents(self, eventlog, testData):
if self.model == None:
for c in testData:
for e in c["t"]:
e.append(0)
return
if (self.parameters["use_single_event_clustering"]):
self.clusterTestDataUsingSingleClustering(eventlog, testData)
else:
self.clusterTestDataUsingMultipleClusterings(eventlog, testData)
def clusterTestDataUsingSingleClustering(self, eventlog, testData):
eventAttributes = eventlog.data["attributes"]["event"]
events = []
for c in testData:
for e in c["t"]:
# if (len(e) > len(eventAttributes) + 2):
# del(e[len(e) - 1])
events.append(["" if i == None else i for i in e[2:]])
df = pd.DataFrame(events, columns=eventAttributes)
labels = self.predict(df, self.model["primary"], self.vectorizer["primary"], self.known_values["primary"])
i = 0
for c in testData:
for e in c["t"]:
e.append(labels[i])
i += 1
def clusterTestDataUsingMultipleClusterings(self, eventlog, testData):
num_clusters = self.parameters["num_clusters"]
eventAttributes = eventlog.data["attributes"]["event"]
activities = eventlog.getActivityOccurrences(testData)
for activityId, activity in activities.items():
numEvents = len(activity["occ"])
writeLog("Clustering %d test events for activity: %s (id: %s)" % (numEvents, activity["name"], activityId));
model = self.model[activityId] if activityId in self.model else None
if ((numEvents < 2) or (model == None)):
i = 0
for e in activity["occ"]:
e.append(0)
i += 1
continue
events = [None] * numEvents
maxLen = len(eventAttributes) + 2
for i, e in enumerate(activity["occ"]):
events[i] = e[2:maxLen]
df = pd.DataFrame(events, columns=eventlog.data["attributes"]["event"])
labels = self.predict(df, model, self.vectorizer[activityId], self.known_values[activityId])
i = 0
for e in activity["occ"]:
e.append(labels[i])
i += 1
def predict(self, df, model, vectorizer, known_values):
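        # Applies the same unusual-value filtering and vectorization as in training
        # before predicting cluster labels with the trained model.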
threshold = self.parameters["ignore_values_threshold"] * len(df)
if threshold > 0:
for col in df.columns:
writeLog("Replacing unusual values in column %s." % (col))
if col in known_values:
isin = df[col].isin(known_values[col])
                    df[col].loc[~isin] = OTHER_TOKEN
writeLog("Vectorizing data frame of shape: %s" % (str(df.shape)))
XX = vectorizer.transform(df.to_dict(orient = 'records'))
alg = algorithms[self.algorithm]
return alg["predict"](XX, model)
def filterEventAttributes(self, eventlog, trace):
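        # Maps each raw event attribute value of the trace to its vocabulary
        # encoding, falling back to the OTHER token for values that were not kept
        # during training.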
result = []
eventAttributes = eventlog.data["attributes"]["event"]
if (self.parameters["use_single_event_clustering"]):
for eventId, e in enumerate(trace.eventAttributes):
r = []
kv = self.known_values["primary"]
for attributeId, val in enumerate(e[0:len(eventAttributes)]):
attributeName = eventAttributes[attributeId]
valueIndex = kv[attributeName]
v = val if (val in valueIndex) else OTHER_TOKEN
r.append(encodeEventAttributeValueForVocabulary(v, attributeId))
result.append(r)
else:
for eventId, e in enumerate(trace.eventAttributes):
r = []
activity = eventlog.activitiesByLabel[trace.activityLabels[eventId]]
activityId = activity["id"]
if activityId in self.known_values:
kv = self.known_values[activityId]
for attributeId, val in enumerate(e[0:len(eventAttributes)]):
attributeName = eventAttributes[attributeId]
valueIndex = kv[attributeName]
v = val if (val in valueIndex) else OTHER_TOKEN
r.append(encodeEventAttributeValueForVocabulary(v, attributeId))
else:
for attributeId, val in enumerate(e):
r.append(encodeEventAttributeValueForVocabulary(OTHER_TOKEN, attributeId))
result.append(r)
trace.filteredEventAttributes = result
def filterCaseAttributes(self, eventlog, trace):
result = []
caseAttributes = eventlog.data["attributes"]["case"]
activities = eventlog.data["activities"]
ica_clustering, iao_clustering, ica_filtering, iao_filtering = self.getCaseFeatureGroupsToInclude()
lastAttributeId = 0
if ica_filtering:
lastAttributeId = len(caseAttributes)
kv = self.known_values
for attributeId, val in enumerate(trace.caseAttributes):
if (attributeId < lastAttributeId):
attributeName = "A_" + caseAttributes[attributeId]
valueIndex = kv[attributeName]
else:
attributeName = "O_" + activities[attributeId - lastAttributeId]["name"]
valueIndex = kv[attributeName]
v = val if (val in valueIndex) else OTHER_TOKEN
result.append(encodeCaseAttributeValueForVocabulary(v, attributeId))
trace.filteredCaseAttributes = result
def getSetOfKnownCaseAttributeValuesForVocabulary(self, eventlog, traces):
result = []
if self.parameters["disable_raw_case_attributes"]:
return result
caseAttributes = eventlog.data["attributes"]["case"]
kv = self.known_values
for attributeId, attributeName in enumerate(caseAttributes):
r = set([encodeCaseAttributeValueForVocabulary(OTHER_TOKEN, attributeId)])
attributeKey = "A_" + attributeName
vals = kv[attributeKey]
r.update([encodeCaseAttributeValueForVocabulary(v, attributeId) for v in vals])
result.append(r)
if not self.parameters["include_activity_occurrences_as_raw_case_attributes"]:
return [item for sublist in result for item in sublist]
activities = eventlog.data["activities"]
for activityId, activity in enumerate(activities):
attributeId = activityId + len(caseAttributes)
# r = set([encodeCaseAttributeValueForVocabulary(OTHER_TOKEN, attributeId)])
r = set()
attributeKey = "O_" + activity["name"]
vals = kv[attributeKey]
r.update([encodeCaseAttributeValueForVocabulary(v, attributeId) for v in vals])
result.append(r)
return [item for sublist in result for item in sublist]
def getSetOfKnownEventAttributeValuesForVocabulary(self, eventlog, traces):
result = []
if self.parameters["disable_raw_event_attributes"]:
return result
result = []
eventAttributes = eventlog.data["attributes"]["event"]
if (self.parameters["use_single_event_clustering"]):
kv = self.known_values["primary"]
for attributeId, e in enumerate(eventAttributes):
# r = set([encodeEventAttributeValueForVocabulary(OTHER_TOKEN, attributeId)])
attributeName = eventAttributes[attributeId]
r = set()
vals = kv[attributeName]
r.update([encodeEventAttributeValueForVocabulary(v, attributeId) for v in vals])
result.append(r)
else:
for attributeId, e in enumerate(eventAttributes):
# r = set([encodeEventAttributeValueForVocabulary(OTHER_TOKEN, attributeId)])
attributeName = eventAttributes[attributeId]
r = set()
for activity in eventlog.data["activities"]:
activityId = activity["id"]
if activityId in self.known_values:
kv = self.known_values[activityId]
vals = kv[attributeName]
else:
vals = []
r.update([encodeEventAttributeValueForVocabulary(v, attributeId) for v in vals])
result.append(r)
return [item for sublist in result for item in sublist]
def addUndefinedParameters(self, parameters):
for key, value in parameters.items():
if not key in self.parameters:
self.parameters[key] = value
def getClusterLabels(self):
return self.labels
def save(self):
saved = {
"algorithm": self.algorithm,
"model": self.model,
"vectorizer": self.vectorizer,
"parameters": self.parameters,
"known_values": self.known_values,
"labels": self.labels
}
return saved
def load(self, saved):
self.algorithm = saved["algorithm"]
self.model = saved["model"]
self.vectorizer = saved["vectorizer"]
self.parameters = saved["parameters"]
self.known_values = saved["known_values"]
self.labels = saved["labels"]
| 28,365 | 41.400598 | 352 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/my_utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
import csv
import numpy as np
import time
import sys
import operator
import io
import array
from datetime import datetime
#import matplotlib.pyplot as plt
import math
OTHER_TOKEN = "OTHER"
UNKNOWN_TOKEN = "UNKNOWN"
ATTRIBUTE_COLUMN_PREFIX = "_A"
OUTCOME_SELECTION_TOKEN_PREFIX = "_O_"
DURATION_TOKEN_PREFIX = "_D_"
EVENT_ATTRIBUTE_TOKEN_PREFIX = "_EA_"
CASE_ATTRIBUTE_TOKEN_PREFIX = "_CA_"
WORD_PART_SEPARATOR = ":"
TRACE_FINISH_TOKEN = "__FINISH__"
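# NOTE: the selection tokens below are referenced by generate_trace() and
# predict_outcome() but were not defined in this module; the values used here are
# assumptions derived from OUTCOME_SELECTION_TOKEN_PREFIX and may need adjusting.
IN_SELECTION_TOKEN = OUTCOME_SELECTION_TOKEN_PREFIX + "True"
NOT_IN_SELECTION_TOKEN = OUTCOME_SELECTION_TOKEN_PREFIX + "False"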
class TraceData:
traceId = ""
outcome = None
activities = []
# sentence = ""
tokenized_sentences = None
positions = []
activityLabels = []
durations = []
durationValues = []
eventAttributeClusters = []
def __init__(self, traceId, outcome, outcomeDefined, cAttributes, eAttributes, cluster, sentence, durationValues, parameters, trace_length_modifier, model, is_full_trace = False):
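        # Builds the tokenized representation of one (possibly truncated) trace:
        # each word is split into its duration bucket, activity label and event
        # attribute cluster parts, and the expected outcome token is derived either
        # from the case outcome or, when predicting the next activity, from the
        # activity that follows the kept prefix.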
self.traceId = traceId
self.fullActivities = np.asarray([w.replace(" ", "_") for w in sentence])
if parameters["predict_next_activity"]:
# self.fullActivities = np.asarray([w.replace(" ", "_") for w in sentence] + [FINISH_TOKEN])
if is_full_trace:
self.outcome = TRACE_FINISH_TOKEN
self.outcomeToken = OUTCOME_SELECTION_TOKEN_PREFIX + self.outcome
elif not parameters["predict_only"]:
last = self.fullActivities[-1]
parts = last.split(WORD_PART_SEPARATOR)
w = parts[1] if len(parts) == 3 else last
self.outcomeToken = self.outcome = w
self.fullActivities = self.fullActivities[:-1]
eAttributes = eAttributes[:-1]
else:
self.outcome = str(outcome)
self.outcomeToken = OUTCOME_SELECTION_TOKEN_PREFIX + self.outcome
self.caseAttributeCluster = CASE_ATTRIBUTE_TOKEN_PREFIX + str(cluster)
self.trace_length_modifier = trace_length_modifier
if (trace_length_modifier != 1.0):
self.activities = self.fullActivities[range(math.ceil(trace_length_modifier * len(self.fullActivities)))]
else:
self.activities = self.fullActivities
self.sentence = "%s %s" % (" ".join(self.activities), OUTCOME_SELECTION_TOKEN_PREFIX + str(self.outcome))
self.activitiesForPrediction = {}
self.durations = []
self.activityLabels = []
self.eventAttributeClusters = []
self.durationValues = np.asarray(durationValues)
for a in self.activities:
parts = a.split(WORD_PART_SEPARATOR)
if (len(parts) == 3):
self.durations.append(parts[0])
self.activityLabels.append(parts[1])
self.eventAttributeClusters.append(parts[2])
else:
self.durations.append(None)
self.activityLabels.append(a)
self.eventAttributeClusters.append(None)
self.activityLabels = np.asarray(self.activityLabels)
self.durations = np.asarray(self.durations)
self.eventAttributeClusters = np.asarray(self.eventAttributeClusters)
self.eventAttributes = [] if parameters["disable_raw_event_attributes"] else eAttributes
self.caseAttributes = [] if parameters["disable_raw_case_attributes"] else cAttributes
self.filteredCaseAttributes = []
self.filteredEventAttributes = []
self.indexedCaseAttributeWords = None
self.indexedEventAttributeWords = None
def getActivitiesForPrediction(self, word_to_index, tracePercentage, truncateUnknowns, seqLength, vocabSize, disableActivityLabels, disableDurations, disableEventAttributes, disableCaseAttributes, useSingleValueForDuration):
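        # Returns index-encoded activity labels, duration tokens, event attribute
        # clusters and raw duration values for the first tracePercentage of the
        # trace, substituting the UNKNOWN token or None for disabled/unseen words.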
# key = "%s_%s_%s_%s_%s" % (tracePercentage, self.trace_length_modifier, truncateUnknowns, seqLength, vocabSize)
# if (not key in self.activitiesForPrediction):
r = range(math.ceil(tracePercentage * len(self.activityLabels)))
labels = self.activityLabels[r]
durations = self.durations[r]
eventAttributeClusters = self.eventAttributeClusters[r]
durationValues = self.durationValues[r] if useSingleValueForDuration else None
# unknownId = word_to_index[UNKNOWN_TOKEN]
# wordTokens = [word_to_index[word] if (word in word_to_index) else unknownId for word in sentence]
labels = [(word_to_index[word] if (word in word_to_index) and (not disableActivityLabels) else word_to_index[UNKNOWN_TOKEN]) for word in labels]
durations = [word_to_index[word] if (word != None) and (not disableDurations) and (not useSingleValueForDuration) else None for word in durations]
eventAttributeClusters = [word_to_index[word] if (word != None) and (not disableEventAttributes) else None for word in eventAttributeClusters]
return (labels, durations, eventAttributeClusters, durationValues)
# self.activitiesForPrediction[key] = (labels, durations, eventAttributeClusters)
# return self.activitiesForPrediction[key]
def getCaseAttributeWordIndexes(self, word_to_index):
if self.indexedCaseAttributeWords == None:
self.indexedCaseAttributeWords = [(word_to_index[val] if val in word_to_index else None) for val in self.filteredCaseAttributes]
return self.indexedCaseAttributeWords
def getEventAttributeWordIndexes(self, word_to_index):
if self.indexedEventAttributeWords == None:
self.indexedEventAttributeWords = [[(word_to_index[val] if val in word_to_index else None) for val in fea] for fea in self.filteredEventAttributes]
return self.indexedEventAttributeWords
def print_trace(s, index_to_word):
sentence_str = [index_to_word[x] for x in s[1:-1]]
writeLog(" ".join(sentence_str))
sys.stdout.flush()
def generate_trace(model, index_to_word, word_to_index, min_length=5):
# We start the sentence with the start token
new_sentence = []
# Repeat until we get an end token
selIndex = word_to_index[IN_SELECTION_TOKEN]
notSelIndex = word_to_index[NOT_IN_SELECTION_TOKEN]
while not ((len(new_sentence) > 0) and ((new_sentence[-1] == selIndex) or (new_sentence[-1] == notSelIndex))):
next_word_probs = model.predict(new_sentence)[-1]
samples = np.random.multinomial(1, next_word_probs)
sampled_word = np.argmax(samples)
new_sentence.append(sampled_word)
        # Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
if len(new_sentence) > 100 or sampled_word == word_to_index[UNKNOWN_TOKEN]:
return None
if len(new_sentence) < min_length:
return None
return new_sentence
def generate_traces(model, n, index_to_word, word_to_index):
for i in range(n):
sent = None
while not sent:
sent = generate_trace(model, index_to_word, word_to_index)
print_trace(sent, index_to_word)
def predict_outcome(model, test, word_to_index):
nextPrediction = model.predict(test)[-1]
selIndex = word_to_index[IN_SELECTION_TOKEN]
notSelIndex = word_to_index[NOT_IN_SELECTION_TOKEN]
selProb = nextPrediction[selIndex]
notSelProb = nextPrediction[notSelIndex]
return selProb >= notSelProb
def get_filename(figure_type, name, file_type, output_path = None):
dtstr = datetime.now().replace(microsecond=0).isoformat().replace("-", "").replace(":", "")
return (output_path if output_path != None else _output_path) + figure_type + "-" + name + "-" + dtstr + "." + file_type
_output_path = ""
_input_files_path = ""
_log_filename = ""
_results_filename = ""
_log_to_file_only = False
def getOutputPath():
return _output_path
def getInputPath():
return _input_files_path
def configure(input_files_path, output_path, log_to_file_only):
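    # Initializes the global input/output paths, the log file name and the results
    # CSV file (writing its header row) for the current test run.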
global _output_path
global _input_files_path
global _log_filename
global _results_filename
global _log_to_file_only
_output_path = output_path
_input_files_path = input_files_path
_log_filename = get_filename("log", "", "txt")
_results_filename = get_filename("results", "", "csv")
_log_to_file_only = log_to_file_only
with open(_results_filename, "w", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["Time", "Status", "Name", "TestName", "Dataset", "CVRunId", "TrainDatasetSize", "TestDatasetSize", "DatasetSize", "Algorithm", "NumLayers", "HiddenDimSize", "Optimizer", "LearningRate", "SeqLength", "InputVectorSize", "BatchSize", "GradClipping", "ItemsBetween", "BestModelIteration", "TestIteration", "Iteration", "Epoch", "TrainDataInitTimeUsed", "TrainLayerInitTimeUsed", "CumulExactTrainTimeUsed", "TimeUsed", "CumulTimeUsed", "TimeUsedForTest", "CumulTimeUsedForTest", "SR_Train", "SR_Test", "SR_Test75p", "SR_Test50p", "SR_Test25p", "AvgCost", "AUC", "TP", "TN", "FP", "FN", "AllConfusions", "PredictOnlyOutcome", "FinalTraceOnly", "TraceLengthMod", "FixedLength", "MaxNumActivities", "TruncateUnknowns", "ActivityLabels", "Durations", "EventAttributes", "CaseAttributes", "RawEventAttributes", "RawCaseAttributes", "PredictNextActivity", "SingleEventClustering", "DurationSplitMethod", "CaseClusteringMethod", "EventClusteringMethod", "CaseClusteringIncludeActivityOccurrences", "CaseClusteringIncludeCaseAttributes", "IncludeActivityOccurrencesAsRawCaseAttributes", "UseSingleValueForDuration", "MaxNumCaseClusters", "MaxNumEventClusters", "MinimumUsageForCaseAttributes", "MinimumUsageForEventAttributes"])
def getInputDatasetFilename(dataset_name):
return _input_files_path + dataset_name + ".json"
def writeLog(message):
global _log_to_file_only
message = datetime.now().replace(microsecond=0).isoformat() + " \t" + message
if (not _log_to_file_only):
print(message)
with open(_log_filename, "a") as logfile:
logfile.write(message + "\n")
def writeResultRow(cells):
with open(_results_filename, "a", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(cells)
def writeTestResultRow(cells):
writeResultRow(cells)
def encodeCaseAttributeValueForVocabulary(val, attributeId):
return "%sC%d_%s" % (ATTRIBUTE_COLUMN_PREFIX, attributeId, val)
def encodeEventAttributeValueForVocabulary(val, attributeId):
return "%sE%d_%s" % (ATTRIBUTE_COLUMN_PREFIX, attributeId, val)
| 10,820 | 49.097222 | 1,243 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/bucket.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
import sys
import lasagne
from lasagne.layers import *
import numpy as np
import theano as theano
import theano.tensor as T
from time import time
import operator
import pickle
from my_utils import TraceData, writeLog, writeResultRow, get_filename, getOutputPath, OUTCOME_SELECTION_TOKEN_PREFIX, DURATION_TOKEN_PREFIX, EVENT_ATTRIBUTE_TOKEN_PREFIX, WORD_PART_SEPARATOR, CASE_ATTRIBUTE_TOKEN_PREFIX
#import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn import metrics
from time import time
from pathlib import Path
import pandas as pd
import nltk
import itertools
import json
class Bucket:
def __init__(self, num_layers, algorithm, num_units, hidden_dim_size, grad_clipping, optimizer, learning_rate):
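        # Builds and compiles one recurrent network: a stack of GRU or LSTM layers
        # followed by a softmax output layer, together with the Theano functions
        # used for training (self.train) and for computing output probabilities
        # (self.propabilities).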
self.traces_train = []
self.traces_test = []
self.num_layers = num_layers
self.algorithm = algorithm
self.num_units = num_units
self.hidden_dim_size = hidden_dim_size
self.grad_clipping = grad_clipping
self.optimizer = optimizer
self.learning_rate = learning_rate
writeLog("Preparing " + str(self.num_layers) + " layers for algorithm: " + self.algorithm)
# First, we build the network, starting with an input layer
# Recurrent layers expect input of shape
# (batch size, SEQ_LENGTH, num_features)
mask_var = T.matrix('mask')
l_in = lasagne.layers.InputLayer(shape=(None, None, num_units))
l_mask = lasagne.layers.InputLayer((None, None), mask_var)
self.l_layers = [l_in]
# We now build the LSTM layer which takes l_in as the input layer
# We clip the gradients at GRAD_CLIP to prevent the problem of exploding gradients.
if (self.algorithm == "gru"):
layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.GRULayer(
parentLayer, self.hidden_dim_size, grad_clipping=self.grad_clipping,
mask_input = l_mask if isFirstLayer else None,
only_return_final=isLastLayer)
else:
# All gates have initializers for the input-to-gate and hidden state-to-gate
# weight matrices, the cell-to-gate weight vector, the bias vector, and the nonlinearity.
# The convention is that gates use the standard sigmoid nonlinearity,
# which is the default for the Gate class.
# gate_parameters = lasagne.layers.recurrent.Gate(
# W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# b=lasagne.init.Constant(0.))
# cell_parameters = lasagne.layers.recurrent.Gate(
# W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# # Setting W_cell to None denotes that no cell connection will be used.
# W_cell=None, b=lasagne.init.Constant(0.),
# # By convention, the cell nonlinearity is tanh in an LSTM.
# nonlinearity=lasagne.nonlinearities.tanh)
layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.LSTMLayer(
parentLayer, self.hidden_dim_size, grad_clipping=self.grad_clipping,
mask_input = l_mask if isFirstLayer else None,
nonlinearity=lasagne.nonlinearities.tanh,
# Here, we supply the gate parameters for each gate
# ingate=gate_parameters, forgetgate=gate_parameters,
# cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
only_return_final=isLastLayer)
for layerId in range(self.num_layers):
self.l_layers.append(layerCreatorFunc(self.l_layers[layerId], layerId == 0, layerId == self.num_layers - 1))
# The output of l_forward_2 of shape (batch_size, N_HIDDEN) is then passed through the softmax nonlinearity to
# create probability distribution of the prediction
# The output of this stage is (batch_size, vocab_size)
self.l_out = lasagne.layers.DenseLayer(self.l_layers[len(self.l_layers) - 1], num_units=num_units, W = lasagne.init.Normal(), nonlinearity=lasagne.nonlinearities.softmax)
self.l_layers.append(self.l_out)
# Theano tensor for the targets
target_values = T.ivector('target_output')
#! target_var = T.matrix('target_output')
# lasagne.layers.get_output produces a variable for the output of the net
network_output = lasagne.layers.get_output(self.l_out)
# https://github.com/Lasagne/Lasagne/blob/master/examples/recurrent.py
# The network output will have shape (n_batch, 1); let's flatten to get a
# 1-dimensional vector of predicted values
# predicted_values = network_output.flatten()
# flat_target_values = target_values.flatten()
# Our cost will be mean-squared error
# cost = T.mean((predicted_values - flat_target_values)**2)
# cost = T.mean((network_output - target_values)**2)
# The loss function is calculated as the mean of the (categorical) cross-entropy between the prediction and target.
#! cost = T.nnet.categorical_crossentropy(network_output,target_var).mean()
cost = T.nnet.categorical_crossentropy(network_output,target_values).mean()
# Retrieve all parameters from the network
all_params = lasagne.layers.get_all_params(self.l_out,trainable=True)
# Compute AdaGrad updates for training
writeLog("Computing updates...")
writeLog("Using optimizer: " + self.optimizer)
if (self.optimizer == "sgd"):
updates = lasagne.updates.sgd(cost, all_params, self.learning_rate)
elif (self.optimizer == "adagrad"):
updates = lasagne.updates.adagrad(cost, all_params, self.learning_rate)
elif (self.optimizer == "adadelta"):
            updates = lasagne.updates.adadelta(cost, all_params, self.learning_rate, 0.95)
elif (self.optimizer == "momentum"):
updates = lasagne.updates.momentum(cost, all_params, self.learning_rate, 0.9)
elif (self.optimizer == "nesterov_momentum"):
updates = lasagne.updates.nesterov_momentum(cost, all_params, self.learning_rate, 0.9)
elif (self.optimizer == "rmsprop"):
updates = lasagne.updates.rmsprop(cost, all_params, self.learning_rate, 0.9)
else:
updates = lasagne.updates.adam(cost, all_params, self.learning_rate, beta1=0.9, beta2=0.999)
# Theano functions for training and computing cost
writeLog("Compiling train function...")
self.train = theano.function([l_in.input_var, target_values, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
#! self.train = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
writeLog("Compiling train cost computing function...")
# self.compute_cost = theano.function([l_in.input_var, target_values, l_mask.input_var], cost, allow_input_downcast=True)
# In order to generate text from the network, we need the probability distribution of the next character given
# the state of the network and the input (a seed).
# In order to produce the probability distribution of the prediction, we compile a function called probs.
writeLog("Compiling propabilities computing function...")
self.propabilities = theano.function([l_in.input_var, l_mask.input_var],network_output,allow_input_downcast=True)
| 8,115 | 52.394737 | 220 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/model.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
import sys
import lasagne
from lasagne.layers import *
import numpy as np
import theano as theano
import theano.tensor as T
from time import time
import operator
import pickle
import traceback
#import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn import metrics
from time import time
from pathlib import Path
import pandas as pd
import nltk
import itertools
import json
from my_utils import writeLog, writeResultRow, get_filename, getOutputPath, ATTRIBUTE_COLUMN_PREFIX, OTHER_TOKEN, OUTCOME_SELECTION_TOKEN_PREFIX, DURATION_TOKEN_PREFIX, EVENT_ATTRIBUTE_TOKEN_PREFIX, WORD_PART_SEPARATOR, CASE_ATTRIBUTE_TOKEN_PREFIX, TRACE_FINISH_TOKEN
from bucket import Bucket
UNKNOWN_TOKEN = "UNKNOWN"
DURATION_VALUE_PLACEHOLDER_TOKEN = "__DURATION_VALUE__"
class Model:
def __init__(self, parameters):
writeLog("Creating new model object")
self.parameters = parameters
self.algorithm = parameters["algorithm"]
self.num_layers = parameters["num_layers"]
self.optimizer = parameters["optimizer"]
self.learning_rate = parameters["learning_rate"]
self.batch_size = parameters["batch_size"]
self.num_callbacks = parameters["num_callbacks"]
self.case_name = parameters["case_name"]
self.hidden_dim_size = parameters["hidden_dim_size"]
self.num_iterations_between_reports = parameters["num_iterations_between_reports"]
self.grad_clipping = parameters["grad_clipping"]
self.predict_only_outcome = parameters["predict_only_outcome"]
self.final_trace_only = parameters["final_trace_only"]
self.max_num_words = parameters["max_num_words"]
self.trace_length_modifier = parameters["trace_length_modifier"]
self.truncate_unknowns = parameters["truncate_unknowns"]
self.eventlogActivities = None
self.eventlogAttributes = None
self.eventlogFilename = None
self.eventlogFilepath = None
def initialize(self, case_clustering, event_clustering, rng):
self.case_clustering = case_clustering
self.event_clustering = event_clustering
self.rng = rng
def train(self, eventlog):
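        # Full training pipeline: compute duration statistics, train the case and
        # event clusterings, convert cases into (optionally subsampled) traces and
        # finally build and train the RNN via createModel().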
self.train_start_time = time()
self.eventlog = eventlog
self.eventlog.model = self
try:
self.eventlog.preProcessForTraining(self.parameters)
self.initClusters(eventlog)
self.prepareTestData(eventlog)
self.traces_train = eventlog.convertTracesFromInputData(eventlog.trainingData, self.parameters, self.trace_length_modifier)
self.traces_test = eventlog.convertTracesFromInputData(eventlog.testData, self.parameters, self.trace_length_modifier)
maxNumTraces = self.parameters["max_num_traces_in_training"]
if (maxNumTraces != None) and (maxNumTraces < len(self.traces_train)):
writeLog("Filtering %d traces out of %d training traces" % (maxNumTraces, len(self.traces_train)))
self.traces_train = list(np.random.choice(np.asarray(self.traces_train), maxNumTraces, replace=False))
maxNumTraces = self.parameters["max_num_traces_in_training_test"]
if (maxNumTraces != None) and (maxNumTraces < len(self.traces_test)):
writeLog("Filtering %d traces out of %d validation traces" % (maxNumTraces, len(self.traces_test)))
self.traces_test = list(np.random.choice(np.asarray(self.traces_test), maxNumTraces, replace=False))
return self.createModel()
except:
writeLog("Exception: " + traceback.format_exc())
# writeLog("Exception: " + sys.exc_info()[0])
def prepareTestData(self, eventlog):
testData = eventlog.testData
if (self.case_clustering != None):
self.case_clustering.clusterCases(eventlog, eventlog.testData)
else:
            for td in testData:
td["_cluster"] = 0
if (self.event_clustering != None):
self.event_clustering.clusterEvents(eventlog, eventlog.testData)
else:
for c in testData:
for e in c["t"]:
e.append(0)
def initClusters(self, eventlog):
self.case_clustering.trainForCaseClustering(eventlog, eventlog.trainingData)
self.event_clustering.trainForEventClustering(eventlog, eventlog.trainingData)
def gen_data(self, traces, p, positions, batch_size):
'''
This function produces a semi-redundant batch of training samples from the location 'p' in the provided string (data).
For instance, assuming SEQ_LENGTH = 5 and p=0, the function would create batches of
5 characters of the string (starting from the 0th character and stepping by 1 for each semi-redundant batch)
as the input and the next character as the target.
To make this clear, let us look at a concrete example. Assume that SEQ_LENGTH = 5, p = 0 and BATCH_SIZE = 2
If the input string was "The quick brown fox jumps over the lazy dog.",
For the first data point,
x (the inputs to the neural network) would correspond to the encoding of 'T','h','e',' ','q'
y (the targets of the neural network) would be the encoding of 'u'
For the second point,
x (the inputs to the neural network) would correspond to the encoding of 'h','e',' ','q', 'u'
y (the targets of the neural network) would be the encoding of 'i'
The data points are then stacked (into a three-dimensional tensor of size (batch_size,SEQ_LENGTH,vocab_size))
and returned.
Notice that there is overlap of characters between the batches (hence the name, semi-redundant batch).
'''
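        # In this model the "characters" of the example above are one-hot encoded event tokens
        # (activity labels, duration buckets, attribute clusters and raw attribute values), so a
        # single x[n, i] row can have several positions set to 1.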
data_size = len(positions) if positions != None else len(traces)
x = np.zeros((batch_size, self.seq_length, len(self.word_to_index)))
y = np.zeros(batch_size)
masks = []
disableActivityLabels = self.parameters["disable_activity_labels"]
disableDurations = self.parameters["disable_durations"]
disableEventAttributes = self.parameters["disable_event_attributes"]
disableCaseAttributes = self.parameters["disable_case_attributes"]
disableRawEventAttributes = self.parameters["disable_raw_event_attributes"]
disableRawCaseAttributes = self.parameters["disable_raw_case_attributes"]
useSingleValueForDuration = self.parameters["use_single_value_for_duration"]
for n in range(batch_size):
ptr = (p + n) % data_size
traceId = positions[ptr][0] if positions != None else ptr
trace = traces[traceId]
traceLastId = positions[ptr][1] if positions != None else len(traces[traceId].tokenized_sentences)
caCluster = None if disableCaseAttributes else self.word_to_index[trace.caseAttributeCluster]
caWords = None if disableRawCaseAttributes else trace.getCaseAttributeWordIndexes(self.word_to_index)
eaWords = None if disableRawEventAttributes else trace.getEventAttributeWordIndexes(self.word_to_index)
for i in range(traceLastId):
if ((not disableActivityLabels)):
label = trace.activityLabels[i]
x[n, i, self.word_to_index[label]] = 1.
if ((caCluster != None) and (not disableCaseAttributes)):
x[n, i, caCluster] = 1.
duration = trace.durations[i]
if (not disableDurations):
if useSingleValueForDuration:
x[n, i, self.word_to_index[DURATION_VALUE_PLACEHOLDER_TOKEN]] = trace.durationValues[i]
elif (duration != None):
x[n, i, self.word_to_index[duration]] = 1.
cluster = trace.eventAttributeClusters[i]
if ((cluster != None) and (not disableEventAttributes)):
x[n, i, self.word_to_index[cluster]] = 1.
if (not disableRawCaseAttributes):
for w in caWords:
if w != None:
x[n, i, w] = 1.
if (not disableRawEventAttributes):
for w in eaWords[i]:
if w != None:
x[n, i, w] = 1.
masks.append([1 if x < traceLastId else 0 for x in range(self.seq_length)])
y[n] = self.word_to_index[trace.outcomeToken] if self.predict_only_outcome else (self.word_to_index[trace.tokenized_sentences[traceLastId] if traceLastId < len(trace.activityLabels) else trace.outcomeToken])
return x, np.array(y,dtype='int32'), np.asarray(masks)
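    # Builds fixed-size one-hot input batches and matching masks for prediction, using only the
    # first tracePercentage share of each trace's events.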
def gen_prediction_data(self, traces, tracePercentage):
batches = []
masks = []
numTraces = len(traces)
if (numTraces == 0):
return np.asarray(batches), np.asarray(masks)
batchRow = 0
x = np.zeros((self.batch_size if (numTraces > self.batch_size) else numTraces, self.seq_length, len(self.word_to_index)))
m = np.zeros((self.batch_size if (numTraces > self.batch_size) else numTraces, self.seq_length))
batches.append(x)
masks.append(m)
disableActivityLabels = self.parameters["disable_activity_labels"]
disableDurations = self.parameters["disable_durations"]
disableEventAttributes = self.parameters["disable_event_attributes"]
disableCaseAttributes = self.parameters["disable_case_attributes"]
disableRawEventAttributes = self.parameters["disable_raw_event_attributes"]
disableRawCaseAttributes = self.parameters["disable_raw_case_attributes"]
useSingleValueForDuration = self.parameters["use_single_value_for_duration"]
dpIndex = self.word_to_index[DURATION_VALUE_PLACEHOLDER_TOKEN] if useSingleValueForDuration and (not disableDurations) else None
for traceRow in range(len(traces)):
trace = traces[traceRow]
(labels, durations, clusters, durationValues) = trace.getActivitiesForPrediction(self.word_to_index, tracePercentage, self.truncate_unknowns, self.seq_length, len(self.word_to_index), disableActivityLabels, disableDurations, disableEventAttributes, disableCaseAttributes, useSingleValueForDuration)
caCluster = None if disableCaseAttributes else self.word_to_index[trace.caseAttributeCluster]
caWords = None if disableRawCaseAttributes else trace.getCaseAttributeWordIndexes(self.word_to_index)
eaWords = None if disableRawEventAttributes else trace.getEventAttributeWordIndexes(self.word_to_index)
for i in range(min(len(labels), x.shape[1] - 1)):
if ((not disableActivityLabels)):
x[batchRow, i, labels[i]] = 1.
if ((caCluster != None) and (not disableCaseAttributes)):
x[batchRow, i, caCluster] = 1.
if (not disableDurations):
duration = durations[i]
if useSingleValueForDuration:
x[batchRow, i, dpIndex] = durationValues[i]
elif (duration != None):
x[batchRow, i, duration] = 1.
if ((clusters[i] != None) and (not disableEventAttributes)):
x[batchRow, i, clusters[i]] = 1.
if (not disableRawCaseAttributes):
for w in caWords:
if w != None:
x[batchRow, i, w] = 1.
if (not disableRawEventAttributes):
for w in eaWords[i]:
if w != None:
x[batchRow, i, w] = 1.
for i in range(self.seq_length):
m[batchRow, i] = 1 if i < len(labels) else 0
batchRow += 1
if (batchRow >= self.batch_size):
x = np.zeros((self.batch_size if (numTraces - traceRow) > self.batch_size else (numTraces - traceRow - 1), self.seq_length, len(self.word_to_index)))
m = np.zeros((self.batch_size if (numTraces - traceRow) > self.batch_size else (numTraces - traceRow - 1), self.seq_length))
batches.append(x)
masks.append(m)
batchRow = 0
return np.asarray(batches), np.asarray(masks)
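    # Runs the training loop until num_callbacks reports have been produced, invoking the given
    # callback roughly every num_iterations_between_reports training examples.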
def trainModel(self, callback):
writeLog("Training...")
p = 0
data_size = len(self.traces_train)
if self.parameters["num_epochs_per_iteration"] != None:
self.num_iterations_between_reports = self.parameters["num_epochs_per_iteration"] * data_size
num_iterations = 0
num_iterations_after_report = 0
num_report_iterations = 1
avg_cost = 0
# writeLog("It: " + str(data_size * self.num_epochs // self.batch_size))
try:
it = 0
while (num_report_iterations <= self.num_callbacks):
x, y, mask = self.gen_data(self.traces_train, p, self.positions_train, self.batch_size)
it += 1
p += self.batch_size
num_iterations += self.batch_size
num_iterations_after_report += self.batch_size
# if(p+self.batch_size+self.seq_length >= data_size):
# writeLog('Carriage Return')
# p = 0;
avg_cost += self.rnn_train(x, y, mask)
if (callback and num_iterations_after_report >= self.num_iterations_between_reports):
callback(num_iterations, it, avg_cost / it, num_report_iterations)
avg_cost = 0
num_iterations_after_report = num_iterations_after_report - self.num_iterations_between_reports
num_report_iterations = num_report_iterations + 1
# callback(num_iterations, it, avg_cost / it, num_report_iterations)
except KeyboardInterrupt:
pass
def initializeTraces(self):
word_to_index = []
index_to_word = []
# Tokenize the sentences into words
writeLog("Tokenizing %s training- and %s test sentences." % (len(self.traces_train), len(self.traces_test)))
# tokenized_sentences = [nltk.word_tokenize(trace.sentence) for trace in traces]
# tokenized_sentences_train = [nltk.WhitespaceTokenizer().tokenize(trace.sentence) for trace in self.traces_train]
tokenized_sentences_train = [trace.activityLabels for trace in self.traces_train]
durations = set()
self.outcomes = set()
predict_next_activity = self.parameters["predict_next_activity"]
includeRawEventAttributes = not self.parameters["disable_raw_event_attributes"] and (self.event_clustering != None)
includeRawCaseAttributes = not self.parameters["disable_raw_case_attributes"] and (self.case_clustering != None)
rawCaseAttributes = []
rawEventAttributes = []
for trace in self.traces_train:
durations.update(trace.durations)
if (not predict_next_activity):
self.outcomes.add(trace.outcomeToken)
if (includeRawCaseAttributes):
self.case_clustering.filterCaseAttributes(self.eventlog, trace)
if (includeRawEventAttributes):
self.event_clustering.filterEventAttributes(self.eventlog, trace)
if (includeRawCaseAttributes):
rawCaseAttributes = self.case_clustering.getSetOfKnownCaseAttributeValuesForVocabulary(self.eventlog, self.traces_train)
if (includeRawEventAttributes):
rawEventAttributes = self.event_clustering.getSetOfKnownEventAttributeValuesForVocabulary(self.eventlog, self.traces_train)
if (predict_next_activity):
self.outcomes.add(OUTCOME_SELECTION_TOKEN_PREFIX + TRACE_FINISH_TOKEN)
durations = [d for d in durations if d != None]
eventAttributeClusters = [EVENT_ATTRIBUTE_TOKEN_PREFIX + str(l) for l in self.event_clustering.getClusterLabels()]
caseAttributeClusters = [CASE_ATTRIBUTE_TOKEN_PREFIX + str(l) for l in self.case_clustering.getClusterLabels()]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences_train))
writeLog("Found %d unique words tokens." % len(word_freq.items()))
# Get the most common words and build index_to_word and word_to_index vectors
vocab = sorted(word_freq.items(), key=lambda x: (x[1], x[0]), reverse=True)
writeLog("Using vocabulary size %d." % len(vocab))
writeLog("Words frequencies in the vocabulary:")
words = []
for x in vocab:
w = x[0]
writeLog(" %d * %s" % (x[1], x[0]))
words.append(w)
words = np.asarray(words)
self.outcomes = list(self.outcomes)
if ((self.max_num_words != None) and (self.max_num_words < len(words))):
words = words[range(self.max_num_words)]
writeLog("Vocabulary was truncated to %d most frequent words in training set." % len(words))
disable_activity_labels = self.parameters["disable_activity_labels"]
disable_durations = self.parameters["disable_durations"]
disable_event_attributes = self.parameters["disable_event_attributes"]
disable_case_attributes = self.parameters["disable_case_attributes"]
create_unknown_token = self.parameters["create-unknown-tokens"]
if disable_durations:
durations = []
if disable_event_attributes:
eventAttributeClusters = []
if disable_case_attributes:
caseAttributeClusters = []
if disable_activity_labels and (not predict_next_activity):
words = []
others = [UNKNOWN_TOKEN]
if (not disable_event_attributes) and create_unknown_token:
others.append(EVENT_ATTRIBUTE_TOKEN_PREFIX + "None")
if (not disable_case_attributes) and create_unknown_token:
others.append(CASE_ATTRIBUTE_TOKEN_PREFIX + "None")
useSingleValueForDuration = self.parameters["use_single_value_for_duration"]
if (not disable_durations) and self.parameters["use_single_value_for_duration"]:
durations = [DURATION_VALUE_PLACEHOLDER_TOKEN]
self.index_to_word = np.concatenate([self.outcomes, durations, eventAttributeClusters, caseAttributeClusters, others, words, rawCaseAttributes, rawEventAttributes])
self.word_to_index = dict([(w, i) for i, w in enumerate(self.index_to_word)])
writeLog("Total number of unique tokens: %d" % len(self.index_to_word))
if (predict_next_activity):
writeLog(" # outcomes: %d (predicting next activities)" % len(words))
else:
writeLog(" # outcomes: %d" % len(self.outcomes))
writeLog(" # durations: %d" % len(durations))
writeLog(" # eventAttributeClusters: %d" % len(eventAttributeClusters))
writeLog(" # rawEventAttributes: %d" % len(rawEventAttributes))
writeLog(" # caseAttributeClusters: %d" % len(caseAttributeClusters))
writeLog(" # rawCaseAttributes: %d" % len(rawCaseAttributes))
if (predict_next_activity):
writeLog(" # activity labels: %d + 1 finish token" % len(words))
else:
writeLog(" # activity labels: %d" % len(words))
        writeLog(" # others: %d" % len(others))
self.initializeTokenTypeArrays()
self.positions_train = None if self.final_trace_only else []
self.seq_length = self.prepareTokenizedSentences(self.traces_train, tokenized_sentences_train, self.positions_train, False) + 1
writeLog("Maximum sequence length in the training set is %d tokens." % (self.seq_length))
tokenized_sentences_test = [nltk.WhitespaceTokenizer().tokenize(trace.sentence) for trace in self.traces_test]
        sl = self.prepareTokenizedSentences(self.traces_test, tokenized_sentences_test, None, True, self.seq_length)
writeLog("Maximum sequence length in the test set is %d tokens." % (sl))
def initializeFilteredRawAttributes(self, traces):
includeRawEventAttributes = not self.parameters["disable_raw_event_attributes"] and (self.event_clustering != None)
includeRawCaseAttributes = not self.parameters["disable_raw_case_attributes"] and (self.case_clustering != None)
if ((not includeRawCaseAttributes) and (not includeRawEventAttributes)):
return
for trace in traces:
if (includeRawCaseAttributes):
self.case_clustering.filterCaseAttributes(self.eventlog, trace)
if (includeRawEventAttributes):
self.event_clustering.filterEventAttributes(self.eventlog, trace)
def initializeTokenTypeArrays(self):
self.unknown_token_id = self.word_to_index[UNKNOWN_TOKEN]
self.is_outcome = [w.startswith(OUTCOME_SELECTION_TOKEN_PREFIX) for w in self.index_to_word]
self.is_duration = [w.startswith(DURATION_TOKEN_PREFIX) for w in self.index_to_word]
self.is_event_attribute_cluster = [w.startswith(EVENT_ATTRIBUTE_TOKEN_PREFIX) for w in self.index_to_word]
self.is_case_attribute_cluster = [w.startswith(CASE_ATTRIBUTE_TOKEN_PREFIX) for w in self.index_to_word]
self.is_event_attribute = [(w.startswith(ATTRIBUTE_COLUMN_PREFIX + "E")) for w in self.index_to_word]
self.is_case_attribute = [(w.startswith(ATTRIBUTE_COLUMN_PREFIX + "C")) for w in self.index_to_word]
self.is_word_token = [ not (self.is_outcome[i] or self.is_duration[i] or self.is_event_attribute_cluster[i] or self.is_case_attribute_cluster[i] or self.is_event_attribute[i] or self.is_case_attribute[i] or i == self.unknown_token_id) for i, w in enumerate(self.index_to_word)]
self.num_outcomes = len(self.outcomes)
def prepareTokenizedSentences(self, traces, tokenized_sentences, positions, initializeFilteredRawAttributes, truncate_to_length = None):
tokenized_sentences = np.asarray(tokenized_sentences)
result = self.handleUnknowns(tokenized_sentences, truncate_to_length)
for i, trace in enumerate(traces):
trace.tokenized_sentences = tokenized_sentences[i]
if (positions != None):
for t, ts in enumerate(tokenized_sentences):
l = len(ts)
if l > 1:
for pos in range(l - 1):
positions.append([t, pos])
if initializeFilteredRawAttributes:
self.initializeFilteredRawAttributes(traces)
return result
def handleUnknowns(self, tokenized_sentences, truncate_to_length = None):
# Replace all words not in our vocabulary with the unknown token
seq_length = 0
for i, sent in enumerate(tokenized_sentences):
ts = [w if w in self.word_to_index else UNKNOWN_TOKEN for w in sent]
if (self.truncate_unknowns):
origts = ts
ts = []
wasUnknown = False
for w in origts:
isUnknown = w == UNKNOWN_TOKEN
if ((not isUnknown) or (not wasUnknown)):
ts.append(w)
wasUnknown = isUnknown
l = len(ts)
if (truncate_to_length != None):
if ts[-1].startswith(OUTCOME_SELECTION_TOKEN_PREFIX):
ts = ts[:-1] # Cut the outcome away from the test set if present
                    l -= 1
if (l > truncate_to_length):
ts = ts[:(truncate_to_length)]
tokenized_sentences[i] = ts
if (l > seq_length):
seq_length = l
return seq_length
def prepareLayers(self):
writeLog("Preparing " + str(self.num_layers) + " layers for algorithm: " + self.algorithm)
# First, we build the network, starting with an input layer
# Recurrent layers expect input of shape
# (batch size, SEQ_LENGTH, num_features)
mask_var = T.matrix('mask')
l_in = lasagne.layers.InputLayer(shape=(None, None, len(self.word_to_index)))
l_mask = lasagne.layers.InputLayer((None, None), mask_var)
self.l_layers = [l_in]
# We now build the LSTM layer which takes l_in as the input layer
# We clip the gradients at GRAD_CLIP to prevent the problem of exploding gradients.
if (self.algorithm == "gru"):
layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.GRULayer(
parentLayer, self.hidden_dim_size, grad_clipping=self.grad_clipping,
mask_input = l_mask if isFirstLayer else None,
only_return_final=isLastLayer)
else:
# All gates have initializers for the input-to-gate and hidden state-to-gate
# weight matrices, the cell-to-gate weight vector, the bias vector, and the nonlinearity.
# The convention is that gates use the standard sigmoid nonlinearity,
# which is the default for the Gate class.
# gate_parameters = lasagne.layers.recurrent.Gate(
# W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# b=lasagne.init.Constant(0.))
# cell_parameters = lasagne.layers.recurrent.Gate(
# W_in=lasagne.init.Orthogonal(), W_hid=lasagne.init.Orthogonal(),
# # Setting W_cell to None denotes that no cell connection will be used.
# W_cell=None, b=lasagne.init.Constant(0.),
# # By convention, the cell nonlinearity is tanh in an LSTM.
# nonlinearity=lasagne.nonlinearities.tanh)
layerCreatorFunc = lambda parentLayer, isFirstLayer, isLastLayer: lasagne.layers.LSTMLayer(
parentLayer, self.hidden_dim_size, grad_clipping=self.grad_clipping,
mask_input = l_mask if isFirstLayer else None,
nonlinearity=lasagne.nonlinearities.tanh,
# Here, we supply the gate parameters for each gate
# ingate=gate_parameters, forgetgate=gate_parameters,
# cell=cell_parameters, outgate=gate_parameters,
# We'll learn the initialization and use gradient clipping
only_return_final=isLastLayer)
for layerId in range(self.num_layers):
self.l_layers.append(layerCreatorFunc(self.l_layers[layerId], layerId == 0, layerId == self.num_layers - 1))
# The output of l_forward_2 of shape (batch_size, N_HIDDEN) is then passed through the softmax nonlinearity to
# create probability distribution of the prediction
# The output of this stage is (batch_size, vocab_size)
self.l_out = lasagne.layers.DenseLayer(self.l_layers[len(self.l_layers) - 1], num_units=len(self.word_to_index), W = lasagne.init.Normal(), nonlinearity=lasagne.nonlinearities.softmax)
self.l_layers.append(self.l_out)
# Theano tensor for the targets
target_values = T.ivector('target_output')
#! target_var = T.matrix('target_output')
# lasagne.layers.get_output produces a variable for the output of the net
network_output = lasagne.layers.get_output(self.l_out)
# https://github.com/Lasagne/Lasagne/blob/master/examples/recurrent.py
# The network output will have shape (n_batch, 1); let's flatten to get a
# 1-dimensional vector of predicted values
# predicted_values = network_output.flatten()
# flat_target_values = target_values.flatten()
# Our cost will be mean-squared error
# cost = T.mean((predicted_values - flat_target_values)**2)
# cost = T.mean((network_output - target_values)**2)
# The loss function is calculated as the mean of the (categorical) cross-entropy between the prediction and target.
#! cost = T.nnet.categorical_crossentropy(network_output,target_var).mean()
cost = T.nnet.categorical_crossentropy(network_output,target_values).mean()
# Retrieve all parameters from the network
all_params = lasagne.layers.get_all_params(self.l_out,trainable=True)
# Compute AdaGrad updates for training
writeLog("Computing updates...")
writeLog("Using optimizer: " + self.optimizer)
if (self.optimizer == "sgd"):
updates = lasagne.updates.sgd(cost, all_params, self.learning_rate)
elif (self.optimizer == "adagrad"):
updates = lasagne.updates.adagrad(cost, all_params, self.learning_rate)
elif (self.optimizer == "adadelta"):
            updates = lasagne.updates.adadelta(cost, all_params, self.learning_rate, 0.95)
elif (self.optimizer == "momentum"):
updates = lasagne.updates.momentum(cost, all_params, self.learning_rate, 0.9)
elif (self.optimizer == "nesterov_momentum"):
updates = lasagne.updates.nesterov_momentum(cost, all_params, self.learning_rate, 0.9)
elif (self.optimizer == "rmsprop"):
updates = lasagne.updates.rmsprop(cost, all_params, self.learning_rate, 0.9)
else:
updates = lasagne.updates.adam(cost, all_params, self.learning_rate, beta1=0.9, beta2=0.999)
# Theano functions for training and computing cost
writeLog("Compiling train function...")
self.rnn_train = theano.function([l_in.input_var, target_values, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
#! self.train = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, updates=updates, allow_input_downcast=True)
writeLog("Compiling train cost computing function...")
self.compute_cost = theano.function([l_in.input_var, target_values, l_mask.input_var], cost, allow_input_downcast=True)
#! self.compute_cost = theano.function([l_in.input_var, target_var, l_mask.input_var], cost, allow_input_downcast=True)
# In order to generate text from the network, we need the probability distribution of the next character given
# the state of the network and the input (a seed).
# In order to produce the probability distribution of the prediction, we compile a function called probs.
        writeLog("Compiling probabilities computing function...")
self.propabilities = theano.function([l_in.input_var, l_mask.input_var],network_output,allow_input_downcast=True)
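    # Predicts an outcome (or, when predicting next activities, the next token) for each trace by
    # masking the softmax output down to the allowed token types and taking the most probable one;
    # also returns that token's normalized probability.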
def predict_outcome(self, tracesToCalculateFor, tracePercentage):
batches, masks = self.gen_prediction_data(tracesToCalculateFor, tracePercentage)
correct = 0
predictions = []
probs_out = []
predict_next_activity = self.parameters["predict_next_activity"]
for i in range(len(batches)):
x = batches[i]
mask = masks[i]
probs = self.propabilities(x, mask)
for prob in enumerate(probs):
if predict_next_activity:
outcomeProbs = np.asarray([p if self.is_word_token[k] or self.is_outcome[k] else 0 for k, p in enumerate(prob[1])])
else:
outcomeProbs = np.asarray([p if self.is_outcome[k] else 0 for k, p in enumerate(prob[1])])
sumProb = 0
maxProb = 0
for t, p in enumerate(outcomeProbs):
sumProb += p
if (p > maxProb):
maxProb = p
maxIndex = t
probs_out.append(maxProb / sumProb)
word = self.index_to_word[maxIndex]
if predict_next_activity and word.startswith(OUTCOME_SELECTION_TOKEN_PREFIX):
word = word[len(OUTCOME_SELECTION_TOKEN_PREFIX):]
predictions.append(word)
return predictions, probs_out
def createModel(self):
self.initializeTraces()
self.layer_preparation_start_time = time()
self.train_initialization_time_used = self.layer_preparation_start_time - self.train_start_time
self.prepareLayers()
self.start_time = time()
self.layer_initialization_time_used = self.start_time - self.layer_preparation_start_time
self.previous_time = self.start_time
self.cumul_train_time = 0
self.cumul_exact_train_time = 0
self.cumul_test_time = 0
self.auc = 0
self.sr_trains = []
self.sr_tests = []
self.sr_tests_75p = []
self.sr_tests_50p = []
self.sr_tests_25p = []
self.time_used = []
self.avg_costs = []
self.time_used_for_test = []
self.all_cms = []
self.best_sr_test = 0
self.best_sr_train = 0
self.best_num_success = 0
self.best_num_fail = 0
self.best_params = None
def calculateSuccessRate(tracesToCalculateFor, tracePercentage, testId):
max_num_traces_to_test = self.parameters["max_num_traces_to_test"]
if (len(tracesToCalculateFor) > max_num_traces_to_test):
writeLog("Number of traces to test %d exceeds the configured maximum %d. Taking random sample of the configured maximum size." % (len(tracesToCalculateFor), max_num_traces_to_test))
tracesToCalculateFor = np.asarray(tracesToCalculateFor)[np.random.choice(len(tracesToCalculateFor), max_num_traces_to_test, replace=False)]
predictions, probs = self.predict_outcome(tracesToCalculateFor, tracePercentage)
numSuccess = 0
cm = [0, 0, 0, 0]
exps = []
trueWord = self.index_to_word[1] if self.num_outcomes == 2 else ""
predict_next_activity = self.parameters["predict_next_activity"]
for i in range(len(tracesToCalculateFor)):
expected = ("" if predict_next_activity else OUTCOME_SELECTION_TOKEN_PREFIX) + tracesToCalculateFor[i].outcome
actual = predictions[i]
numSuccess += 1 if expected == actual else 0
if (self.num_outcomes == 2):
bExpected = expected == trueWord
bActual = actual == trueWord
exps.append(1 if bExpected else 0)
cm[0] += 1 if bExpected and bActual else 0
cm[1] += 1 if not bExpected and not bActual else 0
cm[2] += 1 if not bExpected and bActual else 0
cm[3] += 1 if bExpected and not bActual else 0
self.cms[testId] = cm
self.cms_str += ":%i_%i_%i_%i" % (cm[0], cm[1], cm[2], cm[3])
if ((testId == 1) and (not predict_next_activity) and (self.num_outcomes == 2)):
self.auc = metrics.roc_auc_score(exps, probs)
return numSuccess, len(tracesToCalculateFor), numSuccess / len(tracesToCalculateFor)
def report(num_examples_seen, it, avg_cost, num_report_iterations, test_partial_traces = False):
t2 = time()
tutrain = (t2 - self.previous_time)
self.cumul_train_time = self.cumul_train_time + tutrain
self.time_used.append(tutrain)
self.cms = {}
self.cms_str = ""
writeLog("Testing 100% test samples")
numSuccess, numFail, sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
self.cumul_exact_train_time = self.cumul_exact_train_time + (time() - self.previous_time)
self.sr_tests.append(sr_test)
writeLog("Testing 100% training samples")
numSuccess, numFail, sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
self.sr_trains.append(sr_train)
sr_tests_75p = sr_tests_50p = sr_tests_25p = None
if (test_partial_traces):
writeLog("Testing 75% test samples")
numSuccess, numFail, sr_tests_75p = calculateSuccessRate(self.traces_test, 0.75, 2)
self.sr_tests_75p.append(sr_tests_75p)
writeLog("Testing 50% test samples")
numSuccess, numFail, sr_tests_50p = calculateSuccessRate(self.traces_test, 0.5, 3)
self.sr_tests_50p.append(sr_tests_50p)
writeLog("Testing 25% test samples")
numSuccess, numFail, sr_tests_25p = calculateSuccessRate(self.traces_test, 0.25, 4)
self.sr_tests_25p.append(sr_tests_25p)
self.avg_costs.append(avg_cost)
data_size = len(self.traces_train)
self.epoch = it*self.batch_size/data_size
self.generate_trace(5)
t3 = time()
tutest = (t3 - t2)
self.cumul_test_time = self.cumul_test_time + tutest
self.previous_time = t3
self.time_used_for_test.append(tutest)
self.all_cms.append(self.cms)
writeLog("Iteration: %i (%i) Total time used: ~%f seconds (train: %f, test: %f)" % (num_report_iterations, num_examples_seen, (time() - self.start_time) * 1., self.cumul_train_time, self.cumul_test_time))
writeLog("Epoch {} average loss = {}".format(self.epoch, avg_cost))
if (test_partial_traces):
writeLog("Success rates: test: %f test 75%%: %f test 50%%: %f test 25%%: %f train: %f" % (sr_test, sr_tests_75p, sr_tests_50p, sr_tests_25p, sr_train))
else:
writeLog("Success rates: test: %f train: %f" % (sr_test, sr_train))
if (sr_test > self.best_sr_test):
writeLog("Best accuracy thus far achieved. Storing parameters...")
self.best_sr_test = sr_test
self.best_sr_train = sr_train
self.best_num_success = numSuccess
self.best_num_fail = numFail
self.best_params = lasagne.layers.get_all_param_values(self.l_out,trainable=True)
self.best_iteration = num_report_iterations
writeResultRow([datetime.now().replace(microsecond=0).isoformat(),
"ok", self.parameters["test_name"], self.case_name,
self.parameters["dataset_name"] if (("dataset_name" in self.parameters) and (self.parameters["dataset_name"] != None)) else self.eventlog.filename,
self.parameters["cross-validation-run"] if (("cross-validation-run" in self.parameters) and (self.parameters["cross-validation-run"] != None)) else "",
len(self.traces_train), len(self.traces_test),
len(self.traces_train) + len(self.traces_test),
self.algorithm, self.num_layers, self.hidden_dim_size,
self.optimizer, self.learning_rate, self.seq_length, len(self.word_to_index), self.batch_size,
self.grad_clipping, self.num_iterations_between_reports,
self.best_iteration,
num_report_iterations,
num_examples_seen, self.epoch,
self.train_initialization_time_used, self.layer_initialization_time_used,
self.cumul_exact_train_time, tutrain, self.cumul_train_time, tutest,
self.cumul_test_time, sr_train, sr_test, sr_tests_75p, sr_tests_50p,
sr_tests_25p,
avg_cost, self.auc, self.cms[1][0], self.cms[1][1], self.cms[1][2], self.cms[1][3],
str(self.cms_str),
self.predict_only_outcome, self.final_trace_only, self.trace_length_modifier,
self.num_iterations_between_reports * self.num_callbacks == 100000 * 50,
self.max_num_words, self.truncate_unknowns,
not self.parameters["disable_activity_labels"],
not self.parameters["disable_durations"],
not self.parameters["disable_event_attributes"],
not self.parameters["disable_case_attributes"],
not self.parameters["disable_raw_event_attributes"],
not self.parameters["disable_raw_case_attributes"],
self.parameters["predict_next_activity"],
self.parameters["use_single_event_clustering"],
self.parameters["duration_split_method"],
self.parameters["case_clustering_method"],
self.parameters["event_clustering_method"],
self.parameters["case_clustering_include_activity_occurrences"],
self.parameters["case_clustering_include_case_attributes"],
self.parameters["include_activity_occurrences_as_raw_case_attributes"],
self.parameters["use_single_value_for_duration"],
self.parameters["max_num_case_clusters"],
self.parameters["max_num_event_clusters"],
self.parameters["ignore_values_threshold_for_case_attributes"],
self.parameters["ignore_values_threshold_for_event_attributes"]
])
# self.draw_chart()
# writeLog("Calculating initial probabilities.")
self.cms = {}
self.cms_str = ""
# sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
# self.sr_trains.append(sr_train)
# sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
# self.sr_tests.append(sr_test)
# self.time_used.append(time.time() - self.start_time)
# self.avg_costs.append(0)
# writeLog("Initial success rates: test: %f train: %f" % (sr_test, sr_train))
self.trainModel(report)
self.cms = {}
self.cms_str = ""
numSuccess, numFail, sr_train = calculateSuccessRate(self.traces_train, 1.0, 0)
self.sr_trains.append(sr_train)
numSuccess, numFail, sr_test = calculateSuccessRate(self.traces_test, 1.0, 1)
self.sr_tests.append(sr_test)
self.avg_costs.append(0)
writeLog("Final success rates: test: %f train: %f iteration: %i" % (self.best_sr_test, self.best_sr_train, self.best_iteration))
self.time_used.append(self.cumul_train_time)
return self.best_num_success, self.best_num_fail, self.cumul_exact_train_time
# self.draw_chart()
def generate_trace(self, min_length=2):
# We start the sentence with the start token
x = np.zeros((1, self.seq_length, len(self.word_to_index)))
mask = np.zeros((1, self.seq_length))
new_sentence = []
i = 0
wordsMask = np.array([(b or self.is_outcome[i]) for i, b in enumerate(self.is_word_token)])
# Repeat until we get an end token
while ((len(new_sentence) == 0) or (not self.is_outcome[new_sentence[-1]])):
probs = self.propabilities(x, mask)[0]
probs = np.asarray([p if wordsMask[i] else 0 for i, p in enumerate(probs)])
if (probs.sum() == 0.0):
writeLog("Sum of probabilities is zero. Unable to generate trace.")
break
probs /= probs.sum()
# samples = np.random.multinomial(1, probs)
# index = np.argmax(samples)
index = np.random.choice(range(len(probs)), p=probs)
new_sentence.append(index)
x[0, i, index] = 1
mask[0, i] = 1
i += 1
            # Sometimes we get stuck if the sentence becomes too long, e.g. "........" :(
# And: We don't want sentences with UNKNOWN_TOKEN's
if len(new_sentence) >= self.seq_length or index == self.word_to_index[UNKNOWN_TOKEN]:
writeLog("Generated exceedingly long example trace. Skipping.")
return None
if len(new_sentence) < min_length:
return None
res = [self.index_to_word[x] for x in new_sentence]
writeLog("Generated example trace of length %d: %s" % (len(res), str(res)))
return res
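    # Packs the hyperparameters, trained weights, vocabulary and clustering objects into a plain
    # dict so that ModelCluster.save can pickle the whole model to disk.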
def save(self):
saved = {
"name": self.eventlog.filename,
"nn_params": {
"algorithm": self.algorithm,
"num_layers": self.num_layers,
"optimizer": self.optimizer,
"learning_rate": self.learning_rate,
"batch_size": self.batch_size,
"num_callbacks": self.num_callbacks,
"case_name": self.case_name,
"hidden_dim_size": self.hidden_dim_size,
"num_iterations_between_reports": self.num_iterations_between_reports,
"grad_clipping": self.grad_clipping,
"predict_only_outcome": self.predict_only_outcome,
"final_trace_only": self.final_trace_only,
"max_num_words": self.max_num_words,
"trace_length_modifier": self.trace_length_modifier,
"truncate_unknowns": self.truncate_unknowns,
"trained_params": self.best_params
},
"event_clustering": self.event_clustering,
"case_clustering": self.case_clustering,
"eventlog": {
"activities": self.eventlog.data["activities"],
"attributes": self.eventlog.data["attributes"],
"filename": self.eventlog.filename,
"filepath": self.eventlog.filepath
},
"word_to_index": self.word_to_index,
"index_to_word": self.index_to_word,
"seq_length": self.seq_length,
"outcomes": self.outcomes,
"parameters": self.parameters
}
return saved
def load(self, saved):
self.algorithm = saved["nn_params"]["algorithm"]
self.num_layers = saved["nn_params"]["num_layers"]
self.optimizer = saved["nn_params"]["optimizer"]
self.learning_rate = saved["nn_params"]["learning_rate"]
self.batch_size = saved["nn_params"]["batch_size"]
self.num_callbacks = saved["nn_params"]["num_callbacks"]
self.case_name = saved["nn_params"]["case_name"]
self.hidden_dim_size = saved["nn_params"]["hidden_dim_size"]
self.num_iterations_between_reports = saved["nn_params"]["num_iterations_between_reports"]
self.grad_clipping = saved["nn_params"]["grad_clipping"]
self.predict_only_outcome = saved["nn_params"]["predict_only_outcome"]
self.final_trace_only = saved["nn_params"]["final_trace_only"]
self.max_num_words = saved["nn_params"]["max_num_words"]
self.trace_length_modifier = saved["nn_params"]["trace_length_modifier"]
self.truncate_unknowns = saved["nn_params"]["truncate_unknowns"]
self.word_to_index = saved["word_to_index"]
self.index_to_word = saved["index_to_word"]
self.seq_length = saved["seq_length"]
self.outcomes = saved["outcomes"]
self.parameters.update(saved["parameters"])
try:
self.event_clustering = saved["event_clustering"]
self.event_clustering.addUndefinedParameters(self.parameters)
self.case_clustering = saved["case_clustering"]
self.case_clustering.addUndefinedParameters(self.parameters)
self.eventlogActivities = saved["eventlog"]["activities"]
self.eventlogAttributes = saved["eventlog"]["attributes"]
self.eventlogFilename = saved["eventlog"]["filename"]
self.eventlogFilepath = saved["eventlog"]["filepath"]
self.prepareLayers()
lasagne.layers.set_all_param_values(self.l_out, saved["nn_params"]["trained_params"], trainable=True)
except:
            writeLog("Exception: " + traceback.format_exc())
def test(self, eventlog, tracePercentage = 1.0, maxNumTraces = None):
self.eventlog = eventlog
self.eventlog.model = self
eventlog.initializeForTesting(self)
self.prepareTestData(eventlog)
self.traces_train = []
self.traces_test = eventlog.convertTracesFromInputData(eventlog.testData, self.parameters, self.trace_length_modifier)
if (maxNumTraces != None) and (maxNumTraces < len(self.traces_test)):
writeLog("Filtering %d traces out of %d test traces" % (maxNumTraces, len(self.traces_test)))
self.traces_test = list(np.random.choice(np.asarray(self.traces_test), maxNumTraces, replace=False))
self.initializeTokenTypeArrays()
tokenized_sentences_test = [nltk.WhitespaceTokenizer().tokenize(trace.sentence) for trace in self.traces_test]
        sl = self.prepareTokenizedSentences(self.traces_test, tokenized_sentences_test, None, True, self.seq_length)
writeLog("Maximum sequence length in the test set is %d tokens." % (sl))
predictions, probs = self.predict_outcome(self.traces_test, tracePercentage)
numSuccess = 0
cases = eventlog.data["cases"]
if len(cases) > 0:
predict_next_activity = self.parameters["predict_next_activity"]
if (predict_next_activity or ("s" in cases[0])):
prefix = ("" if predict_next_activity else OUTCOME_SELECTION_TOKEN_PREFIX)
for i, pred in enumerate(predictions):
if pred == prefix + self.traces_test[i].outcome:
numSuccess += 1
return self.traces_test, predictions, probs, numSuccess
| 49,689 | 54.272525 | 310 | py |
articles | articles-master/Exploiting Event Log Event Attributes in RNN Based Prediction/src/modelcluster.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 11:59:43 2017
Test framework sources used to perform the tests required by paper:
"Exploiting Event Log Event Attributes in RNN Based Prediction"
by Markku Hinkka, Teemu Lehto and Keijo Heljanko
"""
import sys
import lasagne
from lasagne.layers import *
import numpy as np
import theano as theano
import theano.tensor as T
from time import time
import operator
import pickle
#import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn import metrics
from time import time
from pathlib import Path
from cluster import Clustering
import pandas as pd
import nltk
import itertools
import json
from my_utils import TraceData, writeLog, writeResultRow, writeTestResultRow, get_filename, getOutputPath, OUTCOME_SELECTION_TOKEN_PREFIX, DURATION_TOKEN_PREFIX, EVENT_ATTRIBUTE_TOKEN_PREFIX, WORD_PART_SEPARATOR, CASE_ATTRIBUTE_TOKEN_PREFIX
from bucket import Bucket
from model import Model
UNKNOWN_TOKEN = "UNKNOWN"
class ModelCluster:
def __init__(self, rng):
lasagne.random.set_rng(rng)
writeLog("Creating new model cluster object")
def initialize(self, parameters,
case_clustering, event_clustering, rng):
self.caseClusterModel = None
self.caseClusterVectorizer = None
self.algorithm = parameters["algorithm"]
self.num_layers = parameters["num_layers"]
self.optimizer = parameters["optimizer"]
self.learning_rate = parameters["learning_rate"]
self.batch_size = parameters["batch_size"]
self.num_callbacks = parameters["num_callbacks"]
self.case_name = parameters["case_name"]
self.hidden_dim_size = parameters["hidden_dim_size"]
self.num_iterations_between_reports = parameters["num_iterations_between_reports"]
self.grad_clipping = parameters["grad_clipping"]
self.predict_only_outcome = parameters["predict_only_outcome"]
self.final_trace_only = parameters["final_trace_only"]
self.max_num_words = parameters["max_num_words"]
self.trace_length_modifier = parameters["trace_length_modifier"]
self.truncate_unknowns = parameters["truncate_unknowns"]
self.num_models = parameters["num_models"]
self.parameters = parameters
self.case_clustering = case_clustering
self.event_clustering = event_clustering
self.rng = rng
self.models = [self.createModel() for i in range(self.num_models)]
def createModel(self):
result = Model(self.parameters)
result.initialize(
case_clustering = Clustering(copyFrom=self.case_clustering),
event_clustering = Clustering(copyFrom=self.event_clustering),
rng = self.rng)
return result
def train(self, eventlog):
self.eventlogs = self.splitLog(eventlog)
writeLog("Trace distribution by models:")
trainDatasetSize = 0
for i, eventlog in enumerate(self.eventlogs):
writeLog("Model #%d: Train: %d traces, Test: %d traces" % (i + 1, len(eventlog.trainingData), len(eventlog.testData)))
trainDatasetSize += len(eventlog.trainingData) + len(eventlog.testData)
tutrain = 0
numSuccess = 0
numFail = 0
titu = 0
litu = 0
numEpochs = []
ivs = []
bestIterations = []
for i, eventlog in enumerate(self.eventlogs):
model = self.models[i]
writeLog("Training model %d of %d" % (i + 1, len(self.eventlogs)))
ns, ne, tu = model.train(eventlog)
numEpochs.append(model.epoch)
ivs.append(len(model.word_to_index))
bestIterations.append(model.best_iteration)
tutrain += tu
numSuccess += ns
numFail += ne
titu += model.train_initialization_time_used
litu += model.layer_initialization_time_used
srtrain = numSuccess / numFail
writeLog("Total time used in training: %d (success rate = %f)" % (tutrain, srtrain))
return {
"success_rate": srtrain,
"train_dataset_size": trainDatasetSize,
"train_time_used": tutrain,
"train_init_time_used": titu,
"layer_init_time_used": litu,
"num_epochs": np.mean(np.asarray(numEpochs)),
"test_iterations": self.parameters["num_callbacks"],
"input_vector_size": np.mean(ivs),
"best_iteration": np.mean(bestIterations)
}
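    # Splits the event log into one sub-log per model by clustering cases with MiniBatchKMeans on
    # their activity occurrence vectors (the "occ" field of each case); with a single model the
    # log is returned as-is.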
def splitLog(self, eventlog, onlyTest = False):
self.eventlog = eventlog
true_k = len(self.models)
if (true_k == 1):
return [self.eventlog]
t0 = time()
result = [self.eventlog.createEmptyCopy(self.parameters) for model in self.models]
if (not onlyTest):
cases = np.array([c["occ"] for c in self.eventlog.trainingData])
df = pd.DataFrame(cases, columns=[a["name"] for a in self.eventlog.data["activities"]])
self.caseClusterVectorizer = DictVectorizer(sparse = False)
X = self.caseClusterVectorizer.fit_transform(df.to_dict(orient = 'records'))
writeLog("Event log splitting done in %fs" % (time() - t0))
writeLog("n_samples: %d, n_features: %d" % X.shape)
# #############################################################################
# Do the actual clustering
# if opts.minibatch:
self.caseClusterModel = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=False)
# else:
# self.caseClusterModel = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
# verbose=opts.verbose)
# writeLog("Clustering sparse data with %s" % self.caseClusterModel)
t0 = time()
x = self.caseClusterModel.fit(X)
writeLog("done in %0.3fs" % (time() - t0))
for i, d in enumerate(x.labels_):
result[d].addTrace(self.eventlog.trainingData[i], True)
cases = np.array([c["occ"] for c in self.eventlog.testData])
df = pd.DataFrame(cases, columns=[a["name"] for a in self.eventlog.data["activities"]])
XX = self.caseClusterVectorizer.transform(df.to_dict(orient = 'records'))
x = self.caseClusterModel.predict(XX)
for i, d in enumerate(x):
result[d].addTrace(self.eventlog.testData[i], False)
for eventlog in result:
eventlog.initializeDerivedData(True)
return result
def save(self, file_handle, parameters):
hasExactOutputFilename = (("output_filename" in parameters) and (parameters["output_filename"] != None))
directory = parameters["model_output_directory"] if ("model_output_directory" in parameters and parameters["model_output_directory"] != None) else getOutputPath()
filename = parameters["output_filename"] if hasExactOutputFilename else (((parameters["dataset_name"] + "-") if (("dataset_name" in parameters) and (parameters["dataset_name"] != None)) else "") + parameters["test_name"])
filename = (directory + filename) if hasExactOutputFilename else ("%s%s_%s.model" % (directory, file_handle, filename))
savedModels = [model.save() for model in self.models]
saved = {
"name": self.eventlog.filename,
"parameters": self.parameters,
"saved_models": savedModels,
"case_cluster_model": self.caseClusterModel,
"case_cluster_vectorizer": self.caseClusterVectorizer,
"case_clustering": self.case_clustering,
"event_clustering": self.event_clustering,
"nn_params": {
"algorithm": self.algorithm,
"num_layers": self.num_layers,
"optimizer": self.optimizer,
"learning_rate": self.learning_rate,
"batch_size": self.batch_size,
"num_callbacks": self.num_callbacks,
"case_name": self.case_name,
"hidden_dim_size": self.hidden_dim_size,
"num_iterations_between_reports": self.num_iterations_between_reports,
"grad_clipping": self.grad_clipping,
"predict_only_outcome": self.predict_only_outcome,
"final_trace_only": self.final_trace_only,
"max_num_words": self.max_num_words,
"trace_length_modifier": self.trace_length_modifier,
"truncate_unknowns": self.truncate_unknowns,
"num_models": self.num_models
}
}
with open(filename, 'wb') as f:
pickle.dump(saved, f) # https://groups.google.com/d/msg/lasagne-users/w8safJOJYvI/SvdiuIHIDQAJ
return filename
def load(self, filename, parameters):
path = Path(filename)
if (not path.is_file()):
filename = getOutputPath() + filename
with open(filename, 'rb') as f:
saved = pickle.load(f) # https://groups.google.com/d/msg/lasagne-users/w8safJOJYvI/SvdiuIHIDQAJ
self.parameters = dict(parameters)
self.parameters.update(saved["parameters"])
self.caseClusterModel = saved["case_cluster_model"]
self.caseClusterVectorizer = saved["case_cluster_vectorizer"]
self.case_clustering = saved["case_clustering"]
self.event_clustering = saved["event_clustering"]
self.algorithm = saved["nn_params"]["algorithm"]
self.num_layers = saved["nn_params"]["num_layers"]
self.optimizer = saved["nn_params"]["optimizer"]
self.learning_rate = saved["nn_params"]["learning_rate"]
self.batch_size = saved["nn_params"]["batch_size"]
self.num_callbacks = saved["nn_params"]["num_callbacks"]
self.case_name = saved["nn_params"]["case_name"]
self.hidden_dim_size = saved["nn_params"]["hidden_dim_size"]
self.num_iterations_between_reports = saved["nn_params"]["num_iterations_between_reports"]
self.grad_clipping = saved["nn_params"]["grad_clipping"]
self.predict_only_outcome = saved["nn_params"]["predict_only_outcome"]
self.final_trace_only = saved["nn_params"]["final_trace_only"]
self.max_num_words = saved["nn_params"]["max_num_words"]
self.trace_length_modifier = saved["nn_params"]["trace_length_modifier"]
self.truncate_unknowns = saved["nn_params"]["truncate_unknowns"]
self.num_models = saved["nn_params"]["num_models"]
self.models = []
for i in range(self.num_models):
writeLog("Loading model %d of %d" % (i + 1, self.num_models))
model = Model(self.parameters)
self.models.append(model)
model.load(saved["saved_models"][i])
def test(self, eventlog, tracePercentage = 1.0, trainResult = None, maxNumTraces = None):
self.eventlogs = self.splitLog(eventlog, True)
writeLog("Trace distribution by models:")
for i, eventlog in enumerate(self.eventlogs):
writeLog("Model #%d: Train: %d cases, Test: %d cases" % (i + 1, len(eventlog.trainingData), len(eventlog.testData)))
traces = []
predictions = []
probs = []
numSuccess = 0
t0 = time()
for i, model in enumerate(self.models):
writeLog("Testing model %d of %d" % (i + 1, len(self.eventlogs)))
t, pred, prob, ns = model.test(self.eventlogs[i], tracePercentage, maxNumTraces)
traces += t
predictions += pred
probs += prob
numSuccess += ns
tutest = (time() - t0)
sr_test = numSuccess / len(predictions)
writeLog("Success rate for test data: %d/%d (=%f%%)" % (numSuccess, len(predictions), 100 * sr_test))
train_success_rate = ""
train_time_used = ""
train_init_time_used = ""
train_layer_init_time_used = ""
num_epochs = ""
test_iterations = ""
train_dataset_size = 0
if trainResult != None:
train_success_rate = trainResult["success_rate"]
train_time_used = trainResult["train_time_used"]
train_init_time_used = trainResult["train_init_time_used"]
train_layer_init_time_used = trainResult["layer_init_time_used"]
train_dataset_size = trainResult["train_dataset_size"]
num_epochs = trainResult["num_epochs"]
test_iterations = trainResult["test_iterations"]
train_input_vector_size = trainResult["input_vector_size"]
train_best_iteration = trainResult["best_iteration"]
writeTestResultRow([datetime.now().replace(microsecond=0).isoformat(),
"ok-test", self.parameters["test_name"], self.case_name,
self.parameters["dataset_name"] if (("dataset_name" in self.parameters) and (self.parameters["dataset_name"] != None)) else self.eventlog.filename,
self.parameters["cross-validation-run"] if (("cross-validation-run" in self.parameters) and (self.parameters["cross-validation-run"] != None)) else "",
train_dataset_size, len(traces), len(traces),
self.algorithm, self.num_layers, self.hidden_dim_size,
self.optimizer, self.learning_rate, "", train_input_vector_size, self.batch_size,
self.grad_clipping, self.num_iterations_between_reports,
train_best_iteration,
test_iterations, "", num_epochs,
train_init_time_used, train_layer_init_time_used,
train_time_used, train_time_used, train_time_used, tutest, tutest, train_success_rate, sr_test, "", "",
"",
"", "", "", "", "", "",
"",
self.predict_only_outcome, self.final_trace_only, self.trace_length_modifier,
self.num_iterations_between_reports * self.num_callbacks == 100000 * 50,
self.max_num_words, self.truncate_unknowns,
not self.parameters["disable_activity_labels"],
not self.parameters["disable_durations"],
not self.parameters["disable_event_attributes"],
not self.parameters["disable_case_attributes"],
not self.parameters["disable_raw_event_attributes"],
not self.parameters["disable_raw_case_attributes"],
self.parameters["predict_next_activity"],
self.parameters["use_single_event_clustering"],
self.parameters["duration_split_method"],
self.parameters["case_clustering_method"],
self.parameters["event_clustering_method"],
self.parameters["case_clustering_include_activity_occurrences"],
self.parameters["case_clustering_include_case_attributes"],
self.parameters["include_activity_occurrences_as_raw_case_attributes"],
self.parameters["use_single_value_for_duration"],
self.parameters["max_num_case_clusters"],
self.parameters["max_num_event_clusters"],
self.parameters["ignore_values_threshold_for_case_attributes"],
self.parameters["ignore_values_threshold_for_event_attributes"]
])
writeLog("Collecting results...")
result = {}
for i, trace in enumerate(traces):
pred = predictions[i]
result[trace.traceId] = {
"outcome": pred[len(OUTCOME_SELECTION_TOKEN_PREFIX):] if pred.startswith(OUTCOME_SELECTION_TOKEN_PREFIX) else pred,
"p": probs[i],
"expected": trace.outcome if trace.outcome != None else ""
}
return result
| 15,958 | 46.497024 | 240 | py |
GraB | GraB-main/setup.py | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="orderedsampler",
version="0.0.1",
author="Yucheng Lu",
author_email="[email protected]",
description="pytorch-based OrderedSampler that supports example ordering",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/EugeneLYC/orderedsampler",
project_urls={
"Bug Tracker": "https://github.com/EugeneLYC/orderedsampler",
},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
) | 838 | 31.269231 | 78 | py |
GraB | GraB-main/neurips22/examples/nlp/BertGlue/train_bert_glue.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
import argparse
import logging
import math
import os
import random
from pathlib import Path
import datasets
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from huggingface_hub import Repository
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.utils.versions import require_version
import torch
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
_RANDOM_RESHUFFLING_ = 'random_reshuffling'
_SHUFFLE_ONCE_ = 'shuffle_once'
_STALE_GRAD_SORT_ = 'stale_grad_greedy_sort'
_DM_SORT_ = 'dm'
_FLIPFLOP_SORT_ = 'flipflop'
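# String identifiers for the example-ordering strategies referenced by this training script
# (random reshuffling, shuffle-once, stale-gradient greedy sorting, discrepancy-minimization
# ordering, flipflop); they are presumably matched against the --shuffle_type option below.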
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
parser.add_argument(
"--task_name",
type=str,
default=None,
help="The name of the glue task to train on.",
choices=list(task_to_keys.keys()),
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_length",
type=int,
default=128,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument('--use_tensorboard',
default=False,
action='store_true',
                        help='whether to log training metrics to tensorboard')
parser.add_argument('--tensorboard_path',
type=str,
help='the base directory for tensorboard logs')
parser.add_argument('--shuffle_type',
default='RR',
type=str,
help='shuffle type used for the optimization (choose from RR, SO, greedy, ZO, fresh)')
parser.add_argument('--use_random_proj',
default=False,
action='store_true',
                        help='whether to use projection when doing the greedy sorting (default: False)')
parser.add_argument('--use_random_proj_full',
default=False,
action='store_true',
                        help='whether to use projection after storing all the full-dimension gradients (default: False)')
parser.add_argument('--use_qr',
default=False,
action='store_true',
                        help='whether to use qr_decomposition in the sorting part (default: False)')
parser.add_argument('--proj_ratio',
default=0.1,
type=float,
help='decide project how much ratio of the orginal entire model (default: 0.1)')
parser.add_argument('--proj_target',
default=1024,
type=int,
help='the target dimension for random projection')
args = parser.parse_args()
# Sanity checks
if args.task_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", args.task_name.lower())
else:
# Loading the dataset from local csv or json file.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
        extension = (args.train_file if args.train_file is not None else args.validation_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if args.task_name is not None:
is_regression = args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
# Preprocessing the datasets
if args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
logger.info(
f"The configuration of the model provided the following label correspondence: {label_name_to_id}. "
"Using it!"
)
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif args.task_name is None:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
# Tokenize the texts
texts = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)
if "label" in examples:
if label_to_id is not None:
# Map labels to IDs (not necessary for GLUE tasks)
result["labels"] = [label_to_id[l] for l in examples["label"]]
else:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
shuffle_flag = True if args.shuffle_type == 'RR' else False
train_dataloader = DataLoader(
train_dataset, shuffle=shuffle_flag, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
# Note that we are using momentum SGD for this testing.
# This can be achieved by setting betas and eps accordingly as follows.
optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, betas=(0.9, 0), eps=1, correct_bias=False)
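    # Added note (interpretation, not from the original script): with betas=(0.9, 0) the
    # second-moment estimate collapses to g_t**2, so the Adam denominator becomes
    # |g_t| + eps = |g_t| + 1. When gradients are small this is close to 1, and the update
    # approximately reduces to lr times an exponential moving average of the gradients,
    # i.e. a momentum-SGD-like step; correct_bias=False drops the bias-correction terms.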
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
    # Note: the training dataloader needs to be prepared before we grab its length below
    # (its length will be shorter when training with multiple processes).
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Get the metric function
if args.task_name is not None:
metric = load_metric("glue", args.task_name)
else:
metric = load_metric("accuracy")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.shuffle_type+'_seed'+str(args.seed)+'_task_'+args.task_name)
writer = SummaryWriter(tb_path)
else:
writer = None
grad_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
num_batches = len(list(enumerate(train_dataloader)))
args.use_cuda = True
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
from dmsort.algo import FreshGradGreedySort
sorter = FreshGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
sorter = None
for epoch in range(args.num_train_epochs):
model.train()
train_batches = list(enumerate(train_dataloader))
if sorter is not None:
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _DM_SORT_:
orders = sorter.sort()
elif args.shuffle_type == _FLIPFLOP_SORT_:
orders = sorter.sort(epoch)
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(train_batches))}
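        # Added note: only the key iteration order of `orders` matters below; the values
        # are unused placeholders.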
# for step, batch in enumerate(train_dataloader):
step = -1
for i in orders.keys():
step += 1
_, batch = train_batches[i]
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=i,
epoch=epoch)
if sorter is not None and args.shuffle_type == _DM_SORT_:
sorter.step(optimizer=optimizer, batch_idx=i)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if writer is not None:
writer.add_scalar('train/loss', loss.item(), completed_steps)
if completed_steps >= args.max_train_steps:
break
model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metric = metric.compute()
if writer is not None:
for k in eval_metric.keys():
writer.add_scalar('val/'+k, eval_metric[k], epoch)
logger.info(f"epoch {epoch}: {eval_metric}")
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
if args.task_name == "mnli":
# Final evaluation on mismatched validation set
eval_dataset = processed_datasets["validation_mismatched"]
eval_dataloader = DataLoader(
eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
eval_dataloader = accelerator.prepare(eval_dataloader)
model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metric = metric.compute()
logger.info(f"mnli-mm: {eval_metric}")
if writer is not None:
writer.close()
if __name__ == "__main__":
main() | 26,114 | 42.236755 | 127 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/main.py | # coding: utf-8
import argparse
import math
import os
import torch
import torch.nn as nn
import data
import model
import random
import tqdm
import time
import logging
from contextlib import contextmanager
from io import StringIO
from tensorboardX import SummaryWriter
from constants import _STALE_GRAD_SORT_, \
_RANDOM_RESHUFFLING_, \
_SHUFFLE_ONCE_, \
_DM_SORT_, \
_FLIPFLOP_SORT_
parser = argparse.ArgumentParser(description='PyTorch RNN/LSTM/GRU Language Model')
parser.add_argument('--data', type=str, default='./wikitext-2',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
choices=['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU', 'Transformer'],
                    help='type of model (RNN_TANH, RNN_RELU, LSTM, GRU, Transformer)')
parser.add_argument('--emsize', type=int, default=32,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=32,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--train_batch_size', type=int, default=40, metavar='N',
help='train batch size')
parser.add_argument('--val_batch_size', type=int, default=10, metavar='N',
help='val batch size')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='N',
help='test batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1,
help='random seed')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--nhead', type=int, default=2,
help='the number of heads in the encoder/decoder of the transformer model')
parser.add_argument('--notes', type=str, default='wiki2')
parser.add_argument('--shuffle_type', type=str)
parser.add_argument('--use_tensorboard',
default=False,
action='store_true',
help='log the seeds results in a txt file for consistent results')
parser.add_argument('--tensorboard_path',
type=str,
help='the base directory for tensorboard logs')
args = parser.parse_args()
setattr(args, 'use_cuda', torch.cuda.is_available())
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
def make_directory_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
###############################################################################
# Load data
###############################################################################
train_path = os.path.join(args.data, 'train.txt')
valid_path = os.path.join(args.data, 'valid.txt')
test_path = os.path.join(args.data, 'test.txt')
corpus = data.Corpus(train_path=train_path, valid_path=valid_path, test_path=test_path)
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data
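# Illustrative example (added comment): with a 1-D corpus of 26 tokens and bsz=4,
# batchify keeps the first 24 tokens and returns a 6 x 4 tensor whose column j holds a
# contiguous chunk of the corpus, so batching happens across columns rather than rows.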
train_data_train = batchify(corpus.train.clone(), args.train_batch_size)
train_data_test = batchify(corpus.train.clone(), args.train_batch_size)
val_data = batchify(corpus.valid, args.val_batch_size)
test_data = batchify(corpus.test, args.test_batch_size)
train_ppl_in_training = []
train_ppl_each_epoch = []
val_ppl_each_epoch = []
test_ppl_each_epoch = []
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
if args.model == 'Transformer':
model = model.TransformerModel(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(device)
else:
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
criterion = nn.NLLLoss()
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
class Timer:
"""
Timer for PyTorch code
Comes in the form of a contextmanager:
Example:
>>> timer = Timer()
... for i in range(10):
... with timer("expensive operation"):
... x = torch.randn(100)
... print(timer.summary())
"""
def __init__(self, verbosity_level=1, skip_first=True, use_cuda=True):
self.verbosity_level = verbosity_level
#self.log_fn = log_fn if log_fn is not None else self._default_log_fn
self.skip_first = skip_first
self.cuda_available = torch.cuda.is_available() and use_cuda
self.reset()
def reset(self):
"""Reset the timer"""
self.totals = {} # Total time per label
self.first_time = {} # First occurrence of a label (start time)
        self.last_time = {}  # Last occurrence of a label (end time)
self.call_counts = {} # Number of times a label occurred
@contextmanager
def __call__(self, label, epoch=-1.0, verbosity=1):
# Don't measure this if the verbosity level is too high
if verbosity > self.verbosity_level:
yield
return
# Measure the time
self._cuda_sync()
start = time.time()
yield
self._cuda_sync()
end = time.time()
# Update first and last occurrence of this label
if label not in self.first_time:
self.first_time[label] = start
self.last_time[label] = end
# Update the totals and call counts
if label not in self.totals and self.skip_first:
self.totals[label] = 0.0
del self.first_time[label]
self.call_counts[label] = 0
elif label not in self.totals and not self.skip_first:
self.totals[label] = end - start
self.call_counts[label] = 1
else:
self.totals[label] += end - start
self.call_counts[label] += 1
#if self.call_counts[label] > 0:
# # We will reduce the probability of logging a timing
# # linearly with the number of time we have seen it.
# # It will always be recorded in the totals, though.
# if np.random.rand() < 1 / self.call_counts[label]:
# self.log_fn(
# "timer", {"epoch": epoch, "value": end - start}, {"event": label}
# )
def summary(self):
"""
Return a summary in string-form of all the timings recorded so far
"""
if len(self.totals) > 0:
with StringIO() as buffer:
total_avg_time = 0
print("--- Timer summary ------------------------", file=buffer)
print(" Event | Count | Average time | Frac.", file=buffer)
for event_label in sorted(self.totals):
total = self.totals[event_label]
count = self.call_counts[event_label]
if count == 0:
continue
avg_duration = total / count
total_runtime = (
self.last_time[event_label] - self.first_time[event_label]
)
runtime_percentage = 100 * total / total_runtime
total_avg_time += avg_duration if "." not in event_label else 0
print(
f"- {event_label:30s} | {count:6d} | {avg_duration:11.5f}s | {runtime_percentage:5.1f}%",
file=buffer,
)
print("-------------------------------------------", file=buffer)
event_label = "total_averaged_time"
print(
f"- {event_label:30s}| {count:6d} | {total_avg_time:11.5f}s |",
file=buffer,
)
print("-------------------------------------------", file=buffer)
return buffer.getvalue()
def _cuda_sync(self):
"""Finish all asynchronous GPU computations to get correct timings"""
if self.cuda_available:
torch.cuda.synchronize()
    def _default_log_fn(self, _, values, tags):
        label = tags["label"]
        epoch = values["epoch"]
        duration = values["value"]
        print(f"Timer: {label:30s} @ {epoch:4.1f} - {duration:8.5f}s")
class TrainDataset(torch.utils.data.Dataset):
def __init__(self, tensor, device, shuffle=False) -> None:
super().__init__()
self.data = tensor
self.device = device
self.shuffle = shuffle
if self.shuffle:
a = list(range(self.data.shape[0] // args.bptt))
b = list(range(self.data.shape[0] // args.bptt))
random.shuffle(b)
self.mapping = {i:j for i, j in zip(a, b)}
def __getitem__(self, i):
if self.shuffle:
i = self.mapping[i]
if i >= len(self): raise IndexError(f'index {i} out of range')
i = i * args.bptt
seq_len = min(args.bptt, self.data.shape[0] - 1 - i)
data = self.data[i:i + seq_len]
target = self.data[i + 1:i + 1 + seq_len]
return data.to(self.device), target.view(-1).to(self.device)
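    # Added note on the slicing above (illustrative numbers): with bptt=35, item i uses
    # tokens [35*i, 35*i + seq_len) of the batchified data as inputs and the same window
    # shifted by one position as targets, so consecutive items advance through the corpus
    # in bptt-sized strides along dimension 0.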
def __len__(self):
return (self.data.shape[0] // args.bptt)
def evaluate(dataset, counter):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
if args.model != 'Transformer':
hidden = model.init_hidden(dataset.data.shape[-1])
with torch.no_grad():
for idx, (data, targets) in enumerate(dataset):
if args.model == 'Transformer':
output = model(data)
output = output.view(-1, ntokens)
else:
output, hidden = model(data, hidden)
hidden = repackage_hidden(hidden)
total_loss += (len(data) * criterion(output, targets)).item()
counter.update(1)
return (total_loss / len(dataset.data))
def train(epoch, optimizer, dataset, counter, sorter, timer):
# Turn on training mode which enables dropout.
model.train()
if args.model != 'Transformer':
hidden = model.init_hidden(dataset.data.shape[-1])
total_loss = 0
if sorter is not None:
with timer("sorting", epoch=epoch):
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _DM_SORT_:
orders = sorter.sort()
elif args.shuffle_type == _FLIPFLOP_SORT_:
orders = sorter.sort(epoch=epoch)
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(dataset))}
if args.shuffle_type == _RANDOM_RESHUFFLING_:
a = list(range(len(dataset)))
random.shuffle(a)
orders = {i:0 for i in a}
for idx in orders.keys():
data, targets = dataset[idx]
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
with timer("forward pass", epoch=epoch):
optimizer.zero_grad()
if args.model == 'Transformer':
output = model(data)
output = output.view(-1, ntokens)
else:
hidden = repackage_hidden(hidden)
output, hidden = model(data, hidden)
loss = criterion(output, targets)
with timer("backward pass", epoch=epoch):
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
with timer("sorting", epoch=epoch):
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=idx,
epoch=epoch)
logging.info(f"Storing the staled gradient used in StaleGradGreedySort method.")
if sorter is not None and args.shuffle_type == _DM_SORT_:
with timer("sorting", epoch=epoch):
sorter.step(optimizer=optimizer, batch_idx=idx)
with timer("backward pass", epoch=epoch):
optimizer.step()
total_loss += loss.item()
if idx % args.log_interval == 0 and idx > 0:
cur_loss = total_loss / args.log_interval
print('| epoch {:3d} | {:5d}/{:5d} batches | loss {:5.2f}'\
.format(epoch, idx, len(dataset), cur_loss))
total_loss = 0
total_time = timer.totals["forward pass"] + timer.totals["backward pass"]
if sorter is not None:
total_time += timer.totals["sorting"]
return total_time
def main():
print(vars(args))
shuffle_flag = True if args.shuffle_type == _SHUFFLE_ONCE_ else False
train_loader_training = TrainDataset(train_data_train, device, shuffle=shuffle_flag)
train_loader_testing = TrainDataset(train_data_test, device)
val_loader = TrainDataset(val_data, device)
test_loader = TrainDataset(test_data, device)
total_steps = (len(train_loader_training) + len(train_loader_testing) + len(val_loader) + len(test_loader)) * args.epochs
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=5, threshold=5)
counter = tqdm.tqdm(range(total_steps), mininterval=10)
num_batches = len(train_loader_training)
grad_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
timer = Timer(verbosity_level=1, use_cuda=args.use_cuda)
if args.shuffle_type in [_RANDOM_RESHUFFLING_, _SHUFFLE_ONCE_]:
sorter = None
else:
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
raise NotImplementedError("This sorting method is not supported yet")
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.shuffle_type+'_'+str(args.seed))
tb_logger = SummaryWriter(tb_path)
else:
tb_logger = None
for epoch in range(0, args.epochs):
total_time = train(epoch, optimizer, train_loader_training, counter, sorter, timer)
train_loss = evaluate(train_loader_testing, counter)
val_loss = evaluate(val_loader, counter)
# test_loss = evaluate(test_loader, counter)
train_ppl = torch.exp(torch.as_tensor(train_loss))
val_ppl = torch.exp(torch.as_tensor(val_loss))
# test_ppl = torch.exp(torch.as_tensor(test_loss))
# train_ppl_each_epoch.append(torch.exp(torch.as_tensor(train_loss))) # perplexity
# val_ppl_each_epoch.append(torch.exp(torch.as_tensor(val_loss))) # perplexity
# test_ppl_each_epoch.append(torch.exp(torch.as_tensor(test_loss))) # perplexity
if tb_logger is not None:
tb_logger.add_scalar('train/epoch/loss', train_loss, epoch)
tb_logger.add_scalar('train/time/loss', train_loss, total_time)
tb_logger.add_scalar('val/epoch/ppl', val_ppl, epoch)
tb_logger.add_scalar('val/time/ppl', val_ppl, total_time)
tb_logger.add_scalar('val/epoch/loss', val_loss, epoch)
tb_logger.add_scalar('val/time/loss', val_loss, total_time)
lr_scheduler.step(val_ppl)
print(f'| end of epoch {epoch:3d} | train ppl {train_ppl:.2f} | valid ppl {val_ppl:8.2f}')
if tb_logger is not None:
tb_logger.close()
if __name__ == '__main__':
main() | 17,784 | 39.237557 | 125 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/constants.py | _RANDOM_RESHUFFLING_ = 'random_reshuffling'
_SHUFFLE_ONCE_ = 'shuffle_once'
_ZEROTH_ORDER_SORT_ = 'zeroth_order_greedy_sort'
_STALE_GRAD_SORT_ = 'stale_grad_greedy_sort'
_FRESH_GRAD_SORT_ = 'fresh_grad_greedy_sort'
_DM_SORT_ = 'dm'
_FLIPFLOP_SORT_ = 'flipflop' | 260 | 36.285714 | 48 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/generate.py | ###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default='1000',
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
if not is_transformer_model:
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, 'w') as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
if is_transformer_model:
output = model(input, False)
word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
word_tensor = torch.Tensor([[word_idx]]).long().to(device)
input = torch.cat([input, word_tensor], 0)
else:
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
| 3,080 | 38 | 89 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/model.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.ntoken = ntoken
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
decoded = decoded.view(-1, self.ntoken)
return F.log_softmax(decoded, dim=1), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens in the sequence.
The positional encodings have the same dimension as the embeddings, so that the two can be summed.
Here, we use sine and cosine functions of different frequencies.
.. math:
\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
\text{where pos is the word position and i is the embed idx)
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except ImportError:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
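    # Sketch of the additive mask for sz=3 (added comment, illustrative only):
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # i.e. position i may only attend to positions <= i, enforcing causal decoding.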
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1) | 6,353 | 41.07947 | 110 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/data.py | import os
from io import open
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, train_path, valid_path, test_path):
self.dictionary = Dictionary()
self.train = self.tokenize(train_path)
self.valid = self.tokenize(valid_path)
self.test = self.tokenize(test_path)
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split() + ['<eos>']
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
idss = []
for line in f:
words = line.split() + ['<eos>']
ids = []
for word in words:
ids.append(self.dictionary.word2idx[word])
idss.append(torch.tensor(ids, dtype=torch.int64))
ids = torch.cat(idss)
return ids
| 1,449 | 28.591837 | 65 | py |
GraB | GraB-main/neurips22/examples/vision/arguments.py | import argparse
def get_args():
parser = argparse.ArgumentParser(description='Experimental code for the QMC paper')
parser.add_argument('--model',
metavar='ARCH',
default='resnet20',
help='model to use (lenet, resnetxx)')
parser.add_argument('--pretrained',
default=True,
action='store_true',
help='whether to use pretrained model (currently only for ImageNet)')
parser.add_argument('--dataset',
default='cifar10',
type=str,
help='dataset used in the experiment (default: cifar10)')
parser.add_argument('--data_path',
type=str,
help='the base directory for dataset')
parser.add_argument('--num_workers',
default=0,
type=int,
metavar='N',
help='number of data loading workers (default: 0)')
parser.add_argument('--epochs',
default=200,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start_epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size',
default=64,
type=int,
metavar='N',
                        help='mini-batch size (default: 64)')
parser.add_argument('--grad_accumulation_step',
default=1,
type=int,
metavar='N',
help='gradient accumulation step in the optimization (default: 1)')
parser.add_argument('--test_batch_size',
default=1024,
type=int,
metavar='N',
help='mini-batch size used for testing (default: 1024)')
parser.add_argument('--lr',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--weight_decay',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--print_freq',
default=50,
type=int,
metavar='N',
help='print frequency (default: 50)')
parser.add_argument('--start_sort',
default=1,
type=int,
metavar='N',
help='the epoch where the greedy strategy will be first used (100 in CIFAR10 case)')
parser.add_argument('--seed',
default=0,
type=int,
metavar='N',
help='random seed used in the experiment')
parser.add_argument('--use_tensorboard',
default=False,
action='store_true',
                        help='whether to log training and evaluation metrics to TensorBoard')
parser.add_argument('--tensorboard_path',
type=str,
help='the base directory for tensorboard logs')
parser.add_argument('--zo_batch_size',
default=1,
type=int,
metavar='N',
                        help='zero-th order mini-batch size (default: 1)')
# greedy method related arguments
parser.add_argument('--shuffle_type',
default='random_reshuffling',
type=str,
                        help='shuffle type used for the optimization (choose from random_reshuffling, shuffle_once, stale_grad_greedy_sort, fresh_grad_greedy_sort, dm, flipflop)')
parser.add_argument('--task_name',
default='test',
type=str,
help='task name used for tensorboard')
parser.add_argument('--log_metric',
default=False,
action='store_true',
help='whether to log the LHS-QMC metric during training (default: False)')
parser.add_argument('--use_random_proj',
default=False,
action='store_true',
                        help='whether to use projection when doing the greedy sorting (default: False)')
parser.add_argument('--use_random_proj_full',
default=False,
action='store_true',
                        help='whether to use projection after storing all the full-dimension gradients (default: False)')
parser.add_argument('--use_qr',
default=False,
action='store_true',
                        help='whether to use qr_decomposition in the sorting part (default: False)')
parser.add_argument('--proj_ratio',
default=0.1,
type=float,
                        help='fraction of the original model dimension to keep when projecting (default: 0.1)')
parser.add_argument('--proj_target',
default=1024,
type=int,
help='the target dimension for random projection')
args = parser.parse_args()
return args
| 6,078 | 38.219355 | 165 | py |
GraB | GraB-main/neurips22/examples/vision/constants.py | ##############################
# datasets
##############################
_MNIST_ = 'mnist'
_CIFAR10_ = 'cifar10'
_CIFAR100_ = 'cifar100'
_IMAGENET_ = 'imagenet'
##############################
# models
##############################
_LENET_ = 'lenet'
_RESNET_ = 'resnet'
_RESNET20_ = 'resnet20'
_RESNET18_ = 'resnet18'
_LOGISTIC_REGRESSION_ = 'logistic_regression'
_SQUEEZENET_ = 'squeezenet'
##############################
# algorithms
##############################
_RANDOM_RESHUFFLING_ = 'random_reshuffling'
_SHUFFLE_ONCE_ = 'shuffle_once'
_ZEROTH_ORDER_SORT_ = 'zeroth_order_greedy_sort'
_STALE_GRAD_SORT_ = 'stale_grad_greedy_sort'
_FRESH_GRAD_SORT_ = 'fresh_grad_greedy_sort'
_DM_SORT_ = 'dm'
_FLIPFLOP_SORT_ = 'flipflop' | 731 | 23.4 | 48 | py |
GraB | GraB-main/neurips22/examples/vision/utils.py | import os
import torch
import time
import copy
import pickle
import logging
import lmdb
import six
from PIL import Image
from contextlib import contextmanager
from io import StringIO
from constants import _STALE_GRAD_SORT_, \
_FRESH_GRAD_SORT_, \
_DM_SORT_, \
_MNIST_, \
_FLIPFLOP_SORT_
import torch.utils.data as data
from dmsort.utils import compute_avg_grad_error
def build_task_name(args):
task_name = 'MODEL-' + args.model + \
'_DATA-' + args.dataset + \
'_SFTYPE-' + args.shuffle_type + \
'_SEED-' + str(args.seed) + \
'-LR-' + str(args.lr)
if args.shuffle_type == 'fresh':
task_name = task_name + '_proj-' + str(args.zo_batch_size)
if args.shuffle_type == 'greedy' and args.use_random_proj:
task_name = task_name + '_proj-' + str(args.proj_target)
return task_name
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train(args,
loader,
model,
criterion,
optimizer,
epoch,
tb_logger,
timer=None,
sorter=None):
losses = AverageMeter()
top1 = AverageMeter()
model.train()
grad_buffer = copy.deepcopy(model)
for p in grad_buffer.parameters():
p.data.zero_()
train_batches = list(enumerate(loader))
num_batches = len(train_batches)
if sorter is not None:
with timer("sorting", epoch=epoch):
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
orders = sorter.sort(epoch=epoch,
model=model,
train_batches=train_batches,
optimizer=optimizer,
oracle_type='cv')
elif args.shuffle_type == _DM_SORT_:
orders = sorter.sort()
elif args.shuffle_type == _FLIPFLOP_SORT_:
orders = sorter.sort(epoch=epoch)
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(train_batches))}
if args.log_metric:
compute_avg_grad_error(args,
model,
train_batches,
optimizer,
epoch,
tb_logger,
oracle_type='cv',
orders=orders)
logging.warning(f"Logging the average gradient error. \
This is only for monitoring and will slow down training, \
please remove --log_metric for full-speed training.")
grad_step = 0
cur_step = 0
for i in orders.keys():
grad_step += 1
cur_step += 1
_, batch = train_batches[i]
with timer("forward pass", epoch=epoch):
loss, prec1, cur_batch_size = model(batch)
with timer("backward pass", epoch=epoch):
optimizer.zero_grad()
loss.backward()
for p1, p2 in zip(grad_buffer.parameters(), model.parameters()):
p1.data.add_(p2.grad.data)
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
with timer("sorting", epoch=epoch):
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=i,
epoch=epoch)
logging.info(f"Storing the staled gradient used in StaleGradGreedySort method.")
if sorter is not None and args.shuffle_type == _DM_SORT_:
with timer("sorting", epoch=epoch):
sorter.step(optimizer=optimizer, batch_idx=i)
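        # Added note: grad_buffer accumulates the per-batch gradients; the block below
        # averages them every grad_accumulation_step batches (or at the end of the epoch),
        # copies the average into the model's .grad fields, and only then takes an
        # optimizer step.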
if grad_step % args.grad_accumulation_step == 0 or grad_step == num_batches:
for p1, p2 in zip(grad_buffer.parameters(), model.parameters()):
p1.data.mul_(1/cur_step)
p2.grad.data.zero_().add_(p1.data)
p1.data.zero_()
with timer("backward pass", epoch=epoch):
optimizer.step()
cur_step = 0
loss = loss.float()
# measure accuracy and record loss
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(loader), loss=losses, top1=top1))
total_time = timer.totals["forward pass"] + timer.totals["backward pass"]
if sorter is not None:
total_time += timer.totals["sorting"]
return total_time
def validate(args, loader, model, criterion, epoch, tb_logger, loader_name, total_time):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
for i, batch in enumerate(loader):
loss, prec1, cur_batch_size = model(batch)
loss = loss.float()
# measure accuracy and record loss
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(loader), loss=losses,
top1=top1))
if args.use_tensorboard:
tb_logger.add_scalar(loader_name+'/epoch/accuracy', top1.avg, epoch)
tb_logger.add_scalar(loader_name+'/epoch/loss', losses.avg, epoch)
tb_logger.add_scalar(loader_name+'/time/accuracy', top1.avg, total_time)
tb_logger.add_scalar(loader_name+'/time/loss', losses.avg, total_time)
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return
class Timer:
"""
Timer for PyTorch code
Comes in the form of a contextmanager:
Example:
>>> timer = Timer()
... for i in range(10):
... with timer("expensive operation"):
... x = torch.randn(100)
... print(timer.summary())
"""
def __init__(self, verbosity_level=1, skip_first=True, use_cuda=True):
self.verbosity_level = verbosity_level
#self.log_fn = log_fn if log_fn is not None else self._default_log_fn
self.skip_first = skip_first
self.cuda_available = torch.cuda.is_available() and use_cuda
self.reset()
def reset(self):
"""Reset the timer"""
self.totals = {} # Total time per label
self.first_time = {} # First occurrence of a label (start time)
        self.last_time = {}  # Last occurrence of a label (end time)
self.call_counts = {} # Number of times a label occurred
@contextmanager
def __call__(self, label, epoch=-1.0, verbosity=1):
# Don't measure this if the verbosity level is too high
if verbosity > self.verbosity_level:
yield
return
# Measure the time
self._cuda_sync()
start = time.time()
yield
self._cuda_sync()
end = time.time()
# Update first and last occurrence of this label
if label not in self.first_time:
self.first_time[label] = start
self.last_time[label] = end
# Update the totals and call counts
if label not in self.totals and self.skip_first:
self.totals[label] = 0.0
del self.first_time[label]
self.call_counts[label] = 0
elif label not in self.totals and not self.skip_first:
self.totals[label] = end - start
self.call_counts[label] = 1
else:
self.totals[label] += end - start
self.call_counts[label] += 1
#if self.call_counts[label] > 0:
# # We will reduce the probability of logging a timing
# # linearly with the number of time we have seen it.
# # It will always be recorded in the totals, though.
# if np.random.rand() < 1 / self.call_counts[label]:
# self.log_fn(
# "timer", {"epoch": epoch, "value": end - start}, {"event": label}
# )
def summary(self):
"""
Return a summary in string-form of all the timings recorded so far
"""
if len(self.totals) > 0:
with StringIO() as buffer:
total_avg_time = 0
print("--- Timer summary ------------------------", file=buffer)
print(" Event | Count | Average time | Frac.", file=buffer)
for event_label in sorted(self.totals):
total = self.totals[event_label]
count = self.call_counts[event_label]
if count == 0:
continue
avg_duration = total / count
total_runtime = (
self.last_time[event_label] - self.first_time[event_label]
)
runtime_percentage = 100 * total / total_runtime
total_avg_time += avg_duration if "." not in event_label else 0
print(
f"- {event_label:30s} | {count:6d} | {avg_duration:11.5f}s | {runtime_percentage:5.1f}%",
file=buffer,
)
print("-------------------------------------------", file=buffer)
event_label = "total_averaged_time"
print(
f"- {event_label:30s}| {count:6d} | {total_avg_time:11.5f}s |",
file=buffer,
)
print("-------------------------------------------", file=buffer)
return buffer.getvalue()
def _cuda_sync(self):
"""Finish all asynchronous GPU computations to get correct timings"""
if self.cuda_available:
torch.cuda.synchronize()
def _default_log_fn(self, _, values, tags):
label = tags["label"]
epoch = values["epoch"]
duration = values["value"]
print(f"Timer: {label:30s} @ {epoch:4.1f} - {duration:8.5f}s")
def raw_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
return bin_data
def dumps_data(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object
"""
return pickle.dumps(obj)
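# Added helper (not in the original file): ImageFolderLMDB below calls loads_data, which
# was missing here. This is a minimal sketch assuming the LMDB values were written with
# pickle via dumps_data.
def loads_data(buf):
    """
    Deserialize bytes produced by dumps_data.
    """
    return pickle.loads(buf)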
## Helper functions for ImageNet
def folder2lmdb(spath, dpath, name="train", write_frequency=5000):
directory = os.path.expanduser(os.path.join(spath, name))
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
dataset = ImageFolder(directory, loader=raw_reader)
data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)
lmdb_path = os.path.join(dpath, "%s.lmdb" % name)
isdir = os.path.isdir(lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir,
map_size=1099511627776 * 2, readonly=False,
meminit=False, map_async=True)
txn = db.begin(write=True)
for idx, data in enumerate(data_loader):
image, label = data[0]
txn.put(u'{}'.format(idx).encode('ascii'), dumps_data((image, label)))
if idx % write_frequency == 0:
print("[%d/%d]" % (idx, len(data_loader)))
txn.commit()
txn = db.begin(write=True)
# finish iterating through dataset
txn.commit()
keys = [u'{}'.format(k).encode('ascii') for k in range(idx + 1)]
with db.begin(write=True) as txn:
txn.put(b'__keys__', dumps_data(keys))
txn.put(b'__len__', dumps_data(len(keys)))
print("Flushing database ...")
db.sync()
db.close()
class ImageFolderLMDB(data.Dataset):
def __init__(self, db_path, transform=None, target_transform=None):
self.db_path = db_path
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = loads_data(txn.get(b'__len__'))
self.keys = loads_data(txn.get(b'__keys__'))
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(self.keys[index])
unpacked = loads_data(byteflow)
# load img
imgbuf = unpacked[0]
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
# load label
target = unpacked[1]
if self.transform is not None:
img = self.transform(img)
# im2arr = np.array(img)
# im2arr = torch.from_numpy(im2arr)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
# return im2arr, target
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')' | 13,812 | 34.058376 | 113 | py |
GraB | GraB-main/neurips22/examples/vision/visionmodel.py | import torch
from constants import _MNIST_, _SQUEEZENET_
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
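# Added note: accuracy() returns percentages rather than fractions -- e.g. with topk=(1,)
# and 3 correct predictions in a batch of 4 it returns [tensor(75.)].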
class VisionModel:
def __init__(self, args, model, criterion):
self.args = args
self.model = model
self.criterion = criterion
def __call__(self, batch):
(input_var, target_var) = batch
if self.args.use_cuda:
input_var = input_var.cuda()
target_var = target_var.cuda()
if self.args.dataset == _MNIST_:
input_var = input_var.reshape(-1, 784)
output = self.model(input_var)
loss = self.criterion(output, target_var)
prec1 = accuracy(output.data, target_var)[0]
return loss, prec1, input_var.size(0)
def parameters(self):
return self.model.parameters()
def train(self):
self.model.train()
def eval(self):
self.model.eval()
| 1,312 | 26.93617 | 64 | py |
GraB | GraB-main/neurips22/examples/vision/train_logreg_mnist.py | import os
import random
import torch
import logging
import torchvision
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from visionmodel import VisionModel
from arguments import get_args
from utils import train, validate, Timer, build_task_name
from constants import _RANDOM_RESHUFFLING_, \
_SHUFFLE_ONCE_, \
_STALE_GRAD_SORT_, \
_FRESH_GRAD_SORT_, \
_MNIST_, \
_DM_SORT_, \
_FLIPFLOP_SORT_
logger = logging.getLogger(__name__)
def main():
args = get_args()
if args.seed == 0:
args.seed = random.randint(0, 10000)
random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f"Using random seed {args.seed} for random and torch module.")
args.use_cuda = torch.cuda.is_available()
logger.info(f"Using GPU: {args.use_cuda}")
timer = Timer(verbosity_level=1, use_cuda=args.use_cuda)
criterion = torch.nn.CrossEntropyLoss()
if args.use_cuda:
criterion.cuda()
logger.info(f"Using Cross Entropy Loss for classification.")
# The input feature for MNIST is 784, and it has 10 classes
model = torch.nn.DataParallel(torch.nn.Linear(784, 10))
if args.use_cuda:
model.cuda()
model_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
model = VisionModel(args, model, criterion)
logger.info(f"Using model: {args.model} with dimension: {model_dimen}.")
optimizer = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
logger.info(f"Using optimizer SGD with hyperparameters: learning rate={args.lr}; momentum={args.momentum}; weight decay={args.weight_decay}.")
logger.info(f"Using dataset: {args.dataset}")
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1, last_epoch=args.start_epoch-1)
logger.info(f"Using dataset: {args.dataset}")
loaders = {}
shuffle_flag = True if args.shuffle_type in [_RANDOM_RESHUFFLING_, _FRESH_GRAD_SORT_] else False
data_path = os.path.join(args.data_path, "data")
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
    trainset = datasets.MNIST(root=data_path, train=True, download=True, transform=transform)
testset = datasets.MNIST(root=data_path, train=False, transform=transform)
loaders['train'] = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=shuffle_flag,
persistent_workers=False,
num_workers=args.num_workers,
pin_memory=False)
loaders['train_val'] = torch.utils.data.DataLoader(trainset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
loaders['val'] = torch.utils.data.DataLoader(testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
# Epoch-wise data ordering
if args.shuffle_type in [_RANDOM_RESHUFFLING_, _SHUFFLE_ONCE_]:
sorter = None
logger.info(f"Not using any sorting algorithm.")
else:
grad_dimen = int(args.proj_ratio * model_dimen) if args.use_random_proj else model_dimen
num_batches = len(list(enumerate(loaders['train'])))
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
from dmsort.algo import FreshGradGreedySort
sorter = FreshGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
raise NotImplementedError("This sorting method is not supported yet")
logger.info(f"Creating sorting algorithm: {args.shuffle_type}.")
args.task_name = build_task_name(args)
logger.info(f"Creating task name as: {args.task_name}.")
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.task_name)
logger.info(f"Streaming tensorboard logs to path: {tb_path}.")
tb_logger = SummaryWriter(tb_path)
else:
tb_logger = None
logger.info(f"Disable tensorboard logs currently.")
for epoch in range(args.start_epoch, args.epochs):
ttl_time = train(args=args,
loader=loaders['train'],
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch,
tb_logger=tb_logger,
timer=timer,
sorter=sorter)
# evaluate on training set
validate(args=args,
loader=loaders['train_val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='train',
total_time=ttl_time)
# evaluate on validation set
validate(args=args,
loader=loaders['val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='val',
total_time=ttl_time)
    if tb_logger is not None:
        tb_logger.close()
logger.info(f"Finish training!")
if __name__ == '__main__':
torch.multiprocessing.set_sharing_strategy('file_system')
main()
| 6,925 | 41.231707 | 146 | py |
GraB | GraB-main/neurips22/examples/vision/train_lenet_cifar.py | import os
import random
import torch
import logging
import torchvision
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from visionmodel import VisionModel
from arguments import get_args
from utils import train, validate, Timer, build_task_name
from constants import _RANDOM_RESHUFFLING_, \
_SHUFFLE_ONCE_, \
_STALE_GRAD_SORT_, \
_FRESH_GRAD_SORT_, \
_CIFAR10_, \
_CIFAR100_, \
_DM_SORT_, \
_FLIPFLOP_SORT_
logger = logging.getLogger(__name__)
def main():
args = get_args()
if args.seed == 0:
args.seed = random.randint(0, 10000)
random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f"Using random seed {args.seed} for random and torch module.")
args.use_cuda = torch.cuda.is_available()
logger.info(f"Using GPU: {args.use_cuda}")
timer = Timer(verbosity_level=1, use_cuda=args.use_cuda)
criterion = torch.nn.CrossEntropyLoss()
if args.use_cuda:
criterion.cuda()
logger.info(f"Using Cross Entropy Loss for classification.")
from models.lenet import LeNet
model = torch.nn.DataParallel(LeNet())
if args.use_cuda:
model.cuda()
model_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
model = VisionModel(args, model, criterion)
logger.info(f"Using model: {args.model} with dimension: {model_dimen}.")
optimizer = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
logger.info(f"Using optimizer SGD with hyperparameters: learning rate={args.lr}; momentum={args.momentum}; weight decay={args.weight_decay}.")
logger.info(f"Using dataset: {args.dataset}")
loaders = {}
shuffle_flag = True if args.shuffle_type in [_RANDOM_RESHUFFLING_, _FRESH_GRAD_SORT_] else False
data_path = os.path.join(args.data_path, "data")
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# The data augmentation would affect the ordering, and thus disabled.
if args.dataset == _CIFAR10_:
trainset = datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
testset = datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
elif args.dataset == _CIFAR100_:
trainset = datasets.CIFAR100(root='./data', train=True, transform=transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
testset = datasets.CIFAR100(root='./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
else:
raise NotImplementedError("This script is for CIFAR datasets. Please input cifar10 or cifar100 in --dataset.")
loaders['train'] = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=shuffle_flag,
persistent_workers=False,
num_workers=args.num_workers,
pin_memory=False)
    # The evaluation should be given on the ENTIRE training set
loaders['train_val'] = torch.utils.data.DataLoader(trainset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
loaders['val'] = torch.utils.data.DataLoader(testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
# Epoch-wise data ordering
if args.shuffle_type in [_RANDOM_RESHUFFLING_, _SHUFFLE_ONCE_]:
sorter = None
logger.info(f"Not using any sorting algorithm.")
else:
grad_dimen = int(args.proj_ratio * model_dimen) if args.use_random_proj else model_dimen
num_batches = len(list(enumerate(loaders['train'])))
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
from dmsort.algo import FreshGradGreedySort
sorter = FreshGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
raise NotImplementedError("This sorting method is not supported yet")
logger.info(f"Creating sorting algorithm: {args.shuffle_type}.")
args.task_name = build_task_name(args)
logger.info(f"Creating task name as: {args.task_name}.")
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.task_name)
logger.info(f"Streaming tensorboard logs to path: {tb_path}.")
tb_logger = SummaryWriter(tb_path)
else:
tb_logger = None
logger.info(f"Disable tensorboard logs currently.")
for epoch in range(args.start_epoch, args.epochs):
ttl_time = train(args=args,
loader=loaders['train'],
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch,
tb_logger=tb_logger,
timer=timer,
sorter=sorter)
# evaluate on training set
validate(args=args,
loader=loaders['train_val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='train',
total_time=ttl_time)
# evaluate on validation set
validate(args=args,
loader=loaders['val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='val',
total_time=ttl_time)
    if tb_logger is not None:
        tb_logger.close()
logger.info(f"Finish training!")
if __name__ == '__main__':
torch.multiprocessing.set_sharing_strategy('file_system')
main()
| 7,986 | 41.71123 | 146 | py |
GraB | GraB-main/neurips22/examples/vision/models/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20():
return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print() | 5,001 | 30.459119 | 120 | py |
GraB | GraB-main/neurips22/examples/vision/models/lenet.py | # -*- coding: utf-8 -*-
from collections import OrderedDict
import torch.nn as nn
__all__ = ["lenet"]
class LeNet(nn.Module):
"""
Input - 3x32x32
C1 - 6@28x28 (5x5 kernel)
    ReLU
    S2 - 6@14x14 (2x2 kernel, stride 2) Subsampling
    C3 - 16@10x10 (5x5 kernel)
    ReLU
    S4 - 16@5x5 (2x2 kernel, stride 2) Subsampling
    C5 - 120@1x1 (5x5 kernel)
    ReLU
    F6 - 84
    ReLU
    F7 - 10 (Output)
"""
def __init__(self, dataset="cifar10"):
super(LeNet, self).__init__()
# some init.
self.dataset = dataset
self.num_classes = self._decide_num_classes()
# init layers.
self.convnet = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(self._decide_input_dim(), 6, kernel_size=(5, 5)),
),
("relu1", nn.ReLU()),
("s2", nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
("conv3", nn.Conv2d(6, 16, kernel_size=(5, 5))),
("relu3", nn.ReLU()),
("s4", nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
("conv5", nn.Conv2d(16, 120, kernel_size=(5, 5))),
("relu5", nn.ReLU()),
]
)
)
self.fc = nn.Sequential(
OrderedDict(
[
("fc6", nn.Linear(120, 84)),
("relu6", nn.ReLU()),
("fc7", nn.Linear(84, self.num_classes)),
]
)
)
def forward(self, x):
out = self.convnet(x)
out = out.view(x.size(0), -1)
out = self.fc(out)
return out
def _decide_num_classes(self):
if (
self.dataset == "cifar10"
or self.dataset == "svhn"
or self.dataset == "mnist"
):
return 10
elif self.dataset == "cifar100":
return 100
elif self.dataset == "imagenet":
return 1000
def _decide_input_dim(self):
if (
"cifar" in self.dataset
or self.dataset == "svhn"
or self.dataset == "imagenet"
):
return 3
elif "mnist" == self.dataset:
return 1
else:
raise RuntimeError("incorrect input dim.")
def lenet(conf):
"""Constructs a lenet model."""
return LeNet(dataset='cifar10') | 2,480 | 25.677419 | 83 | py |
GraB | GraB-main/neurips22/examples/vision/models/__init__.py | from .resnet import *
from .lenet import * | 42 | 20.5 | 21 | py |
GraB | GraB-main/neurips22/src/dmsort/algo.py | import torch
import copy
import random
from sklearn import random_projection
from .utils import flatten_grad
class Sort:
def sort(self, orders):
raise NotImplementedError
class StaleGradGreedySort(Sort):
"""
Implementation of the algorithm that greedily sort the examples using staled gradients,
the details can be found in: https://openreview.net/pdf?id=7gWSJrP3opB.
"""
def __init__(self,
args,
num_batches,
grad_dimen):
self.args = args
self.num_batches = num_batches
self.grad_dimen = grad_dimen
self.stale_grad_matrix = torch.zeros(num_batches, grad_dimen)
self.avg_grad = torch.zeros(grad_dimen)
if args.use_cuda:
self.stale_grad_matrix = self.stale_grad_matrix.cuda()
self.avg_grad = self.avg_grad.cuda()
self._reset_random_proj_matrix()
def _skip_sort_this_epoch(self, epoch):
return epoch <= self.args.start_sort
def _reset_random_proj_matrix(self):
rs = random.randint(0, 10000)
self.rp = random_projection.SparseRandomProjection(n_components=self.grad_dimen, random_state=rs)
def update_stale_grad(self, optimizer, batch_idx, epoch, add_to_avg=True):
tensor = flatten_grad(optimizer)
if self.args.use_random_proj:
# Currently random projection in sklearn only supports CPU.
if self.args.use_cuda:
tensor = tensor.cpu()
tensor = torch.from_numpy(self.rp.fit_transform(tensor.reshape(1, -1)))
if self.args.use_cuda:
tensor = tensor.cuda()
self.stale_grad_matrix[batch_idx].copy_(tensor[0])
else:
self.stale_grad_matrix[batch_idx].copy_(tensor)
if add_to_avg:
self.avg_grad.add_(tensor / self.num_batches)
# make sure the same random matrix is used in one epoch
if batch_idx == self.num_batches - 1 and self.args.use_random_proj:
self._reset_random_proj_matrix()
def sort(self, epoch, orders=None):
if orders is None:
orders = {i:0 for i in range(self.num_batches)}
if self._skip_sort_this_epoch(epoch):
return orders
if self.args.use_qr:
assert self.args.use_random_proj_full is False
_, X = torch.qr(self.stale_grad_matrix.t())
X = X.t()
if self.args.use_random_proj_full:
# Currently random projection in sklearn only supports CPU.
X = self.stale_grad_matrix.clone()
if self.args.use_cuda:
X = X.cpu()
rp = random_projection.SparseRandomProjection()
X = torch.from_numpy(rp.fit_transform(X))
if self.args.use_cuda:
X = X.cuda()
        # only fall back to the raw stale-gradient matrix if neither QR nor the
        # full random projection produced X above
        if not (self.args.use_qr or self.args.use_random_proj_full):
X = self.stale_grad_matrix.clone()
cur_sum = torch.zeros_like(self.avg_grad)
X.add_(-1 * self.avg_grad)
remain_ids = set(range(self.num_batches))
for i in range(1, self.num_batches+1):
cur_id = -1
            min_norm = float('inf')
            for cand_id in remain_ids:
                cand_norm = torch.norm(
                    X[cand_id] + cur_sum*(i-1)
                ).item()
                if cand_norm < min_norm:
                    min_norm = cand_norm
                    cur_id = cand_id
remain_ids.remove(cur_id)
orders[cur_id] = i
cur_sum.add_(X[cur_id])
self.avg_grad.zero_()
orders = {k: v for k, v in sorted(orders.items(), key=lambda item: item[1], reverse=False)}
return orders
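# Illustrative usage sketch for StaleGradGreedySort (not part of the original
# training loop, which lives in the example scripts' `train` utility). The
# `train_batches` / `compute_loss` names below are placeholders for this
# illustration; only the sorter API (`sort` and `update_stale_grad`) is taken
# from this file.
#
#   sorter = StaleGradGreedySort(args, num_batches, grad_dimen)
#   orders = None
#   for epoch in range(args.epochs):
#       orders = sorter.sort(epoch, orders)          # greedy order from stale grads
#       for batch_idx in orders.keys():              # visit batches in that order
#           loss = compute_loss(train_batches[batch_idx])
#           optimizer.zero_grad()
#           loss.backward()
#           sorter.update_stale_grad(optimizer, batch_idx, epoch)
#           optimizer.step()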
class StaleGradDiscrepencyMinimizationSort(Sort):
"""
Implementation of the GraB algorithm, which uses stale gradient to sort the examples
via minimizing the discrepancy bound. The details can be found in:
https://arxiv.org/abs/2205.10733.
"""
def __init__(self,
args,
num_batches,
grad_dimen):
self.args = args
self.num_batches = num_batches
self.grad_dimen = grad_dimen
self.avg_grad = torch.zeros(grad_dimen)
if args.use_cuda:
self.avg_grad = self.avg_grad.cuda()
self.cur_sum = torch.zeros_like(self.avg_grad)
self.next_epoch_avg_grad = torch.zeros_like(self.avg_grad)
self.orders = {i:0 for i in range(self.num_batches)}
self.first = 0
self.last = self.num_batches
def _skip_sort_this_epoch(self, epoch):
return epoch <= self.args.start_sort
def sort(self):
self.orders = {k: v for k, v in sorted(self.orders.items(), key=lambda item: item[1], reverse=False)}
self.avg_grad.copy_(self.next_epoch_avg_grad)
self.next_epoch_avg_grad.zero_()
self.cur_sum.zero_()
self.first = 0
self.last = self.num_batches
return self.orders
def step(self, optimizer, batch_idx):
cur_grad = flatten_grad(optimizer)
self.next_epoch_avg_grad.add_(cur_grad / self.num_batches)
cur_grad.add_(-1 * self.avg_grad)
# The balancing algorithm used here is described in Algorithm 5 in
# https://arxiv.org/abs/2205.10733. We can always replace it with other balancing variants.
if torch.norm(self.cur_sum + cur_grad) <= torch.norm(self.cur_sum - cur_grad):
self.orders[batch_idx] = self.first
self.first += 1
self.cur_sum.add_(cur_grad)
else:
self.orders[batch_idx] = self.last
self.last -= 1
self.cur_sum.add_(-1 * cur_grad)
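# Illustrative usage sketch for StaleGradDiscrepencyMinimizationSort (GraB).
# The `train_batches` / `compute_loss` names are placeholders for this
# illustration; only `sort` and `step` come from this class.
#
#   sorter = StaleGradDiscrepencyMinimizationSort(args, num_batches, grad_dimen)
#   for epoch in range(args.epochs):
#       orders = sorter.sort()                        # order derived from last epoch's signs
#       for batch_idx in orders.keys():
#           loss = compute_loss(train_batches[batch_idx])
#           optimizer.zero_grad()
#           loss.backward()
#           sorter.step(optimizer, batch_idx)         # balance the centered gradient
#           optimizer.step()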
class FlipFlopSort(Sort):
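    """Implement the FlipFlop ordering baseline: on even epochs the examples
    are reshuffled uniformly at random, and on odd epochs the previous
    epoch's order is simply reversed."""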
def __init__(self,
args,
num_batches,
grad_dimen):
self.args = args
self.num_batches = num_batches
self.orders = {i:0 for i in range(self.num_batches)}
def sort(self, epoch):
if epoch % 2 == 0:
idx_list = [i for i in range(self.num_batches)]
idx_list_copy = [i for i in range(self.num_batches)]
random.shuffle(idx_list)
self.orders = {i:j for i, j in zip(idx_list, idx_list_copy)}
self.orders = {k: v for k, v in sorted(self.orders.items(), key=lambda item: item[1], reverse=False)}
else:
self.orders = {k: v for k, v in sorted(self.orders.items(), key=lambda item: item[1], reverse=True)}
return self.orders | 6,539 | 38.39759 | 113 | py |
GraB | GraB-main/neurips22/src/dmsort/utils.py | import torch
from sklearn import random_projection
def random_proj(data):
rp = random_projection.SparseRandomProjection(random_state=1)
return torch.from_numpy(rp.fit_transform(data))
def compute_avg_grad_error(args,
model,
train_batches,
optimizer,
epoch,
tb_logger,
oracle_type='cv',
orders=None):
grads = dict()
for i in range(len(train_batches)):
grads[i] = flatten_params(model).zero_()
full_grad = flatten_params(model).zero_()
if orders is None:
orders = {i:0 for i in range(len(train_batches))}
for j in orders.keys():
i, batch = train_batches[j]
if oracle_type == 'cv':
loss, _, _ = model(batch)
optimizer.zero_grad()
loss.backward()
else:
raise NotImplementedError
grads[i] = flatten_grad(optimizer)
full_grad.add_(grads[i])
cur_grad = flatten_params(model).zero_()
index, cur_var = 0, 0
for j in orders.keys():
i, _ = train_batches[j]
for p1, p2, p3 in zip(cur_grad, grads[i], full_grad):
p1.data.add_(p2.data)
cur_var += torch.norm(p1.data/(index+1) - p3.data/len(train_batches)).item()**2
index += 1
tb_logger.add_scalar('train/metric', cur_var, epoch)
def flatten_grad(optimizer):
t = []
for _, param_group in enumerate(optimizer.param_groups):
for p in param_group['params']:
if p.grad is not None: t.append(p.grad.data.view(-1))
return torch.concat(t)
def flatten_params(model):
t = []
for _, param in enumerate(model.parameters()):
if param is not None: t.append(param.data.view(-1))
return torch.concat(t) | 1,844 | 33.166667 | 91 | py |
GraB | GraB-main/neurips22/src/dmsort/__init__.py | from .algo import *
from .utils import * | 40 | 19.5 | 20 | py |
GraB | GraB-main/examples/train_logistic_regression.py | import random
import torch
import torchvision
from torch.nn import CrossEntropyLoss, Linear
from orderedsampler import OrderedSampler
from tensorboardX import SummaryWriter
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
SEED = random.randint(0, 10000)
EPOCHS=100
random.seed(SEED)
torch.manual_seed(SEED)
use_cuda = torch.cuda.is_available()
# model
model = Linear(784, 10)
if use_cuda:
model = model.cuda()
# optimizer
optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
# loss
lossfunc = CrossEntropyLoss()
if use_cuda:
lossfunc = lossfunc.cuda()
# dataset
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,))])
trainset = torchvision.datasets.MNIST('./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.MNIST('./data', train=False, transform=transform)
# data loader
ordered_sampler = OrderedSampler(trainset,
batch_size=64,
order_level=2,
model=model,
lossfunc=lossfunc,
balance_type='pair_balance')
model, lossfunc = ordered_sampler.model, ordered_sampler.lossfunc
train_loader = torch.utils.data.DataLoader(trainset, batch_sampler=ordered_sampler, num_workers=0, pin_memory=False)
train_val_loader = torch.utils.data.DataLoader(trainset, batch_size=1024, shuffle=False, num_workers=0, pin_memory=False)
val_loader = torch.utils.data.DataLoader(testset, batch_size=1024, shuffle=False, num_workers=0, pin_memory=False)
def train(loader, model, lossfunc, optimizer):
model.train()
for i, batch in enumerate(loader):
x, y = batch
if use_cuda:
x, y = x.cuda(), y.cuda()
x = x.reshape(-1, 784)
optimizer.zero_grad()
loss = lossfunc(model(x), y)
loss.backward()
ordered_sampler.step()
optimizer.step()
def val(loader, model, lossfunc, epoch):
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for i, batch in enumerate(loader):
x, y = batch
if use_cuda:
x, y = x.cuda(), y.cuda()
x = x.reshape(-1, 784)
output = model(x)
loss = lossfunc(output, y)
prec1 = accuracy(output.data, y)[0]
cur_batch_size = x.size(0)
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
print('Epoch: [{0}]\t'
'Loss {losses.avg:.4f}\t'
'Prec@1 {top1.avg:.3f}'.format(
epoch, losses=losses, top1=top1))
return top1.avg, losses.avg
tb_writer = SummaryWriter('./runs/release_SEED' + str(SEED))
for epoch in range(EPOCHS):
train(train_loader, model, lossfunc, optimizer)
train_acc, train_loss = val(train_val_loader, model, lossfunc, epoch)
test_acc, test_loss = val(val_loader, model, lossfunc, epoch)
tb_writer.add_scalar('train/epoch/accuracy', train_acc, epoch)
tb_writer.add_scalar('train/epoch/loss', train_loss, epoch)
tb_writer.add_scalar('val/epoch/accuracy', test_acc, epoch)
tb_writer.add_scalar('val/epoch/loss', test_loss, epoch)
tb_writer.close()
| 4,204 | 31.346154 | 121 | py |
GraB | GraB-main/src/orderedsampler/utils.py | from typing import List
class IndicesTracker:
def __init__(self) -> None:
self.curr_indices = []
def _is_empty(self):
return self.curr_indices == []
def reset(self) -> None:
self.curr_indices = []
def get_indices(self) -> List[int]:
indices = self.curr_indices.pop(0)
return indices
def update(self, indices: List[int]) -> None:
self.curr_indices.append(indices)
def sanity_check(self):
return self._is_empty()
def is_last_batch(self):
return self._is_empty()
| 579 | 22.2 | 49 | py |
GraB | GraB-main/src/orderedsampler/__init__.py | from absl import logging
from collections import OrderedDict
from typing import List, Union, Sized, Tuple, Dict
import torch
from torch.nn import Module
from torch.utils.data import IterableDataset
from torch.utils.data.sampler import Sampler
from backpack import extend, backpack
from backpack.extensions import BatchGrad
from backpack.context import CTX
from .utils import IndicesTracker
MEAN_BALANCE = 'mean_balance'
PAIR_BALANCE = 'pair_balance'
class OrderedSampler(Sampler[List[int]]):
r"""Implement a batch sampler that uses GraB-style data ordering.
Technical details can be found in: https://arxiv.org/abs/2205.10733.
Args:
data_source (Dataset): Dataset to sample from.
batch_size (int): Size of mini-batch (default: 1).
order_level (int): Granularity of ordering (default: 1).
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size`` (default: False).
init_order_random (bool): If ``True``, the initial order (first scan of the dataset)
will be random (default: True).
model (nn.Module): Model to train (default: None).
lossfunc: (nn.Module): Loss function used during the training (default: None).
debug (bool): Whether to turn on the debugging mode (default: False).
        balance_type (str): the balancing algorithm to use. Currently ``pair_balance`` and
            ``mean_balance`` are supported. Note that if ``mean_balance`` is used, the stale
            gradient mean from the previous epoch will be applied. If the training involves a large
            learning rate or only a few epochs, ``pair_balance`` is recommended (default: pair_balance).
        prob_balance (bool): If ``True``, probabilistic balancing will be performed. This is useful when
            the data is highly adversarial. For technical details, please refer to:
            https://arxiv.org/abs/2006.14009 (default: False).
Example:
>>> sampler = OrderedSampler(dataset, batch_size=16, order_level=2)
>>> dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)
"""
def __init__(self,
data_source: Sized,
batch_size: int = 1,
order_level: int = 1,
drop_last: bool = False,
init_order_random: bool = True,
model: Union[None, Module] = None,
lossfunc: Union[None, Module] = None,
debug: bool = False,
balance_type: str = PAIR_BALANCE,
prob_balance: bool = False) -> None:
if isinstance(data_source, IterableDataset):
raise ValueError("Currently the OrderedSampler does not support iterable-style dataset "
"since it has no notion of indices, and has no meaning of ordering.")
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(order_level, int) or order_level <= 0 or order_level > batch_size or batch_size % order_level != 0:
raise ValueError("order_level should be a positive integer that divides batch size, "
"but got order_level={}".format(order_level))
if order_level != batch_size and (model is None or lossfunc is None):
raise ValueError("If order_level < batch size, model and loss MUST be passed to OrderedSampler.")
if balance_type == PAIR_BALANCE and (batch_size // order_level) % 2 != 0:
logging.warning("Currently the mod(batch_size // order_level, 2) is not zero, this could incur additional noise "
"in the pair balancing (but still works). To maximize the ordering gain, "
"Please either use mean_balance, or make sure mod(batch_size // order_level, 2) is zero.")
if drop_last:
logging.warning("drop_last is set to be True, note that this could lead to random ordering on the last batch "
"since no gradients are computed on them. It is recommended to NOT to drop last, especially "
"when the size for the last batch is large.")
self.data_source = data_source
self.batch_size = batch_size
self.per_batch_order = order_level == batch_size
self.drop_last = drop_last
self.debug = debug
self.balance_type = balance_type
if self.debug:
print("[DEBUG] use per batch order: {}".format(self.per_batch_order))
if not self.per_batch_order:
self.model = model = extend(model)
self.lossfunc = lossfunc = extend(lossfunc)
# backpack helper for computing per-example gradients.
self.bp = backpack(BatchGrad(), extension_hook=None, debug=debug)
CTX.set_active_exts(self.bp.exts)
CTX.set_debug(self.bp.debug)
CTX.set_extension_hook(self.bp.extension_hook)
CTX.set_retain_graph(self.bp.retain_graph)
else:
logging.warning("Currently the ordering is performed at the batch level. "
"While this is the most efficient setting, the ordering benefits "
"can be compromised since the examples within each batch are fixed. "
"To enable finer-grained ordering, please set order_level < batch_size.")
self.model = model
self.lossfunc = lossfunc
# map: index of example -> rank in the current order.
# this mapping will change at the end of each full scan at __iter__()
if init_order_random:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator()
generator.manual_seed(seed)
new_ranks = torch.randperm(len(data_source), generator=generator).tolist()
self._index_to_rank = OrderedDict()
for i in range(len(new_ranks)):
self._index_to_rank[new_ranks[i]] = i
else:
self._index_to_rank = OrderedDict({i:i for i in range(len(data_source))})
self.indices_tracker = IndicesTracker()
self.use_tracker = True
self._set_up_sorter(order_level=order_level,
balance_type=balance_type,
prob_balance=prob_balance,
per_batch_order=self.per_batch_order)
def _set_up_sorter(self,
order_level: int,
balance_type: str = PAIR_BALANCE,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
if balance_type == PAIR_BALANCE:
from .sorter.pairbalance import PairBalance
self.sorter = PairBalance(num_examples=len(self.data_source),
order_level=order_level,
prob_balance=prob_balance,
per_batch_order=per_batch_order)
elif balance_type == MEAN_BALANCE:
from .sorter.meanbalance import MeanBalance
self.sorter = MeanBalance(num_examples=len(self.data_source),
order_level=order_level,
prob_balance=prob_balance,
per_batch_order=per_batch_order)
else:
raise NotImplementedError("Unrecognized balancing algorithm: {}.".format(balance_type))
def get_orders(self):
return self._index_to_rank
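    # Expected call order within a training step (mirroring
    # examples/train_logistic_regression.py): the per-example gradients that
    # backpack attaches during ``loss.backward()`` must still be present when
    # ``step()`` is called, so call it after ``backward()`` and before the
    # gradients are cleared:
    #
    #     optimizer.zero_grad()
    #     loss = lossfunc(model(x), y)
    #     loss.backward()
    #     sampler.step()
    #     optimizer.step()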
def step(self, sorter_args: Dict = {}) -> None:
indices = self.indices_tracker.get_indices()
if self.balance_type == PAIR_BALANCE:
sorter_args['is_last_batch'] = self.indices_tracker.is_last_batch()
updated_ranks = self.sorter.step(indices=indices, model=self.model, **sorter_args)
self._update_index_rank(updated_ranks=updated_ranks)
def _update_index_rank(self, updated_ranks: OrderedDict) -> None:
for k in updated_ranks.keys():
self._index_to_rank[k] = updated_ranks[k]
def reset_epoch(self):
if not self.indices_tracker.sanity_check():
raise ValueError("The OrderedSampler encounters an issue of non-empty indices cache. "
"This could happen when the ``.step()`` function of OrderedSampler "
"is missed between ``.backward()`` and ``.zero_grad()`` in your script. "
"Note that if you are using gradient accumulation steps, then "
"``.step()`` must be called right after every ``backward()``. "
"This could also happen when the dataloader wrapping the OrderedSampler "
"is called before the actual training. If this is the case, please turn off the "
"indices tracker by ``.stop_tracker()`` and turn it on right before the training "
"by ``.start_tracker()``.")
self._index_to_rank = OrderedDict(
{k: v for k, v in sorted(self._index_to_rank.items(), key=lambda item: item[1], reverse=False)}
)
self.sorter.reset_epoch()
def stop_tracker(self):
self.use_tracker = False
def start_tracker(self):
self.use_tracker = True
def __iter__(self):
self.reset_epoch()
if self.drop_last:
sampler_iter = iter(self._index_to_rank.keys())
while True:
try:
batch = [next(sampler_iter) for _ in range(self.batch_size)]
if self.use_tracker:
self.indices_tracker.update(batch)
yield batch
except StopIteration:
break
else:
batch = [0] * self.batch_size
idx_in_batch = 0
for idx in self._index_to_rank.keys():
batch[idx_in_batch] = idx
idx_in_batch += 1
if idx_in_batch == self.batch_size:
if self.use_tracker:
self.indices_tracker.update(batch)
yield batch
idx_in_batch = 0
batch = [0] * self.batch_size
if idx_in_batch > 0:
if self.use_tracker:
self.indices_tracker.update(batch[:idx_in_batch])
yield batch[:idx_in_batch]
def __len__(self):
if self.drop_last:
return len(self.data_source) // self.batch_size # type: ignore[arg-type]
else:
return (len(self.data_source) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
| 10,988 | 50.834906 | 125 | py |
GraB | GraB-main/src/orderedsampler/sorter/meanbalance.py | import torch
from .sorterbase import Sort
from typing import List, Dict
from torch.nn import Module
class MeanBalance(Sort):
r"""Implement Gradient Balancing using stale mean.
More details can be found in: https://arxiv.org/abs/2205.10733.
Args:
prob_balance (bool): If ``True``, the balancing will be performed
in a probabilistic way. More details can be found in:
https://arxiv.org/abs/2006.14009.
per_batch_order (bool): If ``True``, the ordering will be carried out in a
per batch level.
"""
def __init__(self,
num_examples: int,
order_level: int = 1,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
super(MeanBalance, self).__init__(prob_balance, per_batch_order)
self.num_examples = num_examples
self.order_level = order_level
self.first_idx = 0
self.last_idx = self.num_examples - 1
self.aggregator = None
self.prev_mean_estimator = None
self.next_mean_estimator = None
if prob_balance:
from .subroutine import probabilistic_balance
self.balance = probabilistic_balance
else:
from .subroutine import deterministic_balance
self.balance = deterministic_balance
if per_batch_order:
from .utils import flatten_batch_grads
self.flatten_grads = flatten_batch_grads
else:
from .utils import flatten_example_grads
self.flatten_grads = flatten_example_grads
def reset_epoch(self):
if self.next_mean_estimator is None:
return
if self.prev_mean_estimator is None:
self.prev_mean_estimator = torch.zeros_like(self.next_mean_estimator)
self.prev_mean_estimator.copy_(self.next_mean_estimator)
self.next_mean_estimator.zero_()
self.aggregator.zero_()
self.first_idx = 0
self.last_idx = self.num_examples - 1
@torch.no_grad()
def step(self,
indices: List[int],
model: Module) -> Dict[int, int]:
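        # For each group of ``order_level`` examples: flatten its (averaged)
        # gradient, center it by subtracting the running mean kept from the
        # previous epoch, and ask the balancing routine for a sign. A +1 sign
        # places the group at the front of the next epoch's order (``first_idx``),
        # a -1 sign places it at the back (``last_idx``); the signed sum is
        # accumulated in ``self.aggregator`` and the fresh mean estimate for
        # the next epoch is updated along the way.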
if self.per_batch_order:
grads = self.flatten_grads(model=model)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.next_mean_estimator is None:
self.next_mean_estimator = torch.zeros_like(grads)
if self.prev_mean_estimator is not None:
grads.sub_(self.prev_mean_estimator)
sign = self.balance(vec=grads, aggregator=self.aggregator)
self.aggregator.add_(sign * grads)
self.next_mean_estimator.add_(grads / self.num_examples * self.order_level)
if sign > 0:
updated_ranks = {i:self.first_idx for i in indices}
self.first_idx += len(indices)
else:
updated_ranks = {i:self.last_idx for i in indices}
self.last_idx -= len(indices)
else:
updated_ranks = {}
start_idx, end_idx = 0, min(self.order_level, len(indices))
while end_idx <= len(indices):
grads = self.flatten_grads(model=model, start_idx=start_idx, end_idx=end_idx)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.next_mean_estimator is None:
self.next_mean_estimator = torch.zeros_like(grads)
if self.prev_mean_estimator is not None:
grads.sub_(self.prev_mean_estimator)
sign = self.balance(vec=grads, aggregator=self.aggregator)
self.aggregator.add_(sign * grads)
self.next_mean_estimator.add_(grads / self.num_examples * (end_idx - start_idx))
if sign > 0:
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
self.first_idx += end_idx - start_idx
else:
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.last_idx -= end_idx - start_idx
start_idx = end_idx
if start_idx == len(indices):
break
end_idx = min(end_idx + self.order_level, len(indices))
del grads
return updated_ranks | 4,637 | 40.410714 | 96 | py |
GraB | GraB-main/src/orderedsampler/sorter/sorterbase.py | from typing import Dict, Union
class Sort:
def __init__(self,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
self.prob_balance = prob_balance
self.per_batch_order = per_batch_order
def reset_epoch(self):
pass
def step(self) -> Union[None, Dict]:
raise NotImplementedError | 379 | 26.142857 | 55 | py |
GraB | GraB-main/src/orderedsampler/sorter/utils.py | import torch
from torch import Tensor
from torch.nn import Module
from torch._utils import _flatten_dense_tensors
from typing import Tuple
from collections import OrderedDict
def flatten_batch_grads(model: Module) -> Tensor:
all_grads = []
for param in model.parameters():
if param.grad is not None:
all_grads.append(param.grad.data)
return _flatten_dense_tensors(tuple(all_grads))
def flatten_example_grads(model: Module,
start_idx: int,
end_idx: int) -> Tensor:
all_grads = []
for param in model.parameters():
if param.grad is not None:
all_grads.append(param.grad_batch.data[start_idx:end_idx].mean(0))
return _flatten_dense_tensors(tuple(all_grads))
| 771 | 28.692308 | 78 | py |
GraB | GraB-main/src/orderedsampler/sorter/pairbalance.py | import torch
from .sorterbase import Sort
from typing import List, Dict
from torch.nn import Module
class PairBalance(Sort):
r"""Implement Pair Balance algorithm.
For a given sequence z_i, i = 1, 2, ..., n, we balance z_{2t} - z_{2t-1}.
This avoids using the stale mean as in MeanBalance, and can be useful
when the learning rate is large.
Args:
prob_balance (bool): If ``True``, the balancing will be performed
in a probabilistic way. More details can be found in:
https://arxiv.org/abs/2006.14009.
per_batch_order (bool): If ``True``, the ordering will be carried out in a
per batch level.
"""
def __init__(self,
num_examples: int,
order_level: int = 1,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
super(PairBalance, self).__init__(prob_balance, per_batch_order)
self.num_examples = num_examples
self.order_level = order_level
self.first_idx = 0
self.last_idx = self.num_examples - 1
self.aggregator = None
self.prev_grad_indices = []
self.prev_grad_buffer = None
if prob_balance:
from .subroutine import probabilistic_balance
self.balance = probabilistic_balance
else:
from .subroutine import deterministic_balance
self.balance = deterministic_balance
if per_batch_order:
from .utils import flatten_batch_grads
self.flatten_grads = flatten_batch_grads
else:
from .utils import flatten_example_grads
self.flatten_grads = flatten_example_grads
def reset_epoch(self):
if self.aggregator is None:
return
self.aggregator.zero_()
self.first_idx = 0
self.last_idx = self.num_examples - 1
@torch.no_grad()
def step(self,
indices: List[int],
model: Module,
is_last_batch: bool = False) -> Dict[int, int]:
if self.per_batch_order:
updated_ranks = {}
grads = self.flatten_grads(model=model)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.prev_grad_buffer is None:
if is_last_batch:
sign = self.balance(vec=grads, aggregator=self.aggregator)
if sign > 0:
updated_ranks = {i:self.first_idx for i in indices}
self.first_idx += len(indices)
else:
updated_ranks = {i:self.last_idx for i in indices}
self.last_idx -= len(indices)
else:
self.prev_grad_buffer = torch.zeros_like(grads)
self.prev_grad_buffer.add_(grads)
self.prev_grad_indices = indices
else:
self.prev_grad_buffer.sub_(grads)
sign = self.balance(vec=self.prev_grad_buffer, aggregator=self.aggregator)
self.aggregator.add_(sign * self.prev_grad_buffer)
if sign > 0:
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += len(self.prev_grad_indices)
self.last_idx -= len(indices)
else:
for i in indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += len(indices)
self.last_idx -= len(self.prev_grad_indices)
self.prev_grad_indices = []
self.prev_grad_buffer = None
else:
updated_ranks = {}
start_idx, end_idx = 0, min(self.order_level, len(indices))
while end_idx <= len(indices):
grads = self.flatten_grads(model=model, start_idx=start_idx, end_idx=end_idx)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.prev_grad_buffer is None:
if end_idx == len(indices) and is_last_batch:
sign = self.balance(vec=grads, aggregator=self.aggregator)
if sign > 0:
for i in indices[start_idx:end_idx]:
updated_ranks[i] = self.first_idx
self.first_idx += end_idx - start_idx
else:
for i in indices[start_idx:end_idx]:
updated_ranks[i] = self.last_idx
self.last_idx -= end_idx - start_idx
else:
self.prev_grad_buffer = torch.zeros_like(grads)
self.prev_grad_buffer.add_(grads)
self.prev_grad_indices = indices[start_idx:end_idx]
else:
self.prev_grad_buffer.sub_(grads)
sign = self.balance(vec=self.prev_grad_buffer, aggregator=self.aggregator)
self.aggregator.add_(sign * self.prev_grad_buffer)
if sign > 0:
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += len(self.prev_grad_indices)
self.last_idx -= end_idx - start_idx
else:
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += end_idx - start_idx
self.last_idx -= len(self.prev_grad_indices)
self.prev_grad_indices = []
self.prev_grad_buffer = None
start_idx = end_idx
if start_idx == len(indices):
break
end_idx = min(end_idx + self.order_level, len(indices))
del grads
return updated_ranks | 7,142 | 43.924528 | 94 | py |
GraB | GraB-main/src/orderedsampler/sorter/subroutine.py | import random
import torch
from torch import Tensor
def deterministic_balance(vec: Tensor, aggregator: Tensor):
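    # Greedily pick the sign for ``vec`` that keeps the norm of the running
    # signed sum (``aggregator``) as small as possible.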
if torch.norm(aggregator + vec) <= torch.norm(aggregator - vec):
return 1
else:
return -1
def probabilistic_balance(vec, aggregator):
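    # Probabilistic variant (self-balancing-walk style, see the reference in
    # the MeanBalance/PairBalance docstrings): the more ``vec`` aligns with the
    # current aggregator, the less likely +1 is chosen. The constant 60 is a
    # scaling factor hard-coded in this implementation.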
p = 0.5 - torch.dot(vec, aggregator) / 60
if random.random() <= p:
return 1
else:
return -1
| 395 | 18.8 | 68 | py |
GraB | GraB-main/src/orderedsampler/sorter/__init__.py | 0 | 0 | 0 | py |
|
pke | pke-master/setup.py | from distutils.core import setup
setup(name='pke',
version='2.0.0',
description='Python Keyphrase Extraction module',
author='pke contributors',
author_email='[email protected]',
license='gnu',
packages=['pke', 'pke.unsupervised', 'pke.supervised',
'pke.supervised.feature_based', 'pke.unsupervised.graph_based',
'pke.unsupervised.statistical'],
url="https://github.com/boudinfl/pke",
install_requires=[
'nltk',
'networkx',
'numpy',
'scipy',
'scikit-learn',
'unidecode',
'future',
'joblib',
'spacy>=3.2.3'
],
package_data={'pke': ['models/*.pickle', 'models/*.gz']}
)
| 769 | 28.615385 | 79 | py |
pke | pke-master/pke/base.py | # -*- coding: utf-8 -*-
"""Base classes for the pke module."""
from collections import defaultdict
from pke.data_structures import Candidate
from pke.readers import RawTextReader, SpacyDocReader, PreprocessedReader
from nltk import RegexpParser
from nltk.stem.snowball import SnowballStemmer
from pke.lang import stopwords, langcodes
from string import punctuation
import os
import logging
import spacy
class LoadFile(object):
"""The LoadFile class that provides base functions."""
def __init__(self):
"""Initializer for LoadFile class."""
self.language = None
"""Language of the input file."""
self.normalization = None
"""Word normalization method."""
self.sentences = []
"""Sentence container (list of Sentence objects)."""
self.candidates = defaultdict(Candidate)
"""Keyphrase candidates container (dict of Candidate objects)."""
self.weights = {}
"""Weight container (can be either word or candidate weights)."""
self._models = os.path.join(os.path.dirname(__file__), 'models')
"""Root path of the models."""
self._df_counts = os.path.join(self._models, "df-semeval2010.tsv.gz")
"""Path to the document frequency counts provided in pke."""
self.stoplist = None
"""List of stopwords."""
def load_document(self, input, language=None, stoplist=None,
normalization='stemming', spacy_model=None):
"""Loads the content of a document/string/stream in a given language.
Args:
input (str): input.
language (str): language of the input, defaults to 'en'.
stoplist (list): custom list of stopwords, defaults to
pke.lang.stopwords[language].
normalization (str): word normalization method, defaults to
'stemming'. Other possible value is 'none'
for using word surface forms instead of stems/lemmas.
spacy_model (spacy.lang): preloaded spacy model when input is a
string.
"""
# Reset object for new document
self.__init__()
# get the language parameter
if language is None:
language = 'en'
# set the language of the document
self.language = language
# word normalization (filling self.sentences[].stems)
self.normalization = normalization
# initialize the stoplist
if stoplist is not None:
self.stoplist = stoplist
else:
try:
self.stoplist = stopwords[self.language]
except KeyError:
logging.warning('No stoplist available in pke for \'{}\' language.'.format(self.language))
# logging.warning('Set `stoplist` to `[]` or a custom stoplist to suppress this warning.')
self.stoplist = []
# check whether input is a spacy doc object instance
if isinstance(input, spacy.tokens.doc.Doc):
parser = SpacyDocReader()
sents = parser.read(spacy_doc=input)
# check whether input is a string
elif isinstance(input, str):
parser = RawTextReader(language=self.language)
sents = parser.read(text=input, spacy_model=spacy_model)
# check whether input is processed text
elif isinstance(input, list) and all(isinstance(item, list) for item in input):
parser = PreprocessedReader()
sents = parser.read(list_of_sentence_tuples=input)
else:
raise TypeError('Cannot process input. It is neither a spacy doc, a string or a list of list of tuple: {}'.format(type(input)))
# populate the sentences
self.sentences = sents
# TODO: this code could go into Reader.normalize ? Hum, not sure
if self.normalization == 'stemming':
# fall back to porter if english language (or unavailable languages) is used
try:
langcode = langcodes.get(self.language)
if langcode == "english":
langcode = 'porter'
stemmer = SnowballStemmer(langcode)
except ValueError:
logging.warning('No stemmer available in pke for \'{}\' language -> falling back to porter stemmer.'.format(self.language))
stemmer = SnowballStemmer("porter")
# populate Sentence.stems
for i, sentence in enumerate(self.sentences):
self.sentences[i].stems = [stemmer.stem(w).lower() for w in sentence.words]
else:
for i, sentence in enumerate(self.sentences):
self.sentences[i].stems = [w.lower() for w in sentence.words]
def is_redundant(self, candidate, prev, minimum_length=1):
"""Test if one candidate is redundant with respect to a list of already
selected candidates. A candidate is considered redundant if it is
included in another candidate that is ranked higher in the list.
Args:
candidate (str): the lexical form of the candidate.
prev (list): the list of already selected candidates (lexical
forms).
minimum_length (int): minimum length (in words) of the candidate
to be considered, defaults to 1.
"""
# get the tokenized lexical form from the candidate
candidate = self.candidates[candidate].lexical_form
# only consider candidate greater than one word
if len(candidate) < minimum_length:
return False
# get the tokenized lexical forms from the selected candidates
prev = [self.candidates[u].lexical_form for u in prev]
# loop through the already selected candidates
for prev_candidate in prev:
for i in range(len(prev_candidate) - len(candidate) + 1):
if candidate == prev_candidate[i:i + len(candidate)]:
return True
return False
def get_n_best(self, n=10, redundancy_removal=False, stemming=False):
"""Returns the n-best candidates given the weights.
Args:
n (int): the number of candidates, defaults to 10.
redundancy_removal (bool): whether redundant keyphrases are
filtered out from the n-best list, defaults to False.
stemming (bool): whether to extract stems or surface forms
                (lowercased, first occurring form of candidate), defaults to
False.
"""
# sort candidates by descending weight
best = sorted(self.weights, key=self.weights.get, reverse=True)
# remove redundant candidates
if redundancy_removal:
# initialize a new container for non redundant candidates
non_redundant_best = []
# loop through the best candidates
for candidate in best:
                # test whether candidate is redundant
if self.is_redundant(candidate, non_redundant_best):
continue
# add the candidate otherwise
non_redundant_best.append(candidate)
# break computation if the n-best are found
if len(non_redundant_best) >= n:
break
# copy non redundant candidates in best container
best = non_redundant_best
# get the list of best candidates as (lexical form, weight) tuples
n_best = [(u, self.weights[u]) for u in best[:min(n, len(best))]]
# replace with surface forms if no stemming
if not stemming:
n_best = [(' '.join(self.candidates[u].surface_forms[0]).lower(),
self.weights[u]) for u in best[:min(n, len(best))]]
# return the list of best candidates
return n_best
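    # A minimal end-to-end sketch of how LoadFile subclasses are typically
    # driven (the concrete extractor below is an assumption for illustration;
    # `candidate_selection` and `candidate_weighting` are implemented by the
    # subclasses, not by LoadFile itself):
    #
    #     import pke
    #     extractor = pke.unsupervised.TopicRank()
    #     extractor.load_document(input="the text to analyse", language='en')
    #     extractor.candidate_selection()
    #     extractor.candidate_weighting()
    #     keyphrases = extractor.get_n_best(n=10)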
def add_candidate(self, words, stems, pos, offset, sentence_id):
"""Add a keyphrase candidate to the candidates container.
Args:
words (list): the words (surface form) of the candidate.
stems (list): the stemmed words of the candidate.
pos (list): the Part-Of-Speeches of the words in the candidate.
offset (int): the offset of the first word of the candidate.
sentence_id (int): the sentence id of the candidate.
"""
# build the lexical (canonical) form of the candidate using stems
lexical_form = ' '.join(stems)
# add/update the surface forms
self.candidates[lexical_form].surface_forms.append(words)
# add/update the lexical_form
self.candidates[lexical_form].lexical_form = stems
# add/update the POS patterns
self.candidates[lexical_form].pos_patterns.append(pos)
# add/update the offsets
self.candidates[lexical_form].offsets.append(offset)
# add/update the sentence ids
self.candidates[lexical_form].sentence_ids.append(sentence_id)
def ngram_selection(self, n=3):
"""Select all the n-grams and populate the candidate container.
Args:
n (int): the n-gram length, defaults to 3.
"""
# reset the candidates
self.candidates.clear()
# loop through the sentences
for i, sentence in enumerate(self.sentences):
# limit the maximum n for short sentence
skip = min(n, sentence.length)
# compute the offset shift for the sentence
shift = sum([s.length for s in self.sentences[0:i]])
# generate the ngrams
for j in range(sentence.length):
for k in range(j + 1, min(j + 1 + skip, sentence.length + 1)):
# add the ngram to the candidate container
self.add_candidate(words=sentence.words[j:k],
stems=sentence.stems[j:k],
pos=sentence.pos[j:k],
offset=shift + j,
sentence_id=i)
def longest_pos_sequence_selection(self, valid_pos=None):
self.longest_sequence_selection(
key=lambda s: s.pos, valid_values=valid_pos)
def longest_keyword_sequence_selection(self, keywords):
self.longest_sequence_selection(
key=lambda s: s.stems, valid_values=keywords)
def longest_sequence_selection(self, key, valid_values):
"""Select the longest sequences of given POS tags as candidates.
Args:
key (func) : function that given a sentence return an iterable
valid_values (set): the set of valid values, defaults to None.
"""
# reset the candidates
self.candidates.clear()
# loop through the sentences
for i, sentence in enumerate(self.sentences):
# compute the offset shift for the sentence
shift = sum([s.length for s in self.sentences[0:i]])
# container for the sequence (defined as list of offsets)
seq = []
# loop through the tokens
for j, value in enumerate(key(self.sentences[i])):
# add candidate offset in sequence and continue if not last word
if value in valid_values:
seq.append(j)
if j < (sentence.length - 1):
continue
# add sequence as candidate if non empty
if seq:
# add the ngram to the candidate container
self.add_candidate(words=sentence.words[seq[0]:seq[-1] + 1],
stems=sentence.stems[seq[0]:seq[-1] + 1],
pos=sentence.pos[seq[0]:seq[-1] + 1],
offset=shift + seq[0],
sentence_id=i)
# flush sequence container
seq = []
def grammar_selection(self, grammar=None):
"""Select candidates using nltk RegexpParser with a grammar defining
noun phrases (NP).
Args:
grammar (str): grammar defining POS patterns of NPs.
"""
# reset the candidates
self.candidates.clear()
# initialize default grammar if none provided
if grammar is None:
grammar = r"""
NBAR:
{<NOUN|PROPN|ADJ>*<NOUN|PROPN>}
NP:
{<NBAR>}
{<NBAR><ADP><NBAR>}
"""
# initialize chunker
chunker = RegexpParser(grammar)
# loop through the sentences
for i, sentence in enumerate(self.sentences):
# compute the offset shift for the sentence
shift = sum([s.length for s in self.sentences[0:i]])
# convert sentence as list of (offset, pos) tuples
tuples = [(str(j), sentence.pos[j]) for j in range(sentence.length)]
# parse sentence
tree = chunker.parse(tuples)
# find candidates
for subtree in tree.subtrees():
if subtree.label() == 'NP':
leaves = subtree.leaves()
# get the first and last offset of the current candidate
first = int(leaves[0][0])
last = int(leaves[-1][0])
# add the NP to the candidate container
self.add_candidate(words=sentence.words[first:last + 1],
stems=sentence.stems[first:last + 1],
pos=sentence.pos[first:last + 1],
offset=shift + first,
sentence_id=i)
@staticmethod
def _is_alphanum(word, valid_punctuation_marks='-'):
"""Check if a word is valid, i.e. it contains only alpha-numeric
characters and valid punctuation marks.
Args:
word (string): a word.
valid_punctuation_marks (str): punctuation marks that are valid
for a candidate, defaults to '-'.
"""
for punct in valid_punctuation_marks.split():
word = word.replace(punct, '')
return word.isalnum()
def candidate_filtering(self,
minimum_length=3,
minimum_word_size=2,
valid_punctuation_marks='-',
maximum_word_number=5,
only_alphanum=True,
pos_blacklist=None):
"""Filter the candidates containing strings from the stoplist. Only
keep the candidates containing alpha-numeric characters (if the
non_latin_filter is set to True) and those length exceeds a given
number of characters.
Args:
minimum_length (int): minimum number of characters for a
candidate, defaults to 3.
minimum_word_size (int): minimum number of characters for a
token to be considered as a valid word, defaults to 2.
valid_punctuation_marks (str): punctuation marks that are valid
for a candidate, defaults to '-'.
maximum_word_number (int): maximum length in words of the
candidate, defaults to 5.
only_alphanum (bool): filter candidates containing non (latin)
alpha-numeric characters, defaults to True.
pos_blacklist (list): list of unwanted Part-Of-Speeches in
candidates, defaults to [].
"""
if pos_blacklist is None:
pos_blacklist = []
# loop through the candidates
for k in list(self.candidates):
# get the candidate
v = self.candidates[k]
# get the words from the first occurring surface form
words = [u.lower() for u in v.surface_forms[0]]
# discard if words are in the stoplist
# TODO: shouldn't it be the stems ?
if set(words).intersection(self.stoplist):
del self.candidates[k]
# discard if tags are in the pos_blacklist
elif set(v.pos_patterns[0]).intersection(pos_blacklist):
del self.candidates[k]
# discard if containing tokens composed of only punctuation
elif any([set(u).issubset(set(punctuation)) for u in words]):
del self.candidates[k]
            # discard candidates shorter than minimum_length characters
elif len(''.join(words)) < minimum_length:
del self.candidates[k]
            # discard candidates containing words shorter than minimum_word_size
elif min([len(u) for u in words]) < minimum_word_size:
del self.candidates[k]
            # discard candidates composed of more than maximum_word_number words
elif len(v.lexical_form) > maximum_word_number:
del self.candidates[k]
# discard if not containing only alpha-numeric characters
if only_alphanum and k in self.candidates:
if not all([self._is_alphanum(w, valid_punctuation_marks)
for w in words]):
del self.candidates[k]
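    # Hypothetical call sequence (sketch only, names are illustrative):
    #   extractor.ngram_selection(n=3)
    #   extractor.candidate_filtering(minimum_length=3,
    #                                 maximum_word_number=5,
    #                                 pos_blacklist=['PRON'])
    # keeps 1-3 grams of at least 3 characters, drops candidates longer than
    # 5 words and discards any candidate containing a pronoun.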
| 17,433 | 37.485651 | 139 | py |
pke | pke-master/pke/readers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Readers for the pke module."""
import logging
import spacy
from pke.data_structures import Sentence
# https://spacy.io/usage/linguistic-features#native-tokenizer-additions
from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.util import compile_infix_regex
# Modify tokenizer infix patterns
infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
# ✅ Commented out regex that splits on hyphens between letters:
# r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
infix_re = compile_infix_regex(infixes)
class Reader(object):
"""Reader default class."""
def read(self, path):
raise NotImplementedError
class RawTextReader(Reader):
"""Reader for raw text."""
def __init__(self, language=None):
"""Constructor for RawTextReader.
Args:
language (str): language of text to process.
"""
self.language = language
if language is None:
self.language = 'en'
if len(self.language) != 2:
raise ValueError('`language` is \'{}\', but should be an iso2 language code (\'en\' instead of \'english\')'.format(self.language))
def read(self, text, spacy_model=None):
"""Read the input file and use spacy to pre-process.
Spacy model selection: By default this function will load the spacy
model that is closest to the `language` parameter ('fr' language will
load the spacy model linked to 'fr' or any 'fr_core_web_*' available
model). In order to select the model that will be used please provide a
preloaded model via the `spacy_model` parameter, or link the model you
wish to use to the corresponding language code
`python3 -m spacy link spacy_model lang_code`.
Args:
text (str): raw text to pre-process.
spacy_model (model): an already loaded spacy model.
"""
nlp = spacy_model
if nlp is None:
# list installed models
installed_models = [m for m in spacy.util.get_installed_models() if m[:2] == self.language]
# select first model for the language
if len(installed_models):
nlp = spacy.load(installed_models[0], disable=['ner', 'textcat', 'parser'])
            # stop execution if no model is available
else:
excp_msg = 'No downloaded spacy model for \'{}\' language.'.format(self.language)
excp_msg += '\nA list of downloadable spacy models is available at https://spacy.io/models.'
excp_msg += '\nAlternatively, preprocess your document as a list of sentence tuple (word, pos), such as:'
excp_msg += "\n\t[[('The', 'DET'), ('brown', 'ADJ'), ('fox', 'NOUN'), ('.', 'PUNCT')]]"
raise Exception(excp_msg)
# add the sentence splitter
nlp.add_pipe('sentencizer')
# Fix for non splitting words with hyphens with spacy taken from
# https://spacy.io/usage/linguistic-features#native-tokenizer-additions
nlp.tokenizer.infix_finditer = infix_re.finditer
# process the document
spacy_doc = nlp(text)
sentences = []
for sentence_id, sentence in enumerate(spacy_doc.sents):
sentences.append(Sentence(
words=[token.text for token in sentence],
pos=[token.pos_ or token.tag_ for token in sentence],
meta={
"lemmas": [token.lemma_ for token in sentence],
"char_offsets": [(token.idx, token.idx + len(token.text))
for token in sentence]
}
))
return sentences
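# Example usage of RawTextReader (sketch; assumes an English spacy model such
# as 'en_core_web_sm' is installed for the 'en' language code):
#   reader = RawTextReader(language='en')
#   sentences = reader.read('The brown fox jumps over the lazy dog.')
#   sentences[0].words  # ['The', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog', '.']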
class SpacyDocReader(Reader):
"""Minimal Spacy Doc Reader."""
def read(self, spacy_doc):
sentences = []
for sentence_id, sentence in enumerate(spacy_doc.sents):
sentences.append(Sentence(
words=[token.text for token in sentence],
pos=[token.pos_ or token.tag_ for token in sentence],
meta={
"lemmas": [token.lemma_ for token in sentence],
"char_offsets": [(token.idx, token.idx + len(token.text))
for token in sentence]
}
))
return sentences
class PreprocessedReader(Reader):
"""Reader for preprocessed text."""
def read(self, list_of_sentence_tuples):
sentences = []
for sentence_id, sentence in enumerate(list_of_sentence_tuples):
words = [word for word, pos_tag in sentence]
pos_tags = [pos_tag for word, pos_tag in sentence]
shift = 0
sentences.append(Sentence(
words=words,
pos=pos_tags
))
shift += len(' '.join(words))
return sentences
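# Example usage of PreprocessedReader (sketch); the input format matches the
# (word, pos) tuples shown in the RawTextReader error message above:
#   sentences = PreprocessedReader().read(
#       [[('The', 'DET'), ('brown', 'ADJ'), ('fox', 'NOUN'), ('.', 'PUNCT')]])
#   sentences[0].pos  # ['DET', 'ADJ', 'NOUN', 'PUNCT']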
| 5,330 | 34.072368 | 143 | py |
pke | pke-master/pke/lang.py | # -*- coding: utf-8 -*-
"""Language resources of pke.
Lists of stopwords in different languages.
These lists are taken from spacy.
Langcodes.
"""
import importlib
# This dictionary holds only languages supported by `pke`.
# Supported languages need a stemmer and a spacy model.
# This dictionary maps spacy's langcode to stemmer language
# (ordered by language name).
# The list of languages was obtained using:
# `nltk.stem.SnowballStemmer.languages`
langcodes = {
# "ar": "arabic", # no spacy model yet ;)
"da": "danish",
"nl": "dutch",
"en": "english",
"fi": "finnish",
"fr": "french",
"de": "german",
# "hu": "hungarian", # no spacy model yet ;)
"it": "italian",
"nb": "norwegian",
"pt": "portuguese",
"ro": "romanian",
"ru": "russian",
"es": "spanish",
"sv": "swedish",
}
stopwords = {}
for langcode in langcodes:
try:
tmp = importlib.import_module('spacy.lang.{}'.format(langcode))
stopwords[langcode] = tmp.stop_words.STOP_WORDS
except ModuleNotFoundError:
continue
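# Example (sketch): stopwords['en'] is spacy's English STOP_WORDS set, so an
# expression such as 'the' in stopwords['en'] evaluates to True.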
| 1,079 | 21.5 | 71 | py |
pke | pke-master/pke/utils.py | # -*- coding: utf-8 -*-
"""Useful functions for the pke module."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import csv
import pickle
import gzip
import json
import codecs
import logging
from collections import defaultdict
from pke.base import LoadFile
from pke.lang import stopwords, langcodes
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from nltk.stem.snowball import SnowballStemmer
def load_document_frequency_file(input_file,
delimiter='\t'):
"""Load a tsv (tab-separated-values) file containing document frequencies.
Automatically detects if input file is compressed (gzip) by looking at its
extension (.gz).
Args:
input_file (str): the input file containing document frequencies in
csv format.
delimiter (str): the delimiter used for separating term-document
frequencies tuples, defaults to '\t'.
Returns:
dict: a dictionary of the form {term_1: freq}, freq being an integer.
"""
# initialize the DF dictionary
frequencies = {}
# open the input file
with (gzip.open(input_file, 'rt', encoding='utf-8')
if input_file.endswith('.gz')
else codecs.open(input_file, 'rt', encoding='utf-8')) as f:
# read the csv file
df_reader = csv.reader(f, delimiter=delimiter)
# populate the dictionary
for row in df_reader:
frequencies[row[0]] = int(row[1])
# return the populated dictionary
return frequencies
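# Illustrative file content and return value (sketch, hypothetical counts):
#   --NB_DOC--<TAB>100
#   keyphrase extraction<TAB>42
# load_document_frequency_file('df.tsv.gz') would then return
# {'--NB_DOC--': 100, 'keyphrase extraction': 42}.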
def compute_document_frequency(documents,
output_file,
language='en',
stoplist=None,
normalization='stemming',
delimiter='\t',
# TODO: What is the use case for changing this ?
n=3):
"""Compute the n-gram document frequencies from a set of input documents.
An extra row is added to the output file for specifying the number of
documents from which the document frequencies were computed
(--NB_DOC-- tab XXX). The output file is compressed using gzip.
Args:
documents (list): list of pke-readable documents.
output_file (str): the output file.
language (str): language of the input documents (used for computing the
n-stem or n-lemma forms), defaults to 'en' (english).
stoplist (list): the stop words for filtering n-grams, default to
pke.lang.stopwords[language].
normalization (str): word normalization method, defaults to
'stemming'. Other possible value is 'none' for using word surface
forms instead of stems/lemmas.
delimiter (str): the delimiter between n-grams and document
frequencies, defaults to tabulation (\t).
n (int): the size of the n-grams, defaults to 3.
"""
# document frequency container
frequencies = defaultdict(int)
# initialize number of documents
nb_documents = 0
# loop through the documents
for document in documents:
# initialize load file object
doc = LoadFile()
# read the input file
doc.load_document(input=document,
language=language,
stoplist=stoplist,
normalization=normalization)
# candidate selection
doc.ngram_selection(n=n)
# filter candidates containing punctuation marks
doc.candidate_filtering()
# loop through candidates
for lexical_form in doc.candidates:
frequencies[lexical_form] += 1
nb_documents += 1
if nb_documents % 1000 == 0:
logging.info("{} docs, memory used: {} mb".format(
nb_documents,
sys.getsizeof(frequencies) / 1024 / 1024))
# create directories from path if not exists
if os.path.dirname(output_file):
os.makedirs(os.path.dirname(output_file), exist_ok=True)
# dump the df container
with gzip.open(output_file, 'wt', encoding='utf-8') as f:
# add the number of documents as special token
first_line = '--NB_DOC--' + delimiter + str(nb_documents)
f.write(first_line + '\n')
for ngram in frequencies:
line = ngram + delimiter + str(frequencies[ngram])
f.write(line + '\n')
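# Minimal usage sketch (hypothetical documents and output path):
#   compute_document_frequency(documents=['first raw document ...',
#                                         'second raw document ...'],
#                              output_file='df.tsv.gz',
#                              language='en',
#                              normalization='stemming',
#                              n=3)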
def train_supervised_model(documents,
references,
model_file,
language='en',
stoplist=None,
normalization="stemming",
df=None,
model=None,
leave_one_out=False):
"""Build a supervised keyphrase extraction model from a set of documents
and reference keywords.
Args:
documents (list): list of tuple (id, pke-readable documents). `id`s
should match the one in reference.
references (dict): reference keywords.
model_file (str): the model output file.
language (str): language of the input documents (used for computing the
n-stem or n-lemma forms), defaults to 'en' (english).
stoplist (list): the stop words for filtering n-grams, default to
pke.lang.stopwords[language].
normalization (str): word normalization method, defaults to 'stemming'.
Other possible values are 'lemmatization' or 'None' for using word
surface forms instead of stems/lemmas.
df (dict): df weights dictionary.
model (object): the supervised model to train, defaults to Kea.
leave_one_out (bool): whether to use a leave-one-out procedure for
training, creating one model per input, defaults to False.
"""
training_instances = []
training_classes = []
masks = {}
# get the input files from the input directory
for doc_id, document in documents:
# logging.info('reading file {}'.format(input_file))
# get the document id from file name
# doc_id = '.'.join(os.path.basename(input_file).split('.')[0:-1])
# initialize the input file
model.__init__()
# load the document
model.load_document(input=document,
language=language,
stoplist=stoplist,
normalization=normalization)
# candidate selection
model.candidate_selection()
# skipping documents without candidates
if not len(model.candidates):
continue
# extract features
model.feature_extraction(df=df, training=True)
# add the first offset for leave-one-out masking
masks[doc_id] = [len(training_classes)]
# annotate the reference keyphrases in the instances
for candidate in model.instances:
if candidate in references[doc_id]:
training_classes.append(1)
else:
training_classes.append(0)
training_instances.append(model.instances[candidate])
# add the last offset for leave-one-out masking
masks[doc_id].append(len(training_classes))
if not leave_one_out:
logging.info('writing model to {}'.format(model_file))
model.train(training_instances=training_instances,
training_classes=training_classes,
model_file=model_file)
else:
logging.info('leave-one-out training procedure')
for doc_id in masks:
logging.info('writing model to {}'.format(doc_id))
ind = masks[doc_id]
fold = training_instances[:ind[0]] + training_instances[ind[1]:]
gold = training_classes[:ind[0]] + training_classes[ind[1]:]
model.train(training_instances=fold,
training_classes=gold,
model_file='{}.{}.pickle'.format(model_file, doc_id))
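# Usage sketch (hypothetical data; assumes the Kea model shipped with pke):
#   df = load_document_frequency_file('df.tsv.gz')
#   train_supervised_model(documents=[('doc-1', 'raw text of document 1 ...')],
#                          references={'doc-1': ['keyphrase one', 'keyphrase two']},
#                          model_file='kea.model',
#                          df=df,
#                          model=pke.supervised.Kea())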
def load_references(input_file,
sep_doc_id=':',
sep_ref_keyphrases=',',
normalize_reference=False,
language="en",
encoding=None,
excluded_file=None):
"""Load a reference file. Reference file can be either in json format or in
the SemEval-2010 official format.
Args:
input_file (str): path to the reference file.
sep_doc_id (str): the separator used for doc_id in reference file,
defaults to ':'.
sep_ref_keyphrases (str): the separator used for keyphrases in
reference file, defaults to ','.
normalize_reference (bool): whether to normalize the reference
keyphrases using stemming, default to False.
language (str): language of the input documents (used for computing the
stems), defaults to 'en' (english).
encoding (str): file encoding, default to None.
excluded_file (str): file to exclude (for leave-one-out
cross-validation), defaults to None.
"""
logging.info('loading reference keyphrases from {}'.format(input_file))
references = defaultdict(list)
# open input file
with codecs.open(input_file, 'r', encoding) as f:
# load json data
if input_file.endswith('.json'):
references = json.load(f)
for doc_id in references:
references[doc_id] = [keyphrase for variants in
references[doc_id] for keyphrase in
variants]
# or load SemEval-2010 file
else:
for line in f:
cols = line.strip().split(sep_doc_id)
doc_id = cols[0].strip()
keyphrases = cols[1].strip().split(sep_ref_keyphrases)
for v in keyphrases:
if '+' in v:
for s in v.split('+'):
references[doc_id].append(s)
else:
references[doc_id].append(v)
# normalize reference if needed
if normalize_reference:
# initialize stemmer
        # 'en' is deliberately mapped to a non-existing key so that the
        # original 'porter' stemmer is used for English
        langcode = langcodes.get(language.replace('en', 'xx'), 'porter')
stemmer = SnowballStemmer(langcode)
for doc_id in references:
for i, keyphrase in enumerate(references[doc_id]):
stems = [stemmer.stem(w) for w in keyphrase.split()]
references[doc_id][i] = ' '.join(stems)
# remove excluded file if needed
if excluded_file is not None:
if excluded_file not in references:
logging.warning("{} is not in references".format(excluded_file))
else:
logging.info("{} removed from references".format(excluded_file))
del references[excluded_file]
return references
def load_lda_model(input_file):
"""Load a gzip file containing lda model.
Args:
input_file (str): the gzip input file containing lda model.
Returns:
dictionary: a dictionary of the form {term_1: freq}, freq being an
integer.
model: an initialized sklearn.decomposition.LatentDirichletAllocation
model.
"""
model = LatentDirichletAllocation()
with gzip.open(input_file, 'rb') as f:
(dictionary,
model.components_,
model.exp_dirichlet_component_,
model.doc_topic_prior_) = pickle.load(f)
return dictionary, model
def compute_lda_model(documents,
output_file,
n_topics=500,
language="en",
stoplist=None,
normalization="stemming"):
"""Compute a LDA model from a collection of documents. Latent Dirichlet
Allocation is computed using sklearn module.
Args:
        documents (list): list of pke-readable documents.
output_file (str): the output file.
n_topics (int): number of topics for the LDA model, defaults to 500.
language (str): language of the input documents, used for stop_words
in sklearn CountVectorizer, defaults to 'en'.
stoplist (list): the stop words for filtering words, default to
pke.lang.stopwords[language].
normalization (str): word normalization method, defaults to
'stemming'. Other possible value is 'none'
for using word surface forms instead of stems/lemmas.
"""
# texts container
texts = []
    # loop through the documents
for document in documents:
# initialize load file object
doc = LoadFile()
# read the input file
doc.load_document(input=document,
language=language,
normalization=normalization)
# container for current document
text = []
# loop through sentences
for sentence in doc.sentences:
# get the tokens (stems) from the sentence if they are not
# punctuation marks
text.extend([sentence.stems[i] for i in range(sentence.length)
if sentence.pos[i] != 'PUNCT'
and sentence.pos[i].isalpha()])
# add the document to the texts container
texts.append(' '.join(text))
# vectorize dataset
# get the stoplist from pke.lang because CountVectorizer only contains
# english stopwords atm
if stoplist is None:
# CountVectorizer expects a list
        # whereas stopwords.get(language) returns a set
stoplist = list(stopwords.get(language))
tf_vectorizer = CountVectorizer(
stop_words=stoplist)
tf = tf_vectorizer.fit_transform(texts)
# extract vocabulary
vocabulary = list(tf_vectorizer.get_feature_names_out())
# create LDA model and train
lda_model = LatentDirichletAllocation(n_components=n_topics,
random_state=0,
learning_method='batch')
lda_model.fit(tf)
# save all data necessary for later prediction
saved_model = (vocabulary,
lda_model.components_,
lda_model.exp_dirichlet_component_,
lda_model.doc_topic_prior_)
# Dump the df container
logging.info('writing LDA model to {}'.format(output_file))
# create directories from path if not exists
if os.path.dirname(output_file):
os.makedirs(os.path.dirname(output_file), exist_ok=True)
# dump the LDA model
with gzip.open(output_file, 'wb') as fp:
pickle.dump(saved_model, fp)
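# Usage sketch (hypothetical paths):
#   compute_lda_model(documents=['raw text ...'],
#                     output_file='lda.pickle.gz',
#                     n_topics=500,
#                     language='en')
#   dictionary, lda = load_lda_model('lda.pickle.gz')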
| 14,963 | 34.971154 | 79 | py |
pke | pke-master/pke/__init__.py | from __future__ import absolute_import
from pke.data_structures import Candidate, Sentence
from pke.base import LoadFile
from pke.utils import (
load_document_frequency_file, compute_document_frequency,
train_supervised_model, load_references,
compute_lda_model, load_lda_model)
import pke.unsupervised
import pke.supervised
| 338 | 29.818182 | 61 | py |
pke | pke-master/pke/data_structures.py | # -*- coding: utf-8 -*-
"""Data structures for the pke module."""
from dataclasses import dataclass
@dataclass
class Sentence:
"""The sentence data structure."""
    def __init__(self, words, pos=None, meta=None):
        # pos and meta default to fresh containers to avoid sharing mutable
        # default arguments between instances
        self.words = words
        """list of words (tokens) in the sentence."""
        self.pos = pos if pos is not None else []
        """list of Part-Of-Speeches."""
        self.stems = []
        """list of stems."""
        self.length = len(words)
        """length (number of tokens) of the sentence."""
        self.meta = meta if meta is not None else {}
        """meta-information of the sentence."""
@dataclass
class Candidate:
"""The keyphrase candidate data structure."""
def __init__(self):
self.surface_forms = []
""" the surface forms of the candidate. """
self.offsets = []
""" the offsets of the surface forms. """
self.sentence_ids = []
""" the sentence id of each surface form. """
self.pos_patterns = []
""" the Part-Of-Speech patterns of the candidate. """
self.lexical_form = []
""" the lexical form of the candidate. """
| 1,114 | 21.3 | 61 | py |
pke | pke-master/pke/unsupervised/__init__.py | # -*- coding: utf-8 -*-
# Python Keyphrase Extraction toolkit: unsupervised models
from __future__ import absolute_import
from pke.unsupervised.graph_based.topicrank import TopicRank
from pke.unsupervised.graph_based.singlerank import SingleRank
from pke.unsupervised.graph_based.multipartiterank import MultipartiteRank
from pke.unsupervised.graph_based.positionrank import PositionRank
from pke.unsupervised.graph_based.single_tpr import TopicalPageRank
from pke.unsupervised.graph_based.textrank import TextRank
from pke.unsupervised.statistical.tfidf import TfIdf
from pke.unsupervised.statistical.yake import YAKE
from pke.unsupervised.statistical.firstphrases import FirstPhrases
from pke.unsupervised.statistical.kpminer import KPMiner
| 747 | 40.555556 | 74 | py |
pke | pke-master/pke/unsupervised/statistical/firstphrases.py | # -*- coding: utf-8 -*-
# Author: Ygor Gallina
# Date: 19-10-2018
"""StupidKE keyphrase extraction model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pke.base import LoadFile
class FirstPhrases(LoadFile):
"""Baseline model that extracts the first phrases of a document.
Parameterized example::
import pke
# define the set of valid Part-of-Speeches
pos = {'NOUN', 'PROPN', 'ADJ'}
# 1. create a FirstPhrases baseline extractor.
extractor = pke.unsupervised.FirstPhrases()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. select the longest sequences of nouns and adjectives as candidates.
extractor.candidate_selection(pos=pos)
# 4. weight the candidates using their position
extractor.candidate_weighting()
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def candidate_selection(self, pos=None):
"""Candidate selection using longest sequences of PoS.
Args:
pos (set): set of valid POS tags, defaults to ('NOUN', 'PROPN',
'ADJ').
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# select sequence of adjectives and nouns
self.longest_pos_sequence_selection(valid_pos=pos)
def candidate_weighting(self):
"""Candidate weighting function using position."""
# rank candidates using inverse position
for k in self.candidates.keys():
            # the '-' ensures that the first item will have the highest weight
self.weights[k] = -min(self.candidates[k].offsets)
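    # Weighting sketch (illustrative): a candidate whose earliest occurrence is
    # at word offset 12 receives weight -12, so candidates appearing earlier in
    # the document are ranked higher by get_n_best().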
| 1,891 | 28.5625 | 80 | py |
pke | pke-master/pke/unsupervised/statistical/tfidf.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-10-2018
"""TF-IDF keyphrase extraction model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import logging
from pke.base import LoadFile
from pke.utils import load_document_frequency_file
class TfIdf(LoadFile):
"""TF*IDF keyphrase extraction model.
Parameterized example::
import string
import pke
# 1. create a TfIdf extractor.
extractor = pke.unsupervised.TfIdf()
# 2. load the content of the document.
stoplist = list(string.punctuation)
stoplist += pke.lang.stopwords.get('en')
extractor.load_document(input='path/to/input',
language='en',
stoplist=stoplist,
normalization=None)
# 3. select {1-3}-grams not containing punctuation marks as candidates.
extractor.candidate_selection(n=3)
# 4. weight the candidates using a `tf` x `idf`
df = pke.load_document_frequency_file(input_file='path/to/df.tsv.gz')
extractor.candidate_weighting(df=df)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def candidate_selection(self, n=3):
"""Select 1-3 grams as keyphrase candidates.
Args:
n (int): the length of the n-grams, defaults to 3.
"""
# select ngrams from 1 to 3 grams
self.ngram_selection(n=n)
# filter candidates containing punctuation marks
self.candidate_filtering()
# TODO: is filtering only candidate with punctuation mandatory ?
#self.candidate_filtering(list(string.punctuation))
def candidate_weighting(self, df=None):
"""Candidate weighting function using document frequencies.
Args:
df (dict): document frequencies, the number of documents should be
specified using the "--NB_DOC--" key.
"""
# initialize default document frequency counts if none provided
if df is None:
logging.warning('LoadFile._df_counts is hard coded to {}'.format(
self._df_counts))
df = load_document_frequency_file(self._df_counts, delimiter='\t')
# initialize the number of documents as --NB_DOC-- + 1 (current)
N = 1 + df.get('--NB_DOC--', 0)
        # loop through the candidates
for k, v in self.candidates.items():
# get candidate document frequency
candidate_df = 1 + df.get(k, 0)
# compute the idf score
idf = math.log(N / candidate_df, 2)
# add the idf score to the weights container
self.weights[k] = len(v.surface_forms) * idf
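    # Worked example (illustrative numbers): with --NB_DOC-- = 100 in the df
    # file, N = 101; a candidate seen in 10 of those documents has
    # candidate_df = 11 and idf = log2(101 / 11) ~= 3.2, which is then
    # multiplied by the candidate frequency in the current document.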
| 2,862 | 30.461538 | 79 | py |
pke | pke-master/pke/unsupervised/statistical/kpminer.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-10-2018
"""KP-Miner keyphrase extraction model.
Statistical approach to keyphrase extraction described in:
* Samhaa R. El-Beltagy and Ahmed Rafea.
KP-Miner: Participation in SemEval-2.
*Proceedings of SemEval*, pages 190-193, 2010.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import string
import logging
from pke.base import LoadFile
from pke.utils import load_document_frequency_file
class KPMiner(LoadFile):
"""KP-Miner keyphrase extraction model.
Parameterized example::
import pke
# 1. create a KPMiner extractor.
extractor = pke.unsupervised.KPMiner()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. select {1-5}-grams that do not contain punctuation marks or
# stopwords as keyphrase candidates. Set the least allowable seen
# frequency to 5 and the number of words after which candidates are
# filtered out to 200.
lasf = 5
cutoff = 200
extractor.candidate_selection(lasf=lasf, cutoff=cutoff)
# 4. weight the candidates using KPMiner weighting function.
df = pke.load_document_frequency_file(input_file='path/to/df.tsv.gz')
alpha = 2.3
sigma = 3.0
extractor.candidate_weighting(df=df, alpha=alpha, sigma=sigma)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def candidate_selection(self, lasf=3, cutoff=400):
"""The candidate selection as described in the KP-Miner paper.
Args:
lasf (int): least allowable seen frequency, defaults to 3.
cutoff (int): the number of words after which candidates are
filtered out, defaults to 400.
"""
# select ngrams from 1 to 5 grams
self.ngram_selection(n=5)
# filter candidates containing stopwords
self.candidate_filtering()
# further filter candidates using lasf and cutoff
# Python 2/3 compatible
for k in list(self.candidates):
# get the candidate
v = self.candidates[k]
# delete if first candidate offset is greater than cutoff
if v.offsets[0] > cutoff:
del self.candidates[k]
# delete if frequency is lower than lasf
elif len(v.surface_forms) < lasf:
del self.candidates[k]
def candidate_weighting(self, df=None, sigma=3.0, alpha=2.3):
"""Candidate weight calculation as described in the KP-Miner paper.
Note:
w = tf * idf * B * P_f
with
* B = N_d / (P_d * alpha) and B = min(sigma, B)
* N_d = the number of all candidate terms
* P_d = number of candidates whose length exceeds one
* P_f = 1
Args:
df (dict): document frequencies, the number of documents should
be specified using the "--NB_DOC--" key.
sigma (int): parameter for boosting factor, defaults to 3.0.
alpha (int): parameter for boosting factor, defaults to 2.3.
"""
# initialize default document frequency counts if none provided
if df is None:
logging.warning('LoadFile._df_counts is hard coded to {}'.format(
self._df_counts))
df = load_document_frequency_file(self._df_counts, delimiter='\t')
# initialize the number of documents as --NB_DOC-- + 1 (current)
N = 1 + df.get('--NB_DOC--', 0)
        # compute the number of occurrences of candidates longer than one word
P_d = sum([len(v.surface_forms) for v in self.candidates.values()
if len(v.lexical_form) > 1])
# fall back to 1 if all candidates are words
P_d = max(1, P_d)
        # compute the total number of candidate term occurrences
N_d = sum([len(v.surface_forms) for v in self.candidates.values()])
# compute the boosting factor
B = min(N_d / (P_d * alpha), sigma)
        # loop through the candidates
for k, v in self.candidates.items():
# get candidate document frequency
candidate_df = 1
# get the df for unigram only
if len(v.lexical_form) == 1:
candidate_df += df.get(k, 0)
# compute the idf score
idf = math.log(N / candidate_df, 2)
if len(v.lexical_form) == 1:
# If single word candidate do not apply boosting factor
self.weights[k] = len(v.surface_forms) * idf
else:
self.weights[k] = len(v.surface_forms) * B * idf
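    # Worked example of the boosting factor (illustrative numbers): with
    # N_d = 100 candidate occurrences, P_d = 20 multi-word occurrences,
    # alpha = 2.3 and sigma = 3.0, B = min(100 / (20 * 2.3), 3.0) ~= 2.17.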
| 5,139 | 32.594771 | 78 | py |
pke | pke-master/pke/unsupervised/statistical/yake.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin and Vítor Mangaravite
# Date: 09-10-2018
"""YAKE keyphrase extraction model.
Statistical approach to keyphrase extraction described in:
* Ricardo Campos, Vítor Mangaravite, Arian Pasquali, Alípio Mário Jorge,
Célia Nunes and Adam Jatowt.
YAKE! Keyword extraction from single documents using multiple local features.
*Information Sciences*, pages 257-289, 2020.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
from collections import defaultdict
import numpy
from nltk.metrics import edit_distance
from pke.base import LoadFile
class YAKE(LoadFile):
"""YAKE keyphrase extraction model.
Parameterized example::
import pke
from pke.lang import stopwords
# 1. create a YAKE extractor.
extractor = pke.unsupervised.YAKE()
# 2. load the content of the document.
        stoplist = stopwords.get('en')
extractor.load_document(input='path/to/input',
language='en',
stoplist=stoplist,
normalization=None)
# 3. select {1-3}-grams not containing punctuation marks and not
# beginning/ending with a stopword as candidates.
extractor.candidate_selection(n=3)
# 4. weight the candidates using YAKE weighting scheme, a window (in
# words) for computing left/right contexts can be specified.
window = 2
use_stems = False # use stems instead of words for weighting
extractor.candidate_weighting(window=window,
use_stems=use_stems)
# 5. get the 10-highest scored candidates as keyphrases.
# redundant keyphrases are removed from the output using levenshtein
# distance and a threshold.
threshold = 0.8
keyphrases = extractor.get_n_best(n=10, threshold=threshold)
"""
def __init__(self):
"""Redefining initializer for YAKE.
"""
super(YAKE, self).__init__()
self.words = defaultdict(set)
""" Container for the vocabulary. """
self.contexts = defaultdict(lambda: ([], []))
""" Container for word contexts. """
self.features = defaultdict(dict)
""" Container for word features. """
self.surface_to_lexical = {}
""" Mapping from surface form to lexical form. """
def candidate_selection(self, n=3):
"""Select 1-3 grams as keyphrase candidates. Candidates beginning or
ending with a stopword are filtered out. Words that do not contain
at least one alpha-numeric character are not allowed.
Args:
n (int): the n-gram length, defaults to 3.
"""
# select ngrams from 1 to 3 grams
self.ngram_selection(n=n)
# filter candidates containing punctuation marks
self.candidate_filtering()
# TODO: is filtering only candidate with punctuation mandatory ?
#self.candidate_filtering(list(string.punctuation))
# further filter candidates
for k in list(self.candidates):
# get the candidate
v = self.candidates[k]
# filter candidates starting/ending with a stopword or containing
# a first/last word with less than 3 characters
if v.surface_forms[0][0].lower() in self.stoplist or v.surface_forms[0][
-1].lower() in self.stoplist or len(
v.surface_forms[0][0]) < 3 or len(
v.surface_forms[0][-1]) < 3:
del self.candidates[k]
def _vocabulary_building(self, use_stems=False):
"""Build the vocabulary that will be used to weight candidates. Only
words containing at least one alpha-numeric character are kept.
Args:
use_stems (bool): whether to use stems instead of lowercase words
for weighting, defaults to False.
"""
# loop through sentences
for i, sentence in enumerate(self.sentences):
# compute the offset shift for the sentence
shift = sum([s.length for s in self.sentences[0:i]])
# loop through words in sentence
for j, word in enumerate(sentence.words):
# get the word or stem
index = word.lower()
if use_stems:
index = sentence.stems[j]
# add the word occurrence
self.words[index].add((shift + j, shift, i, word))
def _contexts_building(self, use_stems=False, window=2):
"""Build the contexts of the words for computing the relatedness
feature. Words that occur within a window of n words are considered as
context words. Only words co-occurring in a block (sequence of words
that appear in the vocabulary) are considered.
Args:
use_stems (bool): whether to use stems instead of lowercase words
for weighting, defaults to False.
window (int): the size in words of the window used for computing
co-occurrence counts, defaults to 2.
"""
# loop through sentences
for i, sentence in enumerate(self.sentences):
# lowercase the words
words = [w.lower() for w in sentence.words]
# replace with stems if needed
if use_stems:
words = sentence.stems
# block container
block = []
# loop through words in sentence
for j, word in enumerate(words):
# skip and flush block if word is not in vocabulary
if word not in self.words:
block = []
continue
# add the left context
self.contexts[word][0].extend(
[w for w in block[max(0, len(block) - window):len(block)]]
)
# add the right context
for w in block[max(0, len(block) - window):len(block)]:
self.contexts[w][1].append(word)
# add word to the current block
block.append(word)
def _feature_extraction(self):
"""Compute the weight of individual words using the following five
features:
1. CASING: gives importance to acronyms or words starting with a
capital letter.
CASING(w) = max(TF(U(w)), TF(A(w))) / (1 + log(TF(w)))
with TF(U(w) being the # times the word starts with an uppercase
letter, excepts beginning of sentences. TF(A(w)) is the # times
the word is marked as an acronym.
2. POSITION: gives importance to words occurring at the beginning of
the document.
POSITION(w) = log( log( 3 + Median(Sen(w)) ) )
with Sen(w) contains the position of the sentences where w
occurs.
3. FREQUENCY: gives importance to frequent words.
FREQUENCY(w) = TF(w) / ( MEAN_TF + STD_TF)
with MEAN_TF and STD_TF computed on valid_tfs which are words
that are not stopwords.
4. RELATEDNESS: gives importance to words that do not have the
characteristics of stopwords.
               RELATEDNESS(w) = 1 + (WR+WL)*(TF(w)/MAX_TF) + PL + PR
               (note: in this implementation the PL and PR terms are computed
               but not added to the score, see below)
5. DIFFERENT: gives importance to words that occurs in multiple
sentences.
DIFFERENT(w) = SF(w) / # sentences
with SF(w) being the sentence frequency of word w.
"""
# get the Term Frequency of each word
TF = [len(self.words[w]) for w in self.words]
# get the Term Frequency of non-stop words
TF_nsw = [len(self.words[w]) for w in self.words if w not in self.stoplist]
# compute statistics
mean_TF = numpy.mean(TF_nsw)
std_TF = numpy.std(TF_nsw)
max_TF = max(TF)
# Loop through the words
for word in self.words:
# Indicating whether the word is a stopword (vitordouzi change)
self.features[word]['isstop'] = word in self.stoplist or len(word) < 3
# Term Frequency
self.features[word]['TF'] = len(self.words[word])
# Uppercase/Acronym Term Frequencies
self.features[word]['TF_A'] = 0
self.features[word]['TF_U'] = 0
for (offset, shift, sent_id, surface_form) in self.words[word]:
if surface_form.isupper() and len(word) > 1:
self.features[word]['TF_A'] += 1
elif surface_form[0].isupper() and offset != shift:
self.features[word]['TF_U'] += 1
# 1. CASING feature
self.features[word]['CASING'] = max(self.features[word]['TF_A'],
self.features[word]['TF_U'])
self.features[word]['CASING'] /= 1.0 + math.log(
self.features[word]['TF'])
# 2. POSITION feature
sentence_ids = list(set([t[2] for t in self.words[word]]))
self.features[word]['POSITION'] = math.log(
3.0 + numpy.median(sentence_ids))
self.features[word]['POSITION'] = math.log(
self.features[word]['POSITION'])
# 3. FREQUENCY feature
self.features[word]['FREQUENCY'] = self.features[word]['TF']
self.features[word]['FREQUENCY'] /= (mean_TF + std_TF)
# 4. RELATEDNESS feature
self.features[word]['WL'] = 0.0
if len(self.contexts[word][0]):
self.features[word]['WL'] = len(set(self.contexts[word][0]))
self.features[word]['WL'] /= len(self.contexts[word][0])
self.features[word]['PL'] = len(set(self.contexts[word][0])) / max_TF
self.features[word]['WR'] = 0.0
if len(self.contexts[word][1]):
self.features[word]['WR'] = len(set(self.contexts[word][1]))
self.features[word]['WR'] /= len(self.contexts[word][1])
self.features[word]['PR'] = len(set(self.contexts[word][1])) / max_TF
self.features[word]['RELATEDNESS'] = 1
#self.features[word]['RELATEDNESS'] += self.features[word]['PL']
#self.features[word]['RELATEDNESS'] += self.features[word]['PR']
self.features[word]['RELATEDNESS'] += (self.features[word]['WR'] +
self.features[word]['WL']) * \
(self.features[word]['TF'] / max_TF)
# 5. DIFFERENT feature
self.features[word]['DIFFERENT'] = len(set(sentence_ids))
self.features[word]['DIFFERENT'] /= len(self.sentences)
# assemble the features to weight words
A = self.features[word]['CASING']
B = self.features[word]['POSITION']
C = self.features[word]['FREQUENCY']
D = self.features[word]['RELATEDNESS']
E = self.features[word]['DIFFERENT']
self.features[word]['weight'] = (D * B) / (A + (C / D) + (E / D))
def candidate_weighting(self, window=2, use_stems=False):
"""Candidate weight calculation as described in the YAKE paper.
Args:
use_stems (bool): whether to use stems instead of lowercase words
for weighting, defaults to False.
window (int): the size in words of the window used for computing
co-occurrence counts, defaults to 2.
"""
if not self.candidates:
return
# build the vocabulary
self._vocabulary_building(use_stems=use_stems)
# extract the contexts
self._contexts_building(use_stems=use_stems, window=window)
# compute the word features
self._feature_extraction()
# compute candidate weights
for k, v in self.candidates.items():
# use stems
if use_stems:
weights = [self.features[t]['weight'] for t in v.lexical_form]
self.weights[k] = numpy.prod(weights)
self.weights[k] /= len(v.offsets) * (1 + sum(weights))
# use words
else:
lowercase_forms = [' '.join(t).lower() for t in v.surface_forms]
for i, candidate in enumerate(lowercase_forms):
TF = lowercase_forms.count(candidate)
# computing differentiated weights for words and stopwords
# (vitordouzi change)
tokens = [t.lower() for t in v.surface_forms[i]]
prod_ = 1.
sum_ = 0.
for j, token in enumerate(tokens):
if self.features[token]['isstop']:
term_stop = token
prob_t1 = prob_t2 = 0
if j - 1 >= 0:
term_left = tokens[j-1]
prob_t1 = self.contexts[term_left][1].count(
term_stop) / self.features[term_left]['TF']
if j + 1 < len(tokens):
term_right = tokens[j+1]
prob_t2 = self.contexts[term_stop][0].count(
term_right) / self.features[term_right]['TF']
prob = prob_t1 * prob_t2
prod_ *= (1 + (1 - prob))
sum_ -= (1 - prob)
else:
prod_ *= self.features[token]['weight']
sum_ += self.features[token]['weight']
if sum_ == -1:
# The candidate is a one token stopword at the start or
# the end of the sentence
# Setting sum_ to -1+eps so 1+sum_ != 0
sum_ = -0.99999999999
self.weights[candidate] = prod_
self.weights[candidate] /= TF * (1 + sum_)
self.surface_to_lexical[candidate] = k
# weights = [self.features[t.lower()]['weight'] for t
# in v.surface_forms[i]]
# self.weights[candidate] = numpy.prod(weights)
# self.weights[candidate] /= TF * (1 + sum(weights))
# self.surface_to_lexical[candidate] = k
def is_redundant(self, candidate, prev, threshold=0.8):
"""Test if one candidate is redundant with respect to a list of already
selected candidates. A candidate is considered redundant if its
levenshtein distance, with another candidate that is ranked higher in
the list, is greater than a threshold.
Args:
candidate (str): the lexical form of the candidate.
prev (list): the list of already selected candidates.
threshold (float): the threshold used when computing the
levenshtein distance, defaults to 0.8.
"""
# loop through the already selected candidates
for prev_candidate in prev:
dist = edit_distance(candidate, prev_candidate)
dist /= max(len(candidate), len(prev_candidate))
if (1.0 - dist) > threshold:
return True
return False
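    # Worked example (illustrative): 'neural network' and 'neural networks'
    # have an edit distance of 1, i.e. a normalized similarity of
    # 1 - 1/15 ~= 0.93 > 0.8, so whichever of the two ranks lower is skipped.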
def get_n_best(self,
n=10,
redundancy_removal=True,
stemming=False,
threshold=0.8):
""" Returns the n-best candidates given the weights.
Args:
n (int): the number of candidates, defaults to 10.
redundancy_removal (bool): whether redundant keyphrases are
filtered out from the n-best list using levenshtein
distance, defaults to True.
            stemming (bool): whether to return stems (lexical forms) instead
                of the lowercased, first occurring surface forms, defaults to
                False.
threshold (float): the threshold used when computing the
levenshtein distance, defaults to 0.8.
"""
# sort candidates by ascending weight
best = sorted(self.weights, key=self.weights.get, reverse=False)
# remove redundant candidates
if redundancy_removal:
# initialize a new container for non redundant candidates
non_redundant_best = []
# loop through the best candidates
for candidate in best:
                # test whether candidate is redundant
if self.is_redundant(candidate,
non_redundant_best,
threshold=threshold):
continue
# add the candidate otherwise
non_redundant_best.append(candidate)
# break computation if the n-best are found
if len(non_redundant_best) >= n:
break
# copy non redundant candidates in best container
best = non_redundant_best
# get the list of best candidates as (lexical form, weight) tuples
n_best = [(u, self.weights[u]) for u in best[:min(n, len(best))]]
        # replace with stems (lexical forms) when stemming is enabled
if stemming:
for i, (candidate, weight) in enumerate(n_best):
if candidate not in self.candidates:
candidate = self.surface_to_lexical[candidate]
candidate = ' '.join(self.candidates[candidate].lexical_form)
n_best[i] = (candidate, weight)
# return the list of best candidates
return n_best
| 18,089 | 37.903226 | 86 | py |
pke | pke-master/pke/unsupervised/statistical/__init__.py | # -*- coding: utf-8 -*-
# Python Keyphrase Extraction toolkit: unsupervised statistical ranking models
| 103 | 33.666667 | 78 | py |
pke | pke-master/pke/unsupervised/graph_based/single_tpr.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-11-2018
"""Single Topical PageRank keyphrase extraction model.
This implementation is an improvement on a keyphrase extraction algorithm,
Topical PageRank (TPR), incorporating topical information from topic model and
described in:
* Lucas Sterckx, Thomas Demeester, Johannes Deleu and Chris Develder.
Topical Word Importance for Fast Keyphrase Extraction.
*In proceedings of WWW*, pages 121-122, 2015.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import networkx as nx
import numpy as np
from scipy.spatial.distance import cosine
from sklearn.feature_extraction.text import CountVectorizer
import pke.utils
from pke.unsupervised import SingleRank
class TopicalPageRank(SingleRank):
"""Single TopicalPageRank keyphrase extraction model.
Parameterized example::
import pke
# define the valid Part-of-Speeches to occur in the graph
pos = {'NOUN', 'PROPN', 'ADJ'}
# define the grammar for selecting the keyphrase candidates
grammar = "NP: {<ADJ>*<NOUN|PROPN>+}"
# 1. create a TopicalPageRank extractor.
extractor = pke.unsupervised.TopicalPageRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. select the noun phrases as keyphrase candidates.
extractor.candidate_selection(grammar=grammar)
# 4. weight the keyphrase candidates using Single Topical PageRank.
# Builds a word-graph in which edges connecting two words occurring
# in a window are weighted by co-occurrence counts.
extractor.candidate_weighting(window=10,
pos=pos,
lda_model='path/to/lda_model')
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for TopicalPageRank."""
super(TopicalPageRank, self).__init__()
def candidate_selection(self, grammar=None):
"""Candidate selection heuristic.
Here we select noun phrases that match the regular expression
(adjective)*(noun)+, which represents zero or more adjectives followed
by one or more nouns (Liu et al., 2010).
        Note that there are no details on this in the Single TPR paper, and
        this is the only information that can be found:
... a set of expressions or noun phrases ...
... Adjectives and nouns are then merged into keyphrases and
corresponding scores are summed and ranked. ...
Args:
grammar (str): grammar defining POS patterns of NPs, defaults to
"NP: {<ADJ>*<NOUN|PROPN>+}".
"""
if grammar is None:
grammar = "NP:{<ADJ>*<NOUN|PROPN>+}"
# select sequence of adjectives and nouns
self.grammar_selection(grammar=grammar)
def candidate_weighting(self,
window=10,
pos=None,
lda_model=None,
normalized=False):
"""Candidate weight calculation using a biased PageRank towards LDA
topic distributions.
Args:
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as
nodes in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
lda_model (pickle.gz): an LDA model produced by sklearn in
pickle compressed (.gz) format
            normalized (bool): normalize keyphrase scores by their length,
                defaults to False.
"""
if not self.candidates:
return
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# build the word graph
# ``Since keyphrases are usually noun phrases, we only add adjectives
# and nouns in word graph.'' -> (Liu et al., 2010)
self.build_word_graph(window=window,
pos=pos)
# set the default LDA model if none provided
if lda_model is None:
lda_model = os.path.join(self._models, "lda-1000-semeval2010.py3.pickle.gz")
logging.warning('LDA model is hard coded to {}'.format(lda_model))
# load parameters from file
if isinstance(lda_model, str):
dictionary, model = pke.utils.load_lda_model(lda_model)
# otherwise, we expect a loaded lda model
else:
dictionary, model = lda_model
# build the document representation
doc = []
for s in self.sentences:
doc.extend([s.stems[i] for i in range(s.length)])
# vectorize document
tf_vectorizer = CountVectorizer(stop_words=list(self.stoplist),
vocabulary=dictionary)
tf = tf_vectorizer.fit_transform([' '.join(doc)])
# compute the topic distribution over the document
distribution_topic_document = model.transform(tf)[0]
# compute the word distributions over topics
distributions = model.components_ / model.components_.sum(axis=1)[:,
np.newaxis]
# Computing W(w_i) indicating the full topical importance of each word
# w_i in the PageRank
# First, we determine the cosine similarity between the vector of
# word-topic probabilities P(w_i, Z) and the document-topic
# probabilities of the document P(Z, d)
K = len(distribution_topic_document)
W = {}
for word in self.graph.nodes():
if word in dictionary:
index = dictionary.index(word)
distribution_word_topic = [distributions[k][index] for k
in range(K)]
W[word] = 1 - cosine(distribution_word_topic,
distribution_topic_document)
# get the default probability for OOV words
default_similarity = min(W.values())
# set the default probability for OOV words
for word in self.graph.nodes():
if word not in W:
W[word] = default_similarity
# Normalize the topical word importance of words
norm = sum(W.values())
for word in W:
W[word] /= norm
# compute the word scores using biased random walk
w = nx.pagerank(self.graph,
personalization=W,
alpha=0.85,
tol=0.0001,
weight='weight')
# loop through the candidates
for k in self.candidates.keys():
tokens = self.candidates[k].lexical_form
self.weights[k] = sum([w[t] for t in tokens])
if normalized:
self.weights[k] /= len(tokens)
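        # Note (sketch): W acts as the personalization vector of PageRank, so
        # words whose topic distribution is close (in cosine terms) to the
        # document's topic distribution attract more random-walk probability
        # mass and boost the candidates that contain them.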
| 7,247 | 35.059701 | 88 | py |
pke | pke-master/pke/unsupervised/graph_based/singlerank.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-11-2018
"""SingleRank keyphrase extraction model.
Simple extension of the TextRank model described in:
* Xiaojun Wan and Jianguo Xiao.
CollabRank: Towards a Collaborative Approach to Single-Document Keyphrase
Extraction.
*In proceedings of the COLING*, pages 969-976, 2008.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as nx
from pke.unsupervised.graph_based.textrank import TextRank
class SingleRank(TextRank):
"""SingleRank keyphrase extraction model.
This model is an extension of the TextRank model that uses the number of
co-occurrences to weigh edges in the graph.
Parameterized example::
import pke
# define the set of valid Part-of-Speeches
pos = {'NOUN', 'PROPN', 'ADJ'}
# 1. create a SingleRank extractor.
extractor = pke.unsupervised.SingleRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. select the longest sequences of nouns and adjectives as candidates.
extractor.candidate_selection(pos=pos)
# 4. weight the candidates using the sum of their word's scores that are
# computed using random walk. In the graph, nodes are words of
# certain part-of-speech (nouns and adjectives) that are connected if
# they occur in a window of 10 words.
extractor.candidate_weighting(window=10,
pos=pos)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for SingleRank."""
super(SingleRank, self).__init__()
def build_word_graph(self, window=10, pos=None):
"""Build a graph representation of the document in which nodes/vertices
are words and edges represent co-occurrence relation. Syntactic filters
can be applied to select only words of certain Part-of-Speech.
Co-occurrence relations can be controlled using the distance (window)
between word occurrences in the document.
The number of times two words co-occur in a window is encoded as *edge
weights*. Sentence boundaries **are not** taken into account in the
window.
Args:
window (int): the window for connecting two words in the graph,
defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# flatten document as a sequence of (word, pass_syntactic_filter) tuples
text = [(word, sentence.pos[i] in pos) for sentence in self.sentences
for i, word in enumerate(sentence.stems)]
# add nodes to the graph
self.graph.add_nodes_from([word for word, valid in text if valid])
# add edges to the graph
for i, (node1, is_in_graph1) in enumerate(text):
# speed up things
if not is_in_graph1:
continue
for j in range(i + 1, min(i + window, len(text))):
node2, is_in_graph2 = text[j]
if is_in_graph2 and node1 != node2:
if not self.graph.has_edge(node1, node2):
self.graph.add_edge(node1, node2, weight=0.0)
self.graph[node1][node2]['weight'] += 1.0
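        # Illustrative sketch: if 'deep neural networks' and 'deep belief
        # networks' occur more than 10 words apart, the stems 'deep' and
        # 'network' co-occur once in each phrase and the edge carries weight 2.0.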
def candidate_weighting(self, window=10, pos=None, normalized=False):
"""Keyphrase candidate ranking using the weighted variant of the
TextRank formulae. Candidates are scored by the sum of the scores of
their words.
Args:
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
            normalized (bool): normalize keyphrase scores by their length,
                defaults to False.
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# build the word graph
self.build_word_graph(window=window, pos=pos)
# compute the word scores using random walk
w = nx.pagerank(self.graph,
alpha=0.85,
tol=0.0001,
weight='weight')
# loop through the candidates
for k in self.candidates.keys():
tokens = self.candidates[k].lexical_form
self.weights[k] = sum([w[t] for t in tokens])
if normalized:
self.weights[k] /= len(tokens)
# use position to break ties
self.weights[k] += (self.candidates[k].offsets[0] * 1e-8)
| 5,143 | 35.225352 | 80 | py |
pke | pke-master/pke/unsupervised/graph_based/textrank.py | # -*- coding: utf-8 -*-
# Authors: Ygor Gallina, Florian Boudin
# Date: 10-18-2018
"""TextRank keyphrase extraction model.
Implementation of the TextRank model for keyword extraction described in:
* Rada Mihalcea and Paul Tarau.
TextRank: Bringing Order into Texts
*In Proceedings of EMNLP*, 2004.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import logging
import networkx as nx
from pke.base import LoadFile
class TextRank(LoadFile):
"""TextRank for keyword extraction.
This model builds a graph that represents the text. A graph based ranking
algorithm is then applied to extract the lexical units (here the words) that
are most important in the text.
In this implementation, nodes are words of certain part-of-speech (nouns
and adjectives) and edges represent co-occurrence relation, controlled by
the distance between word occurrences (here a window of 2 words). Nodes
are ranked by the TextRank graph-based ranking algorithm in its unweighted
variant.
Parameterized example::
import pke
# define the set of valid Part-of-Speeches
pos = {'NOUN', 'PROPN', 'ADJ'}
# 1. create a TextRank extractor.
extractor = pke.unsupervised.TextRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. build the graph representation of the document and rank the words.
# Keyphrase candidates are composed from the 33-percent
# highest-ranked words.
extractor.candidate_weighting(window=2,
pos=pos,
top_percent=0.33)
# 4. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for TextRank."""
super(TextRank, self).__init__()
self.graph = nx.Graph()
"""The word graph."""
def candidate_selection(self, pos=None):
"""Candidate selection using longest sequences of PoS.
Args:
pos (set): set of valid POS tags, defaults to ('NOUN', 'PROPN',
'ADJ').
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# select sequence of adjectives and nouns
self.longest_pos_sequence_selection(valid_pos=pos)
def build_word_graph(self, window=2, pos=None):
"""Build a graph representation of the document in which nodes/vertices
are words and edges represent co-occurrence relation. Syntactic filters
can be applied to select only words of certain Part-of-Speech.
Co-occurrence relations can be controlled using the distance between
word occurrences in the document.
As the original paper does not give precise details on how the word
graph is constructed, we make the following assumptions from the example
given in Figure 2: 1) sentence boundaries **are not** taken into account
and, 2) stopwords and punctuation marks **are** considered as words when
computing the window.
Args:
window (int): the window for connecting two words in the graph,
defaults to 2.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# flatten document as a sequence of (word, pass_syntactic_filter) tuples
text = [(word, sentence.pos[i] in pos) for sentence in self.sentences
for i, word in enumerate(sentence.stems)]
# add nodes to the graph
self.graph.add_nodes_from([word for word, valid in text if valid])
# add edges to the graph
for i, (node1, is_in_graph1) in enumerate(text):
# speed up things
if not is_in_graph1:
continue
for j in range(i + 1, min(i + window, len(text))):
node2, is_in_graph2 = text[j]
if is_in_graph2 and node1 != node2:
self.graph.add_edge(node1, node2)
def candidate_weighting(self,
window=2,
pos=None,
top_percent=None,
normalized=False):
"""Tailored candidate ranking method for TextRank. Keyphrase candidates
are either composed from the T-percent highest-ranked words as in the
original paper or extracted using the `candidate_selection()` method.
        Candidates are ranked using the sum of their words' scores, optionally
        normalized by candidate length.
Args:
window (int): the window for connecting two words in the graph,
defaults to 2.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
top_percent (float): percentage of top vertices to keep for phrase
generation.
            normalized (bool): normalize keyphrase scores by their length,
                defaults to False.
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# build the word graph
self.build_word_graph(window=window, pos=pos)
# compute the word scores using the unweighted PageRank formulae
w = nx.pagerank(self.graph, alpha=0.85, tol=0.0001, weight=None)
# generate the phrases from the T-percent top ranked words
if top_percent is not None:
# warn user as this is not the pke way of doing it
logging.warning("Candidates are generated using {}-top".format(
top_percent))
# computing the number of top keywords
nb_nodes = self.graph.number_of_nodes()
to_keep = min(math.floor(nb_nodes * top_percent), nb_nodes)
# sorting the nodes by decreasing scores
top_words = sorted(w, key=w.get, reverse=True)
# creating keyphrases from the T-top words
self.longest_keyword_sequence_selection(top_words[:int(to_keep)])
# weight candidates using the sum of their word scores
for k in self.candidates.keys():
tokens = self.candidates[k].lexical_form
self.weights[k] = sum([w[t] for t in tokens])
if normalized:
self.weights[k] /= len(tokens)
# use position to break ties
self.weights[k] += (self.candidates[k].offsets[0]*1e-8)
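# Minimal usage sketch of the class above (illustrative only): it relies on
# pke's default spaCy-based preprocessing, so an English spaCy model is
# assumed to be installed, and it uses the T-percent pathway of
# candidate_weighting() instead of an explicit candidate_selection() call.
if __name__ == '__main__':
    extractor = TextRank()
    extractor.load_document(
        input='Keyphrase extraction selects the most salient phrases from a document.',
        language='en')
    extractor.candidate_weighting(window=2,
                                  pos={'NOUN', 'PROPN', 'ADJ'},
                                  top_percent=0.33)
    for keyphrase, score in extractor.get_n_best(n=5):
        print(keyphrase, score)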
| 6,822 | 35.682796 | 80 | py |
pke | pke-master/pke/unsupervised/graph_based/multipartiterank.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-11-2018
"""Multipartite graph keyphrase extraction model.
Graph-based ranking approach to keyphrase extraction described in:
* Florian Boudin.
Unsupervised Keyphrase Extraction with Multipartite Graphs.
*In proceedings of NAACL*, pages 667-672, 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from itertools import combinations
import networkx as nx
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist
from pke.unsupervised import TopicRank
class MultipartiteRank(TopicRank):
"""Multipartite graph keyphrase extraction model.
Parameterized example::
import pke
import string
# 1. create a MultipartiteRank extractor.
extractor = pke.unsupervised.MultipartiteRank()
stoplist = list(string.punctuation)
stoplist += pke.lang.stopwords.get('en')
# 2. load the content of the document.
extractor.load_document(input='path/to/input.xml',
stoplist=stoplist)
# 3. select the longest sequences of nouns and adjectives, that do
# not contain punctuation marks or stopwords as candidates.
pos = {'NOUN', 'PROPN', 'ADJ'}
extractor.candidate_selection(pos=pos)
# 4. build the Multipartite graph and rank candidates using random
# walk, alpha controls the weight adjustment mechanism, see
# TopicRank for threshold/method parameters.
extractor.candidate_weighting(alpha=1.1,
threshold=0.74,
method='average')
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for MultipartiteRank.
"""
super(MultipartiteRank, self).__init__()
self.topic_identifiers = {}
""" A container for linking candidates to topic identifiers. """
self.graph = nx.DiGraph()
""" Redefine the graph as a directed graph. """
def topic_clustering(self,
threshold=0.74,
method='average'):
""" Clustering candidates into topics.
Args:
threshold (float): the minimum similarity for clustering,
defaults to 0.74, i.e. more than 1/4 of stem overlap
similarity.
method (str): the linkage method, defaults to average.
"""
# handle document with only one candidate
if len(self.candidates) == 1:
candidate = list(self.candidates)[0]
self.topics.append([candidate])
self.topic_identifiers[candidate] = 0
return
# vectorize the candidates
candidates, X = self.vectorize_candidates()
# compute the distance matrix
Y = pdist(X, 'jaccard')
Y = np.nan_to_num(Y)
# compute the clusters
Z = linkage(Y, method=method)
# form flat clusters
clusters = fcluster(Z, t=threshold, criterion='distance')
# for each cluster id
for cluster_id in range(1, max(clusters) + 1):
self.topics.append([candidates[j] for j in range(len(clusters))
if clusters[j] == cluster_id])
# assign cluster identifiers to candidates
for i, cluster_id in enumerate(clusters):
self.topic_identifiers[candidates[i]] = cluster_id - 1
def build_topic_graph(self):
""" Build the Multipartite graph. """
# adding the nodes to the graph
self.graph.add_nodes_from(self.candidates.keys())
# pre-compute edge weights
for node_i, node_j in combinations(self.candidates.keys(), 2):
# discard intra-topic edges
if self.topic_identifiers[node_i] \
== self.topic_identifiers[node_j]:
continue
weights = []
for p_i in self.candidates[node_i].offsets:
for p_j in self.candidates[node_j].offsets:
len_i = len(self.candidates[node_i].lexical_form)
len_j = len(self.candidates[node_j].lexical_form)
                    # gap is the number of tokens between the 2 candidates + 1
gap = self.compute_gap(p_i, p_j, len_i, len_j)
weights.append(1.0 / gap)
# add weighted edges
if weights:
# node_i -> node_j
self.graph.add_edge(node_i, node_j, weight=sum(weights))
# node_j -> node_i
self.graph.add_edge(node_j, node_i, weight=sum(weights))
def weight_adjustment(self, alpha=1.1):
""" Adjust edge weights for boosting some candidates.
Args:
alpha (float): hyper-parameter that controls the strength of
the weight adjustment, defaults to 1.1.
"""
# weighted_edges = defaultdict(list)
weighted_edges = {}
        # total number of tokens in the document
        norm = sum([s.length for s in self.sentences])
# Topical boosting
for variants in self.topics:
# skip one candidate topics
if len(variants) == 1:
continue
# get the offsets
offsets = [self.candidates[v].offsets[0] for v in variants]
# get the first occurring variant
first = variants[offsets.index(min(offsets))]
# find the nodes to which it connects -- Python 2/3 compatible
# for start, end in self.graph.edges_iter(first):
for start, end in self.graph.edges(first):
boosters = []
for v in variants:
if v != first and self.graph.has_edge(v, end):
boosters.append(self.graph[v][end]['weight'])
if boosters:
weighted_edges[(start, end)] = np.sum(boosters)
# update edge weights -- Python 2/3 compatible
# for nodes, boosters in weighted_edges.iteritems():
for nodes, boosters in weighted_edges.items():
node_i, node_j = nodes
position_i = 1.0 / (1 + self.candidates[node_i].offsets[0])
position_i = math.exp(position_i)
self.graph[node_j][node_i]['weight'] += (
boosters * alpha * position_i)
def candidate_weighting(self,
threshold=0.74,
method='average',
alpha=1.1):
""" Candidate weight calculation using random walk.
Args:
            threshold (float): the minimum similarity for clustering,
                defaults to 0.74.
method (str): the linkage method, defaults to average.
alpha (float): hyper-parameter that controls the strength of
the weight adjustment, defaults to 1.1.
"""
if not self.candidates:
return
# cluster the candidates
self.topic_clustering(threshold=threshold, method=method)
# build the topic graph
self.build_topic_graph()
if alpha > 0.0:
self.weight_adjustment(alpha)
# compute the word scores using random walk
self.weights = nx.pagerank(self.graph)
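# Small numeric illustration of weight_adjustment() above (toy values, not a
# full pipeline): an incoming edge to the first-occurring variant of a topic
# is increased by alpha * exp(1 / (1 + first_offset)) * sum of the weights of
# the edges linking the topic's other variants to the same neighbour.
if __name__ == '__main__':
    alpha, first_offset, boosters = 1.1, 0, [0.5, 0.25]
    boost = sum(boosters) * alpha * math.exp(1.0 / (1 + first_offset))
    print('edge weight boost: {:.3f}'.format(boost))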
| 7,587 | 32.875 | 77 | py |
pke | pke-master/pke/unsupervised/graph_based/topicrank.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-10-2018
"""TopicRank keyphrase extraction model.
Graph-based ranking approach to keyphrase extraction described in:
* Adrien Bougouin, Florian Boudin and Béatrice Daille.
TopicRank: Graph-Based Topic Ranking for Keyphrase Extraction.
*In proceedings of IJCNLP*, pages 543-551, 2013.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from itertools import combinations
import networkx as nx
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist
from pke.base import LoadFile
class TopicRank(LoadFile):
"""TopicRank keyphrase extraction model.
Parameterized example::
import pke
import string
# 1. create a TopicRank extractor.
extractor = pke.unsupervised.TopicRank()
# 2. load the content of the document.
stoplist = list(string.punctuation)
stoplist += pke.lang.stopwords.get('en')
extractor.load_document(input='path/to/input.xml',
stoplist=stoplist)
# 3. select the longest sequences of nouns and adjectives, that do
# not contain punctuation marks or stopwords as candidates.
pos = {'NOUN', 'PROPN', 'ADJ'}
extractor.candidate_selection(pos=pos)
# 4. build topics by grouping candidates with HAC (average linkage,
# threshold of 1/4 of shared stems). Weight the topics using random
        # walk, and select the first occurring candidate from each topic.
extractor.candidate_weighting(threshold=0.74, method='average')
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for TopicRank.
"""
super(TopicRank, self).__init__()
self.graph = nx.Graph()
""" The topic graph. """
self.topics = []
""" The topic container. """
self._w = {}
""" Weights computed for each topic. """
def candidate_selection(self, pos=None):
"""Selects longest sequences of nouns and adjectives as keyphrase
candidates.
Args:
pos (set): the set of valid POS tags, defaults to ('NOUN',
'PROPN', 'ADJ').
"""
# define default pos tags set
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# select sequence of adjectives and nouns
self.longest_pos_sequence_selection(valid_pos=pos)
# filter candidates containing stopwords
self.candidate_filtering()
# TODO: is filtering only candidate with punctuation mandatory ?
#self.candidate_filtering(list(string.punctuation))
def vectorize_candidates(self):
"""Vectorize the keyphrase candidates.
Returns:
C (list): the list of candidates.
X (matrix): vectorized representation of the candidates.
"""
# build the vocabulary, i.e. setting the vector dimensions
dim = set([])
# for k, v in self.candidates.iteritems():
# iterate Python 2/3 compatible
for (k, v) in self.candidates.items():
for w in v.lexical_form:
dim.add(w)
dim = list(dim)
# vectorize the candidates Python 2/3 + sort for random issues
C = list(self.candidates) # .keys()
C.sort()
X = np.zeros((len(C), len(dim)))
for i, k in enumerate(C):
for w in self.candidates[k].lexical_form:
X[i, dim.index(w)] += 1
return C, X
def topic_clustering(self, threshold=0.74, method='average'):
"""Clustering candidates into topics.
Args:
threshold (float): the minimum similarity for clustering, defaults
to 0.74, i.e. more than 1/4 of stem overlap similarity.
method (str): the linkage method, defaults to average.
"""
# handle document with only one candidate
if len(self.candidates) == 1:
self.topics.append([list(self.candidates)[0]])
return
# vectorize the candidates
candidates, X = self.vectorize_candidates()
# compute the distance matrix
Y = pdist(X, 'jaccard')
# compute the clusters
Z = linkage(Y, method=method)
# form flat clusters
clusters = fcluster(Z, t=threshold, criterion='distance')
# for each topic identifier
for cluster_id in range(1, max(clusters) + 1):
self.topics.append([candidates[j] for j in range(len(clusters))
if clusters[j] == cluster_id])
def compute_gap(self, p_i, p_j, len_i, len_j):
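        """Compute the gap between two candidate offsets, i.e. the number of
        tokens separating the two candidates plus one, so that adjacent
        candidates get a gap of 1.
        Args:
            p_i (int): the offset of the first candidate.
            p_j (int): the offset of the second candidate.
            len_i (int): the length (in words) of the first candidate.
            len_j (int): the length (in words) of the second candidate.
        """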
# compute gap
gap = abs(p_i - p_j)
# alter gap according to candidate length
if p_i < p_j:
gap -= len_i - 1
elif p_i > p_j:
gap -= len_j - 1
if gap == 0:
gap = 1
return gap
def build_topic_graph(self):
"""Build topic graph."""
# adding the nodes to the graph
self.graph.add_nodes_from(range(len(self.topics)))
# loop through the topics to connect the nodes
for i, j in combinations(range(len(self.topics)), 2):
self.graph.add_edge(i, j, weight=0.0)
for c_i in self.topics[i]:
for c_j in self.topics[j]:
for p_i in self.candidates[c_i].offsets:
for p_j in self.candidates[c_j].offsets:
len_i = len(self.candidates[c_i].lexical_form)
len_j = len(self.candidates[c_j].lexical_form)
                            # gap is the number of tokens between the 2 candidates + 1
gap = self.compute_gap(p_i, p_j, len_i, len_j)
self.graph[i][j]["weight"] += 1.0 / gap
def candidate_weighting(self,
threshold=0.74,
method='average',
heuristic=None):
"""Candidate ranking using random walk.
Args:
threshold (float): the minimum similarity for clustering, defaults
to 0.74.
method (str): the linkage method, defaults to average.
heuristic (str): the heuristic for selecting the best candidate for
each topic, defaults to first occurring candidate. Other
options are 'frequent' (most frequent candidate, position is
used for ties).
"""
if not self.candidates:
return
# cluster the candidates
self.topic_clustering(threshold=threshold, method=method)
# build the topic graph
self.build_topic_graph()
# compute the word scores using random walk
self._w = nx.pagerank(self.graph, alpha=0.85, weight='weight')
# loop through the topics
for i, topic in enumerate(self.topics):
# get the offsets of the topic candidates
offsets = [self.candidates[t].offsets[0] for t in topic]
# get first candidate from topic
if heuristic == 'frequent':
# get frequencies for each candidate within the topic
freq = [len(self.candidates[t].surface_forms) for t in topic]
# get the indexes of the most frequent candidates
indexes = [j for j, f in enumerate(freq) if f == max(freq)]
# offsets of the indexes
indexes_offsets = [offsets[j] for j in indexes]
                # Choosing the first occurring most frequent candidate
most_frequent = offsets.index(min(indexes_offsets))
self.weights[topic[most_frequent]] = self._w[i]
else:
first = offsets.index(min(offsets))
self.weights[topic[first]] = self._w[i]
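# Numeric illustration of the 0.74 threshold used in topic_clustering() (toy
# values): with a Jaccard distance, two candidates sharing 1 stem out of 3
# distinct stems are at distance 1 - 1/3 ~= 0.67 <= 0.74 and fall into the
# same topic, while sharing 1 stem out of 4 gives 0.75 > 0.74 and keeps them
# in separate topics.
if __name__ == '__main__':
    print('1 shared stem out of 3 -> distance', 1 - 1 / 3)
    print('1 shared stem out of 4 -> distance', 1 - 1 / 4)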
| 8,087 | 32.012245 | 85 | py |
pke | pke-master/pke/unsupervised/graph_based/__init__.py | # -*- coding: utf-8 -*-
# Python Keyphrase Extraction toolkit: unsupervised graph-based ranking models
| 103 | 33.666667 | 78 | py |
pke | pke-master/pke/unsupervised/graph_based/positionrank.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-11-2018
"""PositionRank keyphrase extraction model.
PositionRank is an unsupervised model for keyphrase extraction from scholarly
documents that incorporates information from all positions of a word's
occurrences into a biased PageRank. The model is described in:
* Corina Florescu and Cornelia Caragea.
PositionRank: An Unsupervised Approach to Keyphrase Extraction from Scholarly
Documents.
*In proceedings of ACL*, pages 1105-1115, 2017.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pke.unsupervised import SingleRank
import networkx as nx
from collections import defaultdict
class PositionRank(SingleRank):
"""PositionRank keyphrase extraction model.
Parameterized example::
import pke
# define the valid Part-of-Speeches to occur in the graph
pos = {'NOUN', 'PROPN', 'ADJ'}
# define the grammar for selecting the keyphrase candidates
grammar = "NP: {<ADJ>*<NOUN|PROPN>+}"
# 1. create a PositionRank extractor.
extractor = pke.unsupervised.PositionRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. select the noun phrases up to 3 words as keyphrase candidates.
extractor.candidate_selection(grammar=grammar,
maximum_word_number=3)
        # 4. weight the candidates using the sum of their words' scores that are
        #    computed using a random walk biased with the position of the words
# in the document. In the graph, nodes are words (nouns and
# adjectives only) that are connected if they occur in a window of
# 10 words.
extractor.candidate_weighting(window=10,
pos=pos)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for PositionRank."""
super(PositionRank, self).__init__()
self.positions = defaultdict(float)
"""Container the sums of word's inverse positions."""
def candidate_selection(self,
grammar=None,
maximum_word_number=3):
"""Candidate selection heuristic using a syntactic PoS pattern for
noun phrase extraction.
Keyphrase candidates are noun phrases that match the regular expression
(adjective)*(noun)+, of length up to three.
Args:
grammar (str): grammar defining POS patterns of NPs, defaults to
"NP: {<ADJ>*<NOUN|PROPN>+}".
maximum_word_number (int): the maximum number of words allowed for
keyphrase candidates, defaults to 3.
"""
if grammar is None:
grammar = "NP:{<ADJ>*<NOUN|PROPN>+}"
# select sequence of adjectives and nouns
self.grammar_selection(grammar=grammar)
        # filter out candidates longer than maximum_word_number words
for k in list(self.candidates):
v = self.candidates[k]
if len(v.lexical_form) > maximum_word_number:
del self.candidates[k]
def build_word_graph(self, window=10, pos=None):
"""Build the graph representation of the document.
        In the graph, nodes are words that pass a Part-of-Speech filter. Two
nodes are connected if the words corresponding to these nodes co-occur
within a `window` of contiguous tokens. The weight of an edge is
computed based on the co-occurrence count of the two words within a
`window` of successive tokens.
Args:
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# flatten document as a sequence of only valid (word, position) tuples
text = []
for i, sentence in enumerate(self.sentences):
shift = sum([s.length for s in self.sentences[0:i]])
for j, word in enumerate(sentence.stems):
if sentence.pos[j] in pos:
text.append((word, shift+j))
# add nodes to the graph
self.graph.add_nodes_from([word for (word, position) in text])
# add edges to the graph
for i, (node1, position1) in enumerate(text):
j = i+1
while j < len(text) and (text[j][1] - position1) < window:
node2, position2 = text[j]
if node1 != node2:
if not self.graph.has_edge(node1, node2):
self.graph.add_edge(node1, node2, weight=0)
self.graph[node1][node2]['weight'] += 1
j = j + 1
# compute the sums of the word's inverse positions
for word, position in text:
self.positions[word] += 1 / (position + 1)
def candidate_weighting(self, window=10, pos=None, normalized=False):
"""Candidate weight calculation using a biased PageRank.
Args:
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
            normalized (bool): normalize keyphrase score by its length,
                defaults to False.
"""
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# build the word graph
self.build_word_graph(window=window,
pos=pos)
# normalize cumulated inverse positions
norm = sum(self.positions.values())
for word in self.positions:
self.positions[word] /= norm
# compute the word scores using biased random walk
w = nx.pagerank(self.graph,
alpha=0.85,
tol=0.0001,
personalization=self.positions,
weight='weight')
# loop through the candidates
for k in self.candidates.keys():
tokens = self.candidates[k].lexical_form
self.weights[k] = sum([w.get(t, 0.0) for t in tokens])
if normalized:
self.weights[k] /= len(tokens)
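# Small numeric illustration of the position bias accumulated in
# build_word_graph() (toy offsets): a word occurring at zero-based offsets 2
# and 9 gets 1/(2+1) + 1/(9+1) ~= 0.43 before normalization, so early
# occurrences dominate the PageRank personalization vector.
if __name__ == '__main__':
    print(1 / (2 + 1) + 1 / (9 + 1))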
| 6,775 | 35.826087 | 80 | py |
pke | pke-master/pke/supervised/api.py | # -*- coding: utf-8 -*-
""" Abstract base class for Supervised models. """
from __future__ import division
from __future__ import absolute_import
import os
from pke.base import LoadFile
from sklearn.preprocessing import MinMaxScaler
from joblib import load as load_model
class SupervisedLoadFile(LoadFile):
""" The SupervisedLoadFile class that provides extra base functions for
supervised models. """
def __init__(self):
""" Redefining initializer. """
super(SupervisedLoadFile, self).__init__()
self.instances = {}
""" The instances container. """
def feature_scaling(self):
""" Scale features to [0,1]. """
candidates = self.instances.keys()
X = [self.instances[u] for u in candidates]
X = MinMaxScaler().fit_transform(X)
for i, candidate in enumerate(candidates):
self.instances[candidate] = X[i]
def feature_extraction(self):
""" Skeleton for feature extraction. """
pass
def classify_candidates(self, model=None):
""" Classify the candidates as keyphrase or not keyphrase.
Args:
model (str): the path to load the model in pickle format,
default to None.
"""
# set the default model if none provided
if model is None:
instance = self.__class__.__name__
model = os.path.join(self._models, instance + "-semeval2010.py3.pickle")
# load the model
clf = load_model(model)
# with open(model, 'rb') as f:
# clf = pickle.load(f)
# get matrix of instances
candidates = self.instances.keys()
X = [self.instances[u] for u in candidates]
# classify candidates
y = clf.predict_proba(X)
for i, candidate in enumerate(candidates):
self.weights[candidate] = y[i][1]
def candidate_weighting(self):
""" Extract features and classify candidates with default parameters."""
if not self.candidates:
return
self.feature_extraction()
self.classify_candidates()
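# Tiny illustration of the [0, 1] min-max scaling performed by
# feature_scaling() (toy instance values, not produced by a real
# feature_extraction()).
if __name__ == '__main__':
    loader = SupervisedLoadFile()
    loader.instances = {'cand_a': [2.0, 10.0], 'cand_b': [4.0, 30.0]}
    loader.feature_scaling()
    print(loader.instances)  # cand_a -> [0., 0.], cand_b -> [1., 1.]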
| 2,140 | 27.546667 | 84 | py |
pke | pke-master/pke/supervised/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from pke.supervised.api import SupervisedLoadFile
from pke.supervised.feature_based.kea import Kea
from pke.supervised.feature_based.wingnus import WINGNUS
| 220 | 30.571429 | 56 | py |
pke | pke-master/pke/supervised/feature_based/kea.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-10-2018
"""Kea supervised keyphrase extraction model.
Kea is a supervised model for keyphrase extraction that uses two features,
namely TF x IDF and first occurrence, to classify keyphrase candidates as
keyphrase or not. The model is described in:
* Ian Witten, Gordon Paynter, Eibe Frank, Carl Gutwin and Craig Nevill-Manning.
KEA: Practical Automatic Keyphrase Extraction.
*Proceedings of the 4th ACM Conference on Digital Libraries*, pages 254–255,
1999.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import logging
import numpy as np
from joblib import dump as dump_model
from sklearn.naive_bayes import MultinomialNB
from pke.supervised.api import SupervisedLoadFile
from pke.utils import load_document_frequency_file
class Kea(SupervisedLoadFile):
"""Kea keyphrase extraction model.
Parameterized example::
import pke
# 1. create a Kea extractor.
extractor = pke.supervised.Kea()
# 2. load the content of the document.
stoplist = pke.lang.stopwords.get('en')
extractor.load_document(input='path/to/input',
language='en',
stoplist=stoplist,
normalization=None)
# 3. select 1-3 grams that do not start or end with a stopword as
# candidates. Candidates that contain punctuation marks as words
# are discarded.
extractor.candidate_selection()
# 4. classify candidates as keyphrase or not keyphrase.
df = pke.load_document_frequency_file(input_file='path/to/df.tsv.gz')
model_file = 'path/to/kea_model'
extractor.candidate_weighting(model_file=model_file,
df=df)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for Kea."""
super(Kea, self).__init__()
def candidate_selection(self):
"""Select 1-3 grams of `normalized` words as keyphrase candidates.
Candidates that start or end with a stopword are discarded. Candidates
that contain punctuation marks (from `string.punctuation`) as words are
filtered out.
"""
# select ngrams from 1 to 3 grams
self.ngram_selection(n=3)
# filter candidates
self.candidate_filtering()
# TODO: is filtering only candidate with punctuation mandatory ?
#self.candidate_filtering(list(string.punctuation))
# filter candidates that start or end with a stopword
for k in list(self.candidates):
# get the candidate
v = self.candidates[k]
# delete if candidate contains a stopword in first/last position
words = [u.lower() for u in v.surface_forms[0]]
if words[0] in self.stoplist or words[-1] in self.stoplist:
del self.candidates[k]
def feature_extraction(self, df=None, training=False):
"""Extract features for each keyphrase candidate. Features are the
tf*idf of the candidate and its first occurrence relative to the
document.
Args:
df (dict): document frequencies, the number of documents should be
specified using the "--NB_DOC--" key.
training (bool): indicates whether features are computed for the
training set for computing IDF weights, defaults to false.
"""
# initialize default document frequency counts if none provided
if df is None:
logging.warning('LoadFile._df_counts is hard coded to {}'.format(
self._df_counts))
df = load_document_frequency_file(self._df_counts, delimiter='\t')
# initialize the number of documents as --NB_DOC--
N = df.get('--NB_DOC--', 0) + 1
if training:
N -= 1
# find the maximum offset
maximum_offset = float(sum([s.length for s in self.sentences]))
for k, v in self.candidates.items():
# get candidate document frequency
candidate_df = 1 + df.get(k, 0)
# hack for handling training documents
if training and candidate_df > 1:
candidate_df -= 1
# compute the tf*idf of the candidate
idf = math.log(N / candidate_df, 2)
# add the features to the instance container
self.instances[k] = np.array([len(v.surface_forms) * idf,
v.offsets[0] / maximum_offset])
# scale features
self.feature_scaling()
def candidate_weighting(self, model_file=None, df=None):
"""Extract features and classify candidates.
Args:
model_file (str): path to the model file.
df (dict): document frequencies, the number of documents should
be specified using the "--NB_DOC--" key.
"""
if not self.candidates:
return
self.feature_extraction(df=df)
self.classify_candidates(model=model_file)
@staticmethod
def train(training_instances, training_classes, model_file):
""" Train a Naive Bayes classifier and store the model in a file.
Args:
training_instances (list): list of features.
training_classes (list): list of binary values.
model_file (str): the model output file.
"""
clf = MultinomialNB()
clf.fit(training_instances, training_classes)
dump_model(clf, model_file)
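# Minimal training sketch for the train() helper above, using toy feature
# vectors; a real pipeline obtains them from candidate_selection() followed
# by feature_extraction(training=True) on an annotated corpus.
if __name__ == '__main__':
    toy_instances = [[2.5, 0.1], [0.3, 0.9]]  # [tf*idf, relative first offset]
    toy_classes = [1, 0]                      # 1 = keyphrase, 0 = not keyphrase
    Kea.train(toy_instances, toy_classes, model_file='kea-toy-model.pickle')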
| 5,778 | 33.60479 | 79 | py |
pke | pke-master/pke/supervised/feature_based/wingnus.py | # -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 09-10-2018
"""Kea keyphrase extraction model.
Supervised approach to keyphrase extraction described in:
* Thuy Dung Nguyen and Minh-Thang Luong.
WINGNUS: Keyphrase Extraction Utilizing Document Logical Structure.
*Proceedings of SemEval*, pages 166–169, 2010.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import logging
import numpy as np
from joblib import dump as dump_model
from sklearn.naive_bayes import MultinomialNB
from pke.supervised.api import SupervisedLoadFile
from pke.utils import load_document_frequency_file
class WINGNUS(SupervisedLoadFile):
"""WINGNUS keyphrase extraction model.
Parameterized example::
import pke
# 1. create a WINGNUS extractor.
extractor = pke.supervised.WINGNUS()
# 2. load the content of the document.
extractor.load_document(input='path/to/input.xml')
# 3. select simplex noun phrases as candidates.
extractor.candidate_selection()
# 4. classify candidates as keyphrase or not keyphrase.
df = pke.load_document_frequency_file(input_file='path/to/df.tsv.gz')
model_file = 'path/to/wingnus_model'
extractor.candidate_weighting(self, model_file=model_file, df=df)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
"""Redefining initializer for WINGNUS."""
super(WINGNUS, self).__init__()
def candidate_selection(self, grammar=None):
"""Select noun phrases (NP) and NP containing a pre-propositional phrase
(NP IN NP) as keyphrase candidates.
Args:
grammar (str): grammar defining POS patterns of NPs.
"""
# initialize default grammar if none provided
if grammar is None:
grammar = r"""
NBAR:
{<NOUN|PROPN|ADJ>{,2}<NOUN|PROPN>}
NP:
{<NBAR>}
{<NBAR><ADP><NBAR>}
"""
self.grammar_selection(grammar)
def feature_extraction(self, df=None, training=False, features_set=None):
"""Extract features for each candidate.
Args:
df (dict): document frequencies, the number of documents should be
specified using the "--NB_DOC--" key.
training (bool): indicates whether features are computed for the
training set for computing IDF weights, defaults to false.
features_set (list): the set of features to use, defaults to
[1, 4, 6].
"""
# define the default features_set
if features_set is None:
features_set = [1, 4, 6]
# initialize default document frequency counts if none provided
if df is None:
logging.warning('LoadFile._df_counts is hard coded to {}'.format(
self._df_counts))
df = load_document_frequency_file(self._df_counts, delimiter='\t')
# initialize the number of documents as --NB_DOC--
N = df.get('--NB_DOC--', 0) + 1
if training:
N -= 1
# find the maximum offset
maximum_offset = float(sum([s.length for s in self.sentences]))
# loop through the candidates
for k, v in self.candidates.items():
# initialize features array
feature_array = []
# get candidate document frequency
candidate_df = 1 + df.get(k, 0)
# hack for handling training documents
if training and candidate_df > 1:
candidate_df -= 1
# compute the tf*idf of the candidate
idf = math.log(N / candidate_df, 2)
# [F1] TF*IDF
feature_array.append(len(v.surface_forms) * idf)
# [F2] -> TF
feature_array.append(len(v.surface_forms))
# [F3] -> term frequency of substrings
tf_of_substrings = 0
for i in range(len(v.lexical_form)):
for j in range(i, min(len(v.lexical_form), i + 3)):
sub_words = v.lexical_form[i:j + 1]
sub_string = ' '.join(sub_words)
# skip if substring is fullstring
if sub_string == ' '.join(v.lexical_form):
continue
# skip if substring contains a stopword
if set(sub_words).intersection(self.stoplist):
continue
                    # check whether the substring occurs "as is"
                    if sub_string in self.candidates:
                        # loop through substring offsets
for offset_1 in self.candidates[sub_string].offsets:
is_included = False
for offset_2 in v.offsets:
if offset_2 <= offset_1 <= offset_2 + len(v.lexical_form):
is_included = True
if not is_included:
tf_of_substrings += 1
feature_array.append(tf_of_substrings)
# [F4] -> relative first occurrence
feature_array.append(v.offsets[0] / maximum_offset)
# [F5] -> relative last occurrence
feature_array.append(v.offsets[-1] / maximum_offset)
# [F6] -> length of phrases in words
feature_array.append(len(v.lexical_form))
# [F7] -> typeface
feature_array.append(0)
# extract information from sentence meta information
meta = [self.sentences[sid].meta for sid in v.sentence_ids]
# extract meta information of candidate
sections = [u['section'] for u in meta if 'section' in u]
types = [u['type'] for u in meta if 'type' in u]
# [F8] -> Is in title
feature_array.append('title' in sections)
# [F9] -> TitleOverlap
feature_array.append(0)
# [F10] -> Header
feature_array.append('sectionHeader' in types or
'subsectionHeader' in types or
'subsubsectionHeader' in types)
# [F11] -> abstract
feature_array.append('abstract' in sections)
# [F12] -> introduction
feature_array.append('introduction' in sections)
# [F13] -> related work
feature_array.append('related work' in sections)
# [F14] -> conclusions
feature_array.append('conclusions' in sections)
# [F15] -> HeaderF
feature_array.append(types.count('sectionHeader') +
types.count('subsectionHeader') +
types.count('subsubsectionHeader'))
# [F11] -> abstractF
feature_array.append(sections.count('abstract'))
# [F12] -> introductionF
feature_array.append(sections.count('introduction'))
# [F13] -> related workF
feature_array.append(sections.count('related work'))
# [F14] -> conclusionsF
feature_array.append(sections.count('conclusions'))
# add the features to the instance container
self.instances[k] = np.array([feature_array[i - 1] for i
in features_set])
# scale features
self.feature_scaling()
def candidate_weighting(self, model_file=None, df=None):
"""Extract features and classify candidates.
Args:
model_file (str): path to the model file.
df (dict): document frequencies, the number of documents should
be specified using the "--NB_DOC--" key.
"""
if not self.candidates:
return
self.feature_extraction(df=df)
self.classify_candidates(model=model_file)
@staticmethod
def train(training_instances, training_classes, model_file):
""" Train a Naive Bayes classifier and store the model in a file.
Args:
training_instances (list): list of features.
training_classes (list): list of binary values.
model_file (str): the model output file.
"""
clf = MultinomialNB()
clf.fit(training_instances, training_classes)
dump_model(clf, model_file)
# with open(model_file, 'wb') as f:
# pickle.dump(clf, f)
| 8,740 | 32.619231 | 90 | py |
pke | pke-master/pke/supervised/feature_based/__init__.py | # -*- coding: utf-8 -*-
| 24 | 11.5 | 23 | py |
pke | pke-master/examples/compute-lda_model.py | # -*- coding: utf-8 -*-
import sys
import logging
from glob import glob
import xml.etree.ElementTree as etree
from pke import compute_lda_model
# setting info in terminal
logging.basicConfig(level=logging.INFO)
# path to the collection of xml documents
input_dir = sys.argv[1]
# path to the lda model, saved as a gzip file
output_file = sys.argv[2]
# number of topics for the LDA model
n_topics = int(sys.argv[3])
def read_corenlp_xml(path):
sentences = []
tree = etree.parse(path, etree.XMLParser())
for sentence in tree.iterfind('./document/sentences/sentence'):
# get the character offsets
starts = [int(u.text) for u in
sentence.iterfind('tokens/token/CharacterOffsetBegin')]
ends = [int(u.text) for u in
sentence.iterfind('tokens/token/CharacterOffsetEnd')]
doc = {
'words': [u.text for u in
sentence.iterfind('tokens/token/word')],
'lemmas': [u.text for u in
sentence.iterfind('tokens/token/lemma')],
'POS': [u.text for u in sentence.iterfind('tokens/token/POS')],
'char_offsets': [(starts[k], ends[k]) for k in
range(len(starts))]
}
sentences.append(
[(doc['words'][i], doc['POS'][i])
for i in range(len(doc['words']))])
return sentences
documents = []
for fn in glob(input_dir + '*.xml'):
doc = read_corenlp_xml(fn)
documents.append(doc)
compute_lda_model(
documents,
output_file=output_file,
n_topics=n_topics,
language='en',
normalization='stemming'
)
| 1,646 | 26.915254 | 75 | py |
pke | pke-master/examples/benchmarking-models.py | # -*- coding: utf-8 -*-
import re
import spacy
import numpy as np
from tqdm import tqdm
from spacy.tokenizer import _get_regex_pattern
from datasets import load_dataset
from pke.unsupervised import *
from pke import compute_document_frequency, load_document_frequency_file
# load the inspec dataset
dataset = load_dataset('boudinfl/inspec', "all")
nlp = spacy.load("en_core_web_sm")
# Tokenization fix for in-word hyphens (e.g. 'non-linear' would be kept
# as one token instead of default spacy behavior of 'non', '-', 'linear')
re_token_match = _get_regex_pattern(nlp.Defaults.token_match)
re_token_match = f"({re_token_match}|\w+-\w+)"
nlp.tokenizer.token_match = re.compile(re_token_match).match
# populates a docs list with spacy doc objects
train_docs = []
for sample in tqdm(dataset['train']):
train_docs.append(nlp(sample["title"]+". "+sample["abstract"]))
test_docs = []
for sample in tqdm(dataset['test']):
test_docs.append(nlp(sample["title"]+". "+sample["abstract"]))
# compute document frequencies
compute_document_frequency(
documents=train_docs,
output_file="df-inspec.tsv.gz",
language='en', # language of the input files
normalization='stemming', # use porter stemmer
n=5 # compute n-grams up to 5-grams
)
# load df counts
df = load_document_frequency_file(input_file='df-inspec.tsv.gz')
outputs = {}
for model in [FirstPhrases, TopicRank, PositionRank, MultipartiteRank, TextRank]:
outputs[model.__name__] = []
extractor = model()
for i, doc in enumerate(tqdm(test_docs)):
extractor.load_document(input=doc, language='en')
extractor.grammar_selection(grammar="NP: {<ADJ>*<NOUN|PROPN>+}")
extractor.candidate_weighting()
outputs[model.__name__].append([u for u, v in extractor.get_n_best(n=5, stemming=True)])
for model in [KPMiner, TfIdf]:
outputs[model.__name__] = []
extractor = model()
for i, doc in enumerate(tqdm(test_docs)):
extractor.load_document(input=doc, language='en')
extractor.grammar_selection(grammar="NP: {<ADJ>*<NOUN|PROPN>+}")
extractor.candidate_weighting(df=df)
outputs[model.__name__].append([u for u, v in extractor.get_n_best(n=5, stemming=True)])
def evaluate(top_N_keyphrases, references):
P = len(set(top_N_keyphrases) & set(references)) / len(top_N_keyphrases)
R = len(set(top_N_keyphrases) & set(references)) / len(references)
F = (2 * P * R) / (P + R) if (P + R) > 0 else 0
return (P, R, F)
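# e.g. evaluate(['a', 'b'], ['b', 'c']) gives P = 0.5, R = 0.5 and F = 0.5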
# loop through the models
for model in outputs:
# compute the P, R, F scores for the model
scores = []
for i, output in enumerate(tqdm(outputs[model])):
references = dataset['test'][i]["uncontr_stems"]
scores.append(evaluate(output, references))
# compute the average scores
avg_scores = np.mean(scores, axis=0)
# print out the performance of the model
print("Model: {} P@5: {:.3f} R@5: {:.3f} F@5: {:.3f}".format(model, avg_scores[0], avg_scores[1], avg_scores[2]))
| 3,002 | 33.918605 | 117 | py |
pke | pke-master/examples/keyphrase-extraction.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# this example uses TopicRank
from pke.unsupervised import TopicRank
# create a TopicRank extractor
extractor = TopicRank()
# load the content of the document, here in raw text format
# the input language is set to English (used for the stoplist)
# normalization is set to stemming (computed with Porter's stemming algorithm)
with open('2.txt') as f:
doc = f.read()
extractor.load_document(
doc,
language='en',
normalization='stemming')
# select the keyphrase candidates, for TopicRank the longest sequences of
# nouns and adjectives
extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
# weight the candidates using a random walk. The threshold parameter sets the
# minimum similarity for clustering, and the method parameter defines the
# linkage method
extractor.candidate_weighting(threshold=0.74,
method='average')
# print the n-highest (10) scored candidates
for (keyphrase, score) in extractor.get_n_best(n=10, stemming=True):
print(keyphrase, score)
| 1,069 | 31.424242 | 78 | py |
pke | pke-master/examples/compute-df-counts.py | # -*- coding: utf-8 -*-
import sys
import logging
from glob import glob
from string import punctuation
import xml.etree.ElementTree as etree
from pke import compute_document_frequency
# setting info in terminal
logging.basicConfig(level=logging.INFO)
# path to the collection of xml documents
input_dir = sys.argv[1]
# path to the df weights dictionary, saved as a gzipped csv file
output_file = sys.argv[2]
# stoplist are punctuation marks
stoplist = list(punctuation)
def read_corenlp_xml(path):
sentences = []
tree = etree.parse(path, etree.XMLParser())
for sentence in tree.iterfind('./document/sentences/sentence'):
# get the character offsets
starts = [int(u.text) for u in
sentence.iterfind('tokens/token/CharacterOffsetBegin')]
ends = [int(u.text) for u in
sentence.iterfind('tokens/token/CharacterOffsetEnd')]
doc = {
'words': [u.text for u in
sentence.iterfind('tokens/token/word')],
'lemmas': [u.text for u in
sentence.iterfind('tokens/token/lemma')],
'POS': [u.text for u in sentence.iterfind('tokens/token/POS')],
'char_offsets': [(starts[k], ends[k]) for k in
range(len(starts))]
}
sentences.append(
[(doc['words'][i], doc['POS'][i])
for i in range(len(doc['words']))])
return sentences
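# read_corenlp_xml() returns one list per sentence, each made of (word, POS)
# tuples -- the pre-tokenized input format that pke also accepts in place of
# raw text.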
documents = []
for fn in glob(input_dir + '*.xml'):
doc = read_corenlp_xml(fn)
documents.append(doc)
# compute idf weights
compute_document_frequency(
documents,
output_file=output_file,
language='en', # language of the input files
normalization='stemming', # use porter stemmer
stoplist=stoplist, # stoplist
n=5 # compute n-grams up to 5-grams
)
| 1,841 | 28.238095 | 75 | py |
pke | pke-master/examples/training_and_testing_a_kea_model/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pke
base = os.path.dirname(__file__)
# create a Kea extractor and set the input language to English (used for
# the stoplist in the candidate selection method)
extractor = pke.supervised.Kea()
# load the content of the document, here a raw text file
with open(base + os.sep + '2.txt') as f:
doc = f.read()
extractor.load_document(doc)
# select the keyphrase candidates, for Kea the 1-3 grams that do not start or
# end with a stopword.
extractor.candidate_selection()
# load the df counts
df_counts = pke.load_document_frequency_file(
input_file=base + os.sep + 'df.tsv.gz',
delimiter='\t')
# weight the candidates using Kea model.
extractor.candidate_weighting(
model_file=base + os.sep + 'model.pickle',
df=df_counts)
# print the n-highest (10) scored candidates
for (keyphrase, score) in extractor.get_n_best(n=10):
print(keyphrase, score)
| 941 | 25.166667 | 77 | py |
pke | pke-master/examples/training_and_testing_a_kea_model/train.py | # -*- coding: utf-8 -*-
import os
import logging
from glob import glob
import pke
# setting info in terminal
logging.basicConfig(level=logging.INFO)
base = os.path.dirname(__file__)
# path to the collection of documents
documents = []
for fn in glob(base + os.sep + 'train/*.txt'):
with open(fn) as f:
doc = f.read()
doc_id = os.path.basename(fn).rsplit('.', 1)[0]
documents.append((doc_id, doc))
logging.info('Loaded {} documents'.format(len(documents)))
# path to the reference file
reference = {}
with open(base + os.sep + 'gold-annotation.txt') as f:
for line in f:
doc_id, keywords = line.split(' : ')
reference[doc_id] = keywords.split(',')
# path to the df file
df_file = base + os.sep + 'df.tsv.gz'
logging.info('Loading df counts from {}'.format(df_file))
df_counts = pke.load_document_frequency_file(
input_file=df_file, delimiter='\t'
)
# path to the model, saved as a pickle
output_mdl = base + os.sep + 'model.pickle'
pke.train_supervised_model(
documents,
reference,
model_file=output_mdl,
language='en',
normalization='stemming',
df=df_counts,
model=pke.supervised.Kea()
)
| 1,168 | 23.354167 | 58 | py |
pke | pke-master/tests/sample.py | # -*- coding: utf-8 -*
import spacy
nlp = spacy.load("en_core_web_sm")
sample = """Inverse problems for a mathematical model of ion exchange in a compressible ion exchanger.
A mathematical model of ion exchange is considered, allowing for ion exchanger compression in the process
of ion exchange. Two inverse problems are investigated for this model, unique solvability is proved, and
numerical solution methods are proposed. The efficiency of the proposed methods is demonstrated by a
numerical experiment.""".replace("\n", " ")
sample_doc = nlp(sample)
sample_list = [[('Inverse', 'NOUN'), ('problems', 'NOUN'), ('for', 'ADP'), ('a', 'DET'), ('mathematical', 'ADJ'),
('model', 'NOUN'), ('of', 'ADP'), ('ion', 'NOUN'), ('exchange', 'NOUN'), ('in', 'ADP'), ('a', 'DET'),
('compressible', 'ADJ'), ('ion', 'NOUN'), ('exchanger', 'NOUN'), ('.', 'PUNCT')],
[('A', 'DET'), ('mathematical', 'ADJ'), ('model', 'NOUN'), ('of', 'ADP'), ('ion', 'NOUN'),
('exchange', 'NOUN'), ('is', 'AUX'), ('considered', 'VERB'), (',', 'PUNCT'), ('allowing', 'VERB'),
('for', 'ADP'), ('ion', 'NOUN'), ('exchanger', 'NOUN'), ('compression', 'NOUN'), ('in', 'ADP'),
('the', 'DET'), ('process', 'NOUN'), ('of', 'ADP'), ('ion', 'NOUN'), ('exchange', 'NOUN'),
('.', 'PUNCT')],
[('Two', 'NUM'), ('inverse', 'NOUN'), ('problems', 'NOUN'), ('are', 'AUX'), ('investigated', 'VERB'),
('for', 'ADP'), ('this', 'DET'), ('model', 'NOUN'), (',', 'PUNCT'), ('unique', 'ADJ'),
('solvability', 'NOUN'), ('is', 'AUX'), ('proved', 'VERB'), (',', 'PUNCT'), ('and', 'CCONJ'),
('numerical', 'ADJ'), ('solution', 'NOUN'), ('methods', 'NOUN'), ('are', 'AUX'), ('proposed', 'VERB'),
('.', 'PUNCT')],
[('The', 'DET'), ('efficiency', 'NOUN'), ('of', 'ADP'), ('the', 'DET'), ('proposed', 'VERB'),
('methods', 'NOUN'), ('is', 'AUX'), ('demonstrated', 'VERB'), ('by', 'ADP'), ('a', 'DET'),
('numerical', 'ADJ'), ('experiment', 'NOUN'), ('.', 'PUNCT')]]
| 2,140 | 68.064516 | 118 | py |
pke | pke-master/tests/test_reading.py | # -*- coding: utf-8 -*-
import pke
from .sample import sample, sample_doc, sample_list
def test_reading():
# loading from string
extractor1 = pke.base.LoadFile()
extractor1.load_document(sample)
    # loading from a spacy doc object
extractor2 = pke.base.LoadFile()
extractor2.load_document(sample_doc)
# loading from preprocessed text
extractor3 = pke.base.LoadFile()
extractor3.load_document(sample_list)
assert len(extractor1.sentences) == 4 and extractor1.sentences == extractor2.sentences and \
extractor2.sentences == extractor3.sentences and extractor1.sentences[0] == extractor2.sentences[0] and \
extractor2.sentences[0] == extractor3.sentences[0]
if __name__ == '__main__':
test_reading()
| 759 | 25.206897 | 116 | py |
pke | pke-master/tests/test_firstphrases.py | # -*- coding: utf-8 -*-
import pke
from .sample import sample_list
valid_pos = {'NOUN', 'PROPN', 'ADJ'}
def test_firstphrases_candidate_selection():
extractor = pke.unsupervised.FirstPhrases()
extractor.load_document(input=sample_list)
extractor.candidate_selection(pos=valid_pos)
assert len(extractor.candidates) == 12
def test_firstphrases_candidate_weighting():
extractor = pke.unsupervised.FirstPhrases()
extractor.load_document(input=sample_list)
extractor.candidate_selection(pos=valid_pos)
extractor.candidate_weighting()
keyphrases = [k for k, s in extractor.get_n_best(n=3)]
assert keyphrases == ['inverse problems', 'mathematical model', 'ion exchange']
if __name__ == '__main__':
test_firstphrases_candidate_selection()
test_firstphrases_candidate_weighting()
| 827 | 28.571429 | 83 | py |
pke | pke-master/tests/__init__.py | 0 | 0 | 0 | py |
|
pke | pke-master/tests/test_utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pke
data_path = os.path.join('tests', 'data')
def create_df(corpus, tmp_path, name='corpus_df.gz'):
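    """Compute a document-frequency file for `corpus` under `tmp_path` and
    return the loaded counts together with the file path (test helper)."""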
df_file = tmp_path / name
pke.utils.compute_document_frequency(
corpus, str(df_file), n=1)
corpus_df = pke.utils.load_document_frequency_file(str(df_file))
return corpus_df, df_file
def test_load_reference():
"""Various tests for loading a reference file."""
id = 'C-41'
g1 = pke.utils.load_references(input_file=data_path + os.sep + 'reference.json',
normalize_reference=True,
language="en",
encoding='utf-8')
g2 = pke.utils.load_references(input_file=data_path + os.sep + 'reference.stem.json',
normalize_reference=False,
language="en",
encoding='utf-8')
g3 = pke.utils.load_references(input_file=data_path + os.sep + 'reference.final',
normalize_reference=True,
language="en",
encoding='utf-8')
g4 = pke.utils.load_references(input_file=data_path + os.sep + 'reference.stem.final',
normalize_reference=False,
language="en",
encoding='utf-8')
assert set(g1[id]) == set(g2[id]) == set(g3[id]) == set(g4[id])
# TODO: test load_document_frequency_file ? As it is used to test
# compute_document_frequency
def test_compute_document_frequency(tmp_path):
from collections import Counter
# tmp_path is a Path object provided automatically by pytest
# Create a corpus
corpus = ['lorem sit amet', 'lorem ipsum']
# Create expected value
expected = Counter([t for v in corpus for t in v.split()])
expected['--NB_DOC--'] = len(corpus)
# Compute document frequency
tmp_freq = tmp_path / 'tmp_doc_freq.tsv.gz'
pke.utils.compute_document_frequency(
corpus, str(tmp_freq), n=1)
# Asserting
df = pke.utils.load_document_frequency_file(str(tmp_freq))
assert df == expected
def test_compute_lda(tmp_path):
import gzip
import pickle
# Create a corpus
corpus = ['lorem sit amet', 'lorem ipsum']
# Create expected value
expected_dict = set(t for v in corpus for t in v.split())
# Compute LDA topics
tmp_lda = tmp_path / 'lda.pickle.gz'
pke.utils.compute_lda_model(
corpus, str(tmp_lda), n_topics=2)
# Asserting
dictionary, _ = pke.utils.load_lda_model(tmp_lda)
assert sorted(dictionary) == sorted(expected_dict)
def test_train_supervised_model(tmp_path):
# Create a corpus
corpus = [('001', 'lorem sit amet'), ('002', 'lorem ipsum')]
reference = {'001': ['ref1', 'ref2'], '002': ['ref1', 'ref2']}
tmp_model = tmp_path / 'model.pickle'
pke.utils.train_supervised_model(
corpus, reference, str(tmp_model),
df=None, leave_one_out=False,
model=pke.supervised.Kea()) # TODO: fix doc for model param
def test_train_supervised_model_leave_one_out(tmp_path):
# Create a corpus
corpus = [('001', 'lorem sit amet'), ('002', 'lorem ipsum')]
reference = {'001': ['ref1', 'ref2'], '002': ['ref1', 'ref2']}
tmp_model = tmp_path / 'model.pickle'
pke.utils.train_supervised_model(
corpus, reference, str(tmp_model),
df=None, leave_one_out=True,
model=pke.supervised.Kea()) # TODO: fix doc for model param
| 3,645 | 31.553571 | 90 | py |